Providers: Implemented anthropic, gemini, openai & azureopenai

Manas Hejmadi
2025-06-22 23:07:06 +05:30
parent a6d1c410d6
commit 69e3882357
6 changed files with 417 additions and 2 deletions

@@ -0,0 +1,86 @@
import 'dart:convert';
import 'package:better_networking/better_networking.dart';
import 'package:shared_preferences/shared_preferences.dart';
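
/// Manages the catalogue of available LLM models per provider and persists it
/// locally via SharedPreferences. Remote models are fetched from [modelRemoteURL];
/// locally installed Ollama models are discovered through the Ollama HTTP API.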
class LLMManager {
static Map availableModels = {
"gemini": [
["gemini-2.0-flash", "Gemini 2.0 Flash"],
],
};
static get models => availableModels;
static const String modelRemoteURL =
'https://raw.githubusercontent.com/synapsecode/apidash/llm_model_rearch/packages/genai/models.json';
static const String baseOllamaURL = 'http://localhost:11434';
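
/// Registers a model under [providerID] as a [modelID, modelName] pair and
/// persists the updated catalogue.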
static addLLM(String providerID, String modelID, String modelName) async {
// create the provider entry if it does not exist yet
availableModels[providerID] = [
...(availableModels[providerID] ?? []),
[modelID, modelName],
];
await saveAvailableLLMs(availableModels);
}
static removeLLM(String providerID, String modelID, String modelName) async {
List z = availableModels[providerID] as List;
// keep every entry that is not an exact [modelID, modelName] match
z = z.where((x) => x[0] != modelID || x[1] != modelName).toList();
availableModels[providerID] = z;
await saveAvailableLLMs(availableModels);
}
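
/// Fetches the remote model catalogue, merges in locally installed Ollama
/// models, persists the result and reloads it into [availableModels].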
static fetchAvailableLLMs([String? remoteURL, String? ollamaURL]) async {
// fetch the model catalogue from the remote models.json
final (resp, _, __) = await sendHttpRequest(
'FETCH_MODELS',
APIType.rest,
HttpRequestModel(url: remoteURL ?? modelRemoteURL, method: HTTPVerb.get),
);
if (resp == null) {
throw Exception('UNABLE TO FETCH MODELS');
}
Map remoteModels = jsonDecode(resp.body);
final oM = await fetchInstalledOllamaModels(ollamaURL);
remoteModels['ollama'] = oM;
await saveAvailableLLMs(remoteModels);
await loadAvailableLLMs();
}
static saveAvailableLLMs(Map updatedLLMs) async {
SharedPreferences prefs = await SharedPreferences.getInstance();
await prefs.setString('genai_available_llms', jsonEncode(updatedLLMs));
}
static loadAvailableLLMs() async {
SharedPreferences prefs = await SharedPreferences.getInstance();
final avl = prefs.getString('genai_available_llms');
if (avl != null) {
availableModels = jsonDecode(avl);
}
}
static clearAvailableLLMs() async {
SharedPreferences prefs = await SharedPreferences.getInstance();
prefs.remove('genai_available_llms');
}
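
/// Queries the local Ollama daemon (/api/tags) and returns the installed
/// models as [modelID, modelName] pairs.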
static Future<List?> fetchInstalledOllamaModels([String? ollamaURL]) async {
final url = "${ollamaURL ?? baseOllamaURL}/api/tags";
final (resp, _, __) = await sendHttpRequest(
'OLLAMA_FETCH',
APIType.rest,
HttpRequestModel(url: url, method: HTTPVerb.get),
noSSL: true,
);
if (resp == null) return [];
final output = jsonDecode(resp.body);
final models = output['models'];
if (models == null) return [];
List ollamaModels = [];
for (final m in models) {
ollamaModels.add([m['model'], m['name']]);
}
return ollamaModels;
}
}

@@ -0,0 +1,74 @@
import '../llm_config.dart';
import '../llm_input_payload.dart';
import '../llm_request.dart';
import 'common.dart';
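
/// Builds requests for the Anthropic Messages API
/// (https://api.anthropic.com/v1/messages).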
class AnthropicModelController extends ModelController {
static final instance = AnthropicModelController();
@override
LLMInputPayload get inputPayload => LLMInputPayload(
endpoint: 'https://api.anthropic.com/v1/messages',
credential: '',
systemPrompt: '',
userPrompt: '',
configMap: {
LLMConfigName.temperature.name:
defaultLLMConfigurations[LLMConfigName.temperature]!,
LLMConfigName.top_p.name: defaultLLMConfigurations[LLMConfigName.top_p]!,
},
).clone();
@override
LLMRequestDetails createRequest(
LLMModel model,
LLMInputPayload inputPayload, {
bool stream = false,
}) {
return LLMRequestDetails(
endpoint: inputPayload.endpoint,
headers: {
'anthropic-version': '2023-06-01',
// the Messages API authenticates with the x-api-key header, not a Bearer token
'x-api-key': inputPayload.credential,
},
method: 'POST',
body: {
"model": model.identifier,
if (stream) ...{'stream': true},
// Anthropic takes the system prompt as a top-level field; "system" is not a valid message role
"system": inputPayload.systemPrompt,
"messages": [
{"role": "user", "content": inputPayload.userPrompt},
],
"temperature":
inputPayload
.configMap[LLMConfigName.temperature.name]
?.configValue
.value
?.$2 ??
0.5,
"top_p":
inputPayload
.configMap[LLMConfigName.top_p.name]
?.configValue
.value
?.$2 ??
0.95,
// max_tokens is mandatory for the Messages API; fall back to a default when unset
"max_tokens": inputPayload
.configMap[LLMConfigName.max_tokens.name]
?.configValue
.value ??
1024,
},
);
}
@override
String? outputFormatter(Map x) {
return x['content']?[0]['text'];
}
@override
String? streamOutputFormatter(Map x) {
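// NOTE: raw Anthropic SSE content_block_delta events carry the text under
// x['delta']?['text']; this assumes the stream parser already unwraps the
// delta and passes a map with a top-level 'text' key.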
return x['text'];
}
}

@@ -0,0 +1,77 @@
import '../llm_config.dart';
import '../llm_input_payload.dart';
import '../llm_request.dart';
import 'common.dart';
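
/// Builds requests for an Azure OpenAI chat-completions deployment. The full
/// deployment endpoint (typically including the api-version query parameter)
/// is supplied by the user.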
class AzureOpenAIModelController extends ModelController {
static final instance = AzureOpenAIModelController();
@override
LLMInputPayload get inputPayload => LLMInputPayload(
endpoint: '', //TO BE FILLED BY USER
credential: '',
systemPrompt: '',
userPrompt: '',
configMap: {
LLMConfigName.temperature.name:
defaultLLMConfigurations[LLMConfigName.temperature]!,
LLMConfigName.top_p.name: defaultLLMConfigurations[LLMConfigName.top_p]!,
},
).clone();
@override
LLMRequestDetails createRequest(
LLMModel model,
LLMInputPayload inputPayload, {
bool stream = false,
}) {
if (inputPayload.endpoint.isEmpty) {
throw Exception('MODEL ENDPOINT IS EMPTY');
}
return LLMRequestDetails(
endpoint: inputPayload.endpoint,
headers: {'api-key': inputPayload.credential},
method: 'POST',
body: {
if (stream) ...{'stream': true},
"messages": [
{"role": "system", "content": inputPayload.systemPrompt},
if (inputPayload.userPrompt.isNotEmpty) ...{
{"role": "user", "content": inputPayload.userPrompt},
} else ...{
{"role": "user", "content": "Generate"},
},
],
"temperature":
inputPayload
.configMap[LLMConfigName.temperature.name]
?.configValue
.value
?.$2 ??
0.5,
"top_p":
inputPayload
.configMap[LLMConfigName.top_p.name]
?.configValue
.value
?.$2 ??
0.95,
if (inputPayload.configMap[LLMConfigName.max_tokens.name] != null) ...{
"max_tokens": inputPayload
.configMap[LLMConfigName.max_tokens.name]!
.configValue
.value,
},
},
);
}
@override
String? outputFormatter(Map x) {
return x["choices"]?[0]["message"]?["content"]?.trim();
}
@override
String? streamOutputFormatter(Map x) {
return x["choices"]?[0]["delta"]?["content"];
}
}

@@ -0,0 +1,92 @@
import '../llm_config.dart';
import '../llm_input_payload.dart';
import '../llm_request.dart';
import 'common.dart';
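
/// Builds requests for the Gemini API (generativelanguage.googleapis.com).
/// The API key is passed as the `key` query parameter; streaming uses the
/// SSE variant of streamGenerateContent.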
class GeminiModelController extends ModelController {
static final instance = GeminiModelController();
@override
LLMInputPayload get inputPayload => LLMInputPayload(
endpoint: 'https://generativelanguage.googleapis.com/v1beta/models',
credential: '',
systemPrompt: '',
userPrompt: '',
configMap: {
LLMConfigName.temperature.name:
defaultLLMConfigurations[LLMConfigName.temperature]!,
LLMConfigName.top_p.name: defaultLLMConfigurations[LLMConfigName.top_p]!,
},
).clone();
@override
LLMRequestDetails createRequest(
LLMModel model,
LLMInputPayload inputPayload, {
bool stream = false,
}) {
String endpoint = inputPayload.endpoint;
endpoint =
"$endpoint/${model.identifier}:generateContent?key=${inputPayload.credential}";
if (stream) {
endpoint = endpoint.replaceAll(
'generateContent?',
'streamGenerateContent?alt=sse&',
);
}
return LLMRequestDetails(
endpoint: endpoint,
headers: {},
method: 'POST',
body: {
"model": model.identifier,
"contents": [
{
"role": "user",
"parts": [
{"text": inputPayload.userPrompt},
],
},
],
"systemInstruction": {
"role": "system",
"parts": [
{"text": inputPayload.systemPrompt},
],
},
"generationConfig": {
"temperature":
inputPayload
.configMap[LLMConfigName.temperature.name]
?.configValue
.value
?.$2 ??
0.5,
"topP":
inputPayload
.configMap[LLMConfigName.top_p.name]
?.configValue
.value
?.$2 ??
0.95,
if (inputPayload.configMap[LLMConfigName.max_tokens.name] !=
null) ...{
"maxOutputTokens": inputPayload
.configMap[LLMConfigName.max_tokens.name]!
.configValue
.value,
},
},
},
);
}
@override
String? outputFormatter(Map x) {
return x['candidates']?[0]?['content']?['parts']?[0]?['text'];
}
@override
String? streamOutputFormatter(Map x) {
return x['candidates']?[0]?['content']?['parts']?[0]?['text'];
}
}

@@ -0,0 +1,76 @@
import '../llm_config.dart';
import '../llm_input_payload.dart';
import '../llm_request.dart';
import 'common.dart';
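
/// Builds requests for the OpenAI Chat Completions API
/// (https://api.openai.com/v1/chat/completions).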
class OpenAIModelController extends ModelController {
static final instance = OpenAIModelController();
@override
LLMInputPayload get inputPayload => LLMInputPayload(
endpoint: 'https://api.openai.com/v1/chat/completions',
credential: '',
systemPrompt: '',
userPrompt: '',
configMap: {
LLMConfigName.temperature.name:
defaultLLMConfigurations[LLMConfigName.temperature]!,
LLMConfigName.top_p.name: defaultLLMConfigurations[LLMConfigName.top_p]!,
},
).clone();
@override
LLMRequestDetails createRequest(
LLMModel model,
LLMInputPayload inputPayload, {
bool stream = false,
}) {
return LLMRequestDetails(
endpoint: inputPayload.endpoint,
headers: {'Authorization': "Bearer ${inputPayload.credential}"},
method: 'POST',
body: {
'model': model.identifier,
if (stream) ...{'stream': true},
"messages": [
{"role": "system", "content": inputPayload.systemPrompt},
if (inputPayload.userPrompt.isNotEmpty) ...{
{"role": "user", "content": inputPayload.userPrompt},
} else ...{
{"role": "user", "content": "Generate"},
},
],
"temperature":
inputPayload
.configMap[LLMConfigName.temperature.name]
?.configValue
.value
?.$2 ??
0.5,
"top_p":
inputPayload
.configMap[LLMConfigName.top_p.name]
?.configValue
.value
?.$2 ??
0.95,
if (inputPayload.configMap[LLMConfigName.max_tokens.name] != null) ...{
"max_tokens": inputPayload
.configMap[LLMConfigName.max_tokens.name]!
.configValue
.value,
},
},
);
}
@override
String? outputFormatter(Map x) {
return x["choices"]?[0]["message"]?["content"]?.trim();
}
@override
String? streamOutputFormatter(Map x) {
return x["choices"]?[0]["delta"]?["content"];
}
}

@@ -1,6 +1,10 @@
 import '../llm_manager.dart';
+import 'anthropic.dart';
+import 'azureopenai.dart';
 import 'common.dart';
+import 'gemini.dart';
 import 'ollama.dart';
+import 'openai.dart';
 
 enum LLMProvider {
 gemini('Gemini'),
@@ -27,8 +31,14 @@ enum LLMProvider {
 switch (this) {
 case LLMProvider.ollama:
 return OllamaModelController.instance;
-case _:
-return OllamaModelController.instance;
+case LLMProvider.gemini:
+return GeminiModelController.instance;
+case LLMProvider.azureopenai:
+return AzureOpenAIModelController.instance;
+case LLMProvider.openai:
+return OpenAIModelController.instance;
+case LLMProvider.anthropic:
+return AnthropicModelController.instance;
 }
 }