From 69e388235779075ff004244032461c150fff5715 Mon Sep 17 00:00:00 2001
From: Manas Hejmadi
Date: Sun, 22 Jun 2025 23:07:06 +0530
Subject: [PATCH] Providers: Implemented anthropic, gemini, openai & azureopenai

---
 packages/genai/lib/llm_manager.dart           | 86 ++++++++++++++++++
 packages/genai/lib/providers/anthropic.dart   | 77 ++++++++++++++++
 packages/genai/lib/providers/azureopenai.dart | 77 ++++++++++++++++
 packages/genai/lib/providers/gemini.dart      | 92 +++++++++++++++++++
 packages/genai/lib/providers/openai.dart      | 76 +++++++++++++++
 packages/genai/lib/providers/providers.dart   | 14 ++-
 6 files changed, 420 insertions(+), 2 deletions(-)
 create mode 100644 packages/genai/lib/providers/anthropic.dart
 create mode 100644 packages/genai/lib/providers/azureopenai.dart
 create mode 100644 packages/genai/lib/providers/gemini.dart
 create mode 100644 packages/genai/lib/providers/openai.dart

diff --git a/packages/genai/lib/llm_manager.dart b/packages/genai/lib/llm_manager.dart
index e69de29b..2baf9179 100644
--- a/packages/genai/lib/llm_manager.dart
+++ b/packages/genai/lib/llm_manager.dart
@@ -0,0 +1,86 @@
+import 'dart:convert';
+import 'package:better_networking/better_networking.dart';
+import 'package:shared_preferences/shared_preferences.dart';
+
+class LLMManager {
+  static Map availableModels = {
+    "gemini": [
+      ["gemini-2.0-flash", "Gemini 2.0 Flash"],
+    ],
+  };
+
+  static get models => availableModels;
+
+  static const String modelRemoteURL =
+      'https://raw.githubusercontent.com/synapsecode/apidash/llm_model_rearch/packages/genai/models.json';
+  static const String baseOllamaURL = 'http://localhost:11434';
+
+  static Future<void> addLLM(String providerID, String modelID, String modelName) async {
+    availableModels[providerID] = [
+      ...(availableModels[providerID] ?? []),
+      [modelID, modelName],
+    ];
+    await saveAvailableLLMs(availableModels);
+  }
+
+  static Future<void> removeLLM(String providerID, String modelID, String modelName) async {
+    List z = availableModels[providerID] as List;
+    z = z.where((x) => x[0] != modelID || x[1] != modelName).toList();
+    availableModels[providerID] = z;
+    await saveAvailableLLMs(availableModels);
+  }
+
+  static Future<void> fetchAvailableLLMs([String? remoteURL, String? ollamaURL]) async {
+    // Get the LLM catalogue from the remote models.json
+    final (resp, _, __) = await sendHttpRequest(
+      'FETCH_MODELS',
+      APIType.rest,
+      HttpRequestModel(url: remoteURL ?? modelRemoteURL, method: HTTPVerb.get),
+    );
+    if (resp == null) {
+      throw Exception('UNABLE TO FETCH MODELS');
+    }
+    Map remoteModels = jsonDecode(resp.body);
+    final ollamaModels = await fetchInstalledOllamaModels(ollamaURL);
+    remoteModels['ollama'] = ollamaModels;
+    await saveAvailableLLMs(remoteModels);
+    await loadAvailableLLMs();
+  }
+
+  static Future<void> saveAvailableLLMs(Map updatedLLMs) async {
+    SharedPreferences prefs = await SharedPreferences.getInstance();
+    await prefs.setString('genai_available_llms', jsonEncode(updatedLLMs));
+  }
+
+  static Future<void> loadAvailableLLMs() async {
+    SharedPreferences prefs = await SharedPreferences.getInstance();
+    final avl = prefs.getString('genai_available_llms');
+    if (avl != null) {
+      availableModels = jsonDecode(avl);
+    }
+  }
+
+  static Future<void> clearAvailableLLMs() async {
+    SharedPreferences prefs = await SharedPreferences.getInstance();
+    await prefs.remove('genai_available_llms');
+  }
+
+  static Future fetchInstalledOllamaModels([String? ollamaURL]) async {
+    final url = "${ollamaURL ?? baseOllamaURL}/api/tags";
+    final (resp, _, __) = await sendHttpRequest(
+      'OLLAMA_FETCH',
+      APIType.rest,
+      HttpRequestModel(url: url, method: HTTPVerb.get),
+      noSSL: true,
+    );
+    if (resp == null) return [];
+    final output = jsonDecode(resp.body);
+    final models = output['models'];
+    if (models == null) return [];
+    List ollamaModels = [];
+    for (final m in models) {
+      ollamaModels.add([m['model'], m['name']]);
+    }
+    return ollamaModels;
+  }
+}
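
A minimal sketch of how app code might drive LLMManager as implemented above
(API names are from this patch; the provider and model IDs are illustrative):

    // Merge the remote models.json catalogue with the locally installed
    // Ollama models, then read back the provider -> [[id, name], ...] map.
    await LLMManager.fetchAvailableLLMs();
    final models = LLMManager.models;

    // Register (and later drop) a custom model for a provider.
    await LLMManager.addLLM('openai', 'gpt-4o-mini', 'GPT-4o Mini');
    await LLMManager.removeLLM('openai', 'gpt-4o-mini', 'GPT-4o Mini');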
diff --git a/packages/genai/lib/providers/anthropic.dart b/packages/genai/lib/providers/anthropic.dart
new file mode 100644
index 00000000..97deaa2a
--- /dev/null
+++ b/packages/genai/lib/providers/anthropic.dart
@@ -0,0 +1,77 @@
+import '../llm_config.dart';
+import '../llm_input_payload.dart';
+import '../llm_request.dart';
+import 'common.dart';
+
+class AnthropicModelController extends ModelController {
+  static final instance = AnthropicModelController();
+  @override
+  LLMInputPayload get inputPayload => LLMInputPayload(
+    endpoint: 'https://api.anthropic.com/v1/messages',
+    credential: '',
+    systemPrompt: '',
+    userPrompt: '',
+    configMap: {
+      LLMConfigName.temperature.name:
+          defaultLLMConfigurations[LLMConfigName.temperature]!,
+      LLMConfigName.top_p.name: defaultLLMConfigurations[LLMConfigName.top_p]!,
+    },
+  ).clone();
+
+  @override
+  LLMRequestDetails createRequest(
+    LLMModel model,
+    LLMInputPayload inputPayload, {
+    bool stream = false,
+  }) {
+    return LLMRequestDetails(
+      endpoint: inputPayload.endpoint,
+      headers: {
+        'anthropic-version': '2023-06-01',
+        // Anthropic API keys go in the x-api-key header, not a Bearer token.
+        'x-api-key': inputPayload.credential,
+      },
+      method: 'POST',
+      body: {
+        "model": model.identifier,
+        if (stream) ...{'stream': true},
+        // The Messages API takes the system prompt as a top-level field;
+        // a "system" role inside messages is rejected.
+        "system": inputPayload.systemPrompt,
+        "messages": [
+          {"role": "user", "content": inputPayload.userPrompt},
+        ],
+        "temperature":
+            inputPayload
+                .configMap[LLMConfigName.temperature.name]
+                ?.configValue
+                .value
+                ?.$2 ??
+            0.5,
+        "top_p":
+            inputPayload
+                .configMap[LLMConfigName.top_p.name]
+                ?.configValue
+                .value
+                ?.$2 ??
+            0.95,
+        // max_tokens is mandatory for the Messages API, so fall back to a
+        // default when the user has not configured it.
+        "max_tokens": inputPayload
+                .configMap[LLMConfigName.max_tokens.name]
+                ?.configValue
+                .value ??
+            1024,
+      },
+    );
+  }
+
+  @override
+  String? outputFormatter(Map x) {
+    return x['content']?[0]['text'];
+  }
+
+  @override
+  String? streamOutputFormatter(Map x) {
+    // SSE content_block_delta events carry the text under delta.text.
+    return x['delta']?['text'];
+  }
+}
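
For reference, the body that AnthropicModelController.createRequest builds
resolves to roughly the following map (all values illustrative; the model
identifier is hypothetical):

    final body = {
      "model": "claude-sonnet-4-20250514", // hypothetical identifier
      "system": "You are terse.",
      "messages": [
        {"role": "user", "content": "Say hi"},
      ],
      "temperature": 0.5,
      "top_p": 0.95,
      "max_tokens": 1024,
    };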
diff --git a/packages/genai/lib/providers/azureopenai.dart b/packages/genai/lib/providers/azureopenai.dart
new file mode 100644
index 00000000..18d45667
--- /dev/null
+++ b/packages/genai/lib/providers/azureopenai.dart
@@ -0,0 +1,77 @@
+import '../llm_config.dart';
+import '../llm_input_payload.dart';
+import '../llm_request.dart';
+import 'common.dart';
+
+class AzureOpenAIModelController extends ModelController {
+  static final instance = AzureOpenAIModelController();
+  @override
+  LLMInputPayload get inputPayload => LLMInputPayload(
+    endpoint: '', // to be filled in by the user
+    credential: '',
+    systemPrompt: '',
+    userPrompt: '',
+    configMap: {
+      LLMConfigName.temperature.name:
+          defaultLLMConfigurations[LLMConfigName.temperature]!,
+      LLMConfigName.top_p.name: defaultLLMConfigurations[LLMConfigName.top_p]!,
+    },
+  ).clone();
+
+  @override
+  LLMRequestDetails createRequest(
+    LLMModel model,
+    LLMInputPayload inputPayload, {
+    bool stream = false,
+  }) {
+    if (inputPayload.endpoint.isEmpty) {
+      throw Exception('MODEL ENDPOINT IS EMPTY');
+    }
+    return LLMRequestDetails(
+      endpoint: inputPayload.endpoint,
+      headers: {'api-key': inputPayload.credential},
+      method: 'POST',
+      body: {
+        if (stream) ...{'stream': true},
+        "messages": [
+          {"role": "system", "content": inputPayload.systemPrompt},
+          if (inputPayload.userPrompt.isNotEmpty) ...{
+            {"role": "user", "content": inputPayload.userPrompt},
+          } else ...{
+            {"role": "user", "content": "Generate"},
+          },
+        ],
+        "temperature":
+            inputPayload
+                .configMap[LLMConfigName.temperature.name]
+                ?.configValue
+                .value
+                ?.$2 ??
+            0.5,
+        "top_p":
+            inputPayload
+                .configMap[LLMConfigName.top_p.name]
+                ?.configValue
+                .value
+                ?.$2 ??
+            0.95,
+        if (inputPayload.configMap[LLMConfigName.max_tokens.name] != null) ...{
+          "max_tokens": inputPayload
+              .configMap[LLMConfigName.max_tokens.name]!
+              .configValue
+              .value,
+        },
+      },
+    );
+  }
+
+  @override
+  String? outputFormatter(Map x) {
+    return x["choices"]?[0]["message"]?["content"]?.trim();
+  }
+
+  @override
+  String? streamOutputFormatter(Map x) {
+    return x["choices"]?[0]["delta"]?["content"];
+  }
+}
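
Unlike the other providers, the Azure OpenAI endpoint is deployment-scoped and
must be supplied by the user, which is why createRequest throws when it is
empty. It typically looks like the following (resource name, deployment name
and api-version are illustrative):

    const endpoint =
        'https://my-resource.openai.azure.com/openai/deployments/'
        'my-deployment/chat/completions?api-version=2024-02-01';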
diff --git a/packages/genai/lib/providers/gemini.dart b/packages/genai/lib/providers/gemini.dart
new file mode 100644
index 00000000..56b85b87
--- /dev/null
+++ b/packages/genai/lib/providers/gemini.dart
@@ -0,0 +1,92 @@
+import '../llm_config.dart';
+import '../llm_input_payload.dart';
+import '../llm_request.dart';
+import 'common.dart';
+
+class GeminiModelController extends ModelController {
+  static final instance = GeminiModelController();
+  @override
+  LLMInputPayload get inputPayload => LLMInputPayload(
+    endpoint: 'https://generativelanguage.googleapis.com/v1beta/models',
+    credential: '',
+    systemPrompt: '',
+    userPrompt: '',
+    configMap: {
+      LLMConfigName.temperature.name:
+          defaultLLMConfigurations[LLMConfigName.temperature]!,
+      LLMConfigName.top_p.name: defaultLLMConfigurations[LLMConfigName.top_p]!,
+    },
+  ).clone();
+
+  @override
+  LLMRequestDetails createRequest(
+    LLMModel model,
+    LLMInputPayload inputPayload, {
+    bool stream = false,
+  }) {
+    String endpoint = inputPayload.endpoint;
+    endpoint =
+        "$endpoint/${model.identifier}:generateContent?key=${inputPayload.credential}";
+    if (stream) {
+      endpoint = endpoint.replaceAll(
+        'generateContent?',
+        'streamGenerateContent?alt=sse&',
+      );
+    }
+    return LLMRequestDetails(
+      endpoint: endpoint,
+      headers: {},
+      method: 'POST',
+      body: {
+        // The model is addressed via the URL path; the body has no "model" key.
+        "contents": [
+          {
+            "role": "user",
+            "parts": [
+              {"text": inputPayload.userPrompt},
+            ],
+          },
+        ],
+        "systemInstruction": {
+          "role": "system",
+          "parts": [
+            {"text": inputPayload.systemPrompt},
+          ],
+        },
+        "generationConfig": {
+          "temperature":
+              inputPayload
+                  .configMap[LLMConfigName.temperature.name]
+                  ?.configValue
+                  .value
+                  ?.$2 ??
+              0.5,
+          "topP":
+              inputPayload
+                  .configMap[LLMConfigName.top_p.name]
+                  ?.configValue
+                  .value
+                  ?.$2 ??
+              0.95,
+          if (inputPayload.configMap[LLMConfigName.max_tokens.name] !=
+              null) ...{
+            "maxOutputTokens": inputPayload
+                .configMap[LLMConfigName.max_tokens.name]!
+                .configValue
+                .value,
+          },
+        },
+      },
+    );
+  }
+
+  @override
+  String? outputFormatter(Map x) {
+    return x['candidates']?[0]?['content']?['parts']?[0]?['text'];
+  }
+
+  @override
+  String? streamOutputFormatter(Map x) {
+    return x['candidates']?[0]?['content']?['parts']?[0]?['text'];
+  }
+}
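
The endpoint rewriting above yields URLs of the following shape (model and key
are placeholders):

    non-streaming:
      https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=API_KEY
    streaming (SSE):
      https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:streamGenerateContent?alt=sse&key=API_KEY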
diff --git a/packages/genai/lib/providers/openai.dart b/packages/genai/lib/providers/openai.dart
new file mode 100644
index 00000000..bd9bdc26
--- /dev/null
+++ b/packages/genai/lib/providers/openai.dart
@@ -0,0 +1,76 @@
+import '../llm_config.dart';
+import '../llm_input_payload.dart';
+import '../llm_request.dart';
+import 'common.dart';
+
+class OpenAIModelController extends ModelController {
+  static final instance = OpenAIModelController();
+
+  @override
+  LLMInputPayload get inputPayload => LLMInputPayload(
+    endpoint: 'https://api.openai.com/v1/chat/completions',
+    credential: '',
+    systemPrompt: '',
+    userPrompt: '',
+    configMap: {
+      LLMConfigName.temperature.name:
+          defaultLLMConfigurations[LLMConfigName.temperature]!,
+      LLMConfigName.top_p.name: defaultLLMConfigurations[LLMConfigName.top_p]!,
+    },
+  ).clone();
+
+  @override
+  LLMRequestDetails createRequest(
+    LLMModel model,
+    LLMInputPayload inputPayload, {
+    bool stream = false,
+  }) {
+    return LLMRequestDetails(
+      endpoint: inputPayload.endpoint,
+      headers: {'Authorization': "Bearer ${inputPayload.credential}"},
+      method: 'POST',
+      body: {
+        'model': model.identifier,
+        if (stream) ...{'stream': true},
+        "messages": [
+          {"role": "system", "content": inputPayload.systemPrompt},
+          if (inputPayload.userPrompt.isNotEmpty) ...{
+            {"role": "user", "content": inputPayload.userPrompt},
+          } else ...{
+            {"role": "user", "content": "Generate"},
+          },
+        ],
+        "temperature":
+            inputPayload
+                .configMap[LLMConfigName.temperature.name]
+                ?.configValue
+                .value
+                ?.$2 ??
+            0.5,
+        "top_p":
+            inputPayload
+                .configMap[LLMConfigName.top_p.name]
+                ?.configValue
+                .value
+                ?.$2 ??
+            0.95,
+        if (inputPayload.configMap[LLMConfigName.max_tokens.name] != null) ...{
+          "max_tokens": inputPayload
+              .configMap[LLMConfigName.max_tokens.name]!
+              .configValue
+              .value,
+        },
+      },
+    );
+  }
+
+  @override
+  String? outputFormatter(Map x) {
+    return x["choices"]?[0]["message"]?["content"]?.trim();
+  }
+
+  @override
+  String? streamOutputFormatter(Map x) {
+    return x["choices"]?[0]["delta"]?["content"];
+  }
+}
diff --git a/packages/genai/lib/providers/providers.dart b/packages/genai/lib/providers/providers.dart
index 97ed2378..dff914ef 100644
--- a/packages/genai/lib/providers/providers.dart
+++ b/packages/genai/lib/providers/providers.dart
@@ -1,6 +1,10 @@
 import '../llm_manager.dart';
+import 'anthropic.dart';
+import 'azureopenai.dart';
 import 'common.dart';
+import 'gemini.dart';
 import 'ollama.dart';
+import 'openai.dart';
 
 enum LLMProvider {
   gemini('Gemini'),
@@ -27,8 +31,14 @@
     switch (this) {
       case LLMProvider.ollama:
         return OllamaModelController.instance;
-      case _:
-        return OllamaModelController.instance;
+      case LLMProvider.gemini:
+        return GeminiModelController.instance;
+      case LLMProvider.azureopenai:
+        return AzureOpenAIModelController.instance;
+      case LLMProvider.openai:
+        return OpenAIModelController.instance;
+      case LLMProvider.anthropic:
+        return AnthropicModelController.instance;
     }
   }
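
Taken together, callers can resolve a controller per provider through the
LLMProvider enum and build a request from it. A rough sketch (the getter's
declaration sits outside this hunk, so its name below is hypothetical, and
request execution depends on common.dart, which this patch does not touch):

    final controller = LLMProvider.gemini.modelController; // hypothetical getter name
    final payload = controller.inputPayload;
    payload.credential = 'API_KEY'; // assumes LLMInputPayload fields are mutable
    payload.userPrompt = 'Hello!';
    final request = controller.createRequest(model, payload, stream: false);
    // ...send `request` via better_networking, then:
    // final text = controller.outputFormatter(jsonDecode(resp.body));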