From db599b3d7491bec3a849209fdbe1eb0ed7df35ee Mon Sep 17 00:00:00 2001
From: Manas Hejmadi
Date: Sat, 26 Jul 2025 03:37:17 +0530
Subject: [PATCH] Streaming Option added to AI Request Configurations

---
 lib/providers/collection_providers.dart         |  7 ++++++-
 packages/genai/lib/llm_config.dart              | 10 +++++++++-
 packages/genai/lib/models/ai_request_model.dart |  4 ++--
 packages/genai/lib/providers/gemini.dart        |  3 +++
 4 files changed, 20 insertions(+), 4 deletions(-)

diff --git a/lib/providers/collection_providers.dart b/lib/providers/collection_providers.dart
index 37eb9234..d74dca43 100644
--- a/lib/providers/collection_providers.dart
+++ b/lib/providers/collection_providers.dart
@@ -339,7 +339,12 @@ class CollectionStateNotifier
 
     if (apiType == APIType.ai) {
       aiRequestModel = requestModel.aiRequestModel!;
-      final genAIRequest = aiRequestModel.createRequest();
+
+      final streamingMode = aiRequestModel.payload
+              .configMap[LLMConfigName.stream.name]?.configValue.value ??
+          false;
+
+      final genAIRequest = aiRequestModel.createRequest(stream: streamingMode);
       substitutedHttpRequestModel = getSubstitutedHttpRequestModel(
         HttpRequestModel(
           method: HTTPVerb.post,
diff --git a/packages/genai/lib/llm_config.dart b/packages/genai/lib/llm_config.dart
index 2944bd0e..19aec010 100644
--- a/packages/genai/lib/llm_config.dart
+++ b/packages/genai/lib/llm_config.dart
@@ -164,7 +164,7 @@ class LLMConfigTextValue extends LLMModelConfigValue {
   }
 }
 
-enum LLMConfigName { temperature, top_p, max_tokens, endpoint }
+enum LLMConfigName { temperature, top_p, max_tokens, endpoint, stream }
 
 Map defaultLLMConfigurations = {
   LLMConfigName.temperature: LLMModelConfiguration(
@@ -188,4 +188,12 @@ Map defaultLLMConfigurations = {
     configType: LLMModelConfigurationType.numeric,
     configValue: LLMConfigNumericValue(value: -1),
   ),
+  LLMConfigName.stream: LLMModelConfiguration(
+    configId: 'stream',
+    configName: 'Enable Streaming Mode',
+    configDescription:
+        'The LLM output will be sent in a stream instead of all at once',
+    configType: LLMModelConfigurationType.boolean,
+    configValue: LLMConfigBooleanValue(value: false),
+  ),
 };
diff --git a/packages/genai/lib/models/ai_request_model.dart b/packages/genai/lib/models/ai_request_model.dart
index 11c79d3d..724a567c 100644
--- a/packages/genai/lib/models/ai_request_model.dart
+++ b/packages/genai/lib/models/ai_request_model.dart
@@ -30,9 +30,9 @@ class AIRequestModel with _$AIRequestModel {
     return AIRequestModel(payload: p, model: model, provider: provider);
   }
 
-  LLMRequestDetails createRequest() {
+  LLMRequestDetails createRequest({bool stream = false}) {
     final controller = model.provider.modelController;
-    return controller.createRequest(model, payload, stream: true);
+    return controller.createRequest(model, payload, stream: stream);
   }
 
   factory AIRequestModel.fromDefaultSaveObject(LLMSaveObject? defaultLLMSO) {
diff --git a/packages/genai/lib/providers/gemini.dart b/packages/genai/lib/providers/gemini.dart
index a4508965..65e2843d 100644
--- a/packages/genai/lib/providers/gemini.dart
+++ b/packages/genai/lib/providers/gemini.dart
@@ -12,9 +12,12 @@ class GeminiModelController extends ModelController {
       systemPrompt: '',
       userPrompt: '',
       configMap: {
+        //TODO: CHANGES TO THESE DO NOT APPLY TO OLDER REQUESTS!!!!!!
         LLMConfigName.temperature.name:
             defaultLLMConfigurations[LLMConfigName.temperature]!,
         LLMConfigName.top_p.name:
             defaultLLMConfigurations[LLMConfigName.top_p]!,
+        LLMConfigName.stream.name:
+            defaultLLMConfigurations[LLMConfigName.stream]!,
       },
     ).clone();
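
Below is a minimal sketch of how the new `stream` flag travels end to end under this change: it is seeded as a boolean entry in defaultLLMConfigurations, read back out of the request's payload in collection_providers.dart, and forwarded through AIRequestModel.createRequest() to the provider's ModelController. It uses only names that appear in the diff above and is not part of the patch itself.

// Sketch only: reading the opt-in streaming flag before building the request.
// A missing entry (for example, a request saved before `stream` existed; see
// the TODO in gemini.dart) falls back to non-streaming.
final streamingMode = aiRequestModel.payload
        .configMap[LLMConfigName.stream.name]?.configValue.value ??
    false;

// createRequest() now forwards the flag to the provider's ModelController
// instead of hard-coding `stream: true` as the old implementation did.
final genAIRequest = aiRequestModel.createRequest(stream: streamingMode);

Note that the `stream: false` default flips the previous behaviour: the old createRequest() always passed stream: true, so requests that never set the new config will now run non-streaming unless the flag is enabled.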