Streaming Option added to AI Request Configurations
@@ -339,7 +339,12 @@ class CollectionStateNotifier
     if (apiType == APIType.ai) {
       aiRequestModel = requestModel.aiRequestModel!;
-      final genAIRequest = aiRequestModel.createRequest();
+      final streamingMode = aiRequestModel.payload
+              .configMap[LLMConfigName.stream.name]?.configValue.value ??
+          false;
+
+      final genAIRequest = aiRequestModel.createRequest(stream: streamingMode);
       substitutedHttpRequestModel = getSubstitutedHttpRequestModel(
         HttpRequestModel(
           method: HTTPVerb.post,
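A note on backward compatibility, since the hunk above relies on it: requests saved before this change have no stream entry in their configMap, so the null-aware lookup falls back to false and those requests keep their non-streaming behaviour (this is also what the TODO in the GeminiModelController hunk further down points at). A minimal, standalone Dart sketch of that fallback, using a plain map as a stand-in for the real LLMModelConfiguration values:

// Standalone sketch only; the real apidash configMap stores
// LLMModelConfiguration objects, not raw booleans.
enum LLMConfigName { temperature, top_p, max_tokens, endpoint, stream }

bool readStreamFlag(Map<String, Object?> configMap) {
  // Missing key (older saved request) -> null -> defaults to false.
  return (configMap[LLMConfigName.stream.name] as bool?) ?? false;
}

void main() {
  print(readStreamFlag({})); // false: request saved before this change
  print(readStreamFlag({LLMConfigName.stream.name: true})); // true
}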
@@ -164,7 +164,7 @@ class LLMConfigTextValue extends LLMModelConfigValue {
   }
 }
 
-enum LLMConfigName { temperature, top_p, max_tokens, endpoint }
+enum LLMConfigName { temperature, top_p, max_tokens, endpoint, stream }
 
 Map<LLMConfigName, LLMModelConfiguration> defaultLLMConfigurations = {
   LLMConfigName.temperature: LLMModelConfiguration(
@@ -188,4 +188,12 @@ Map<LLMConfigName, LLMModelConfiguration> defaultLLMConfigurations = {
     configType: LLMModelConfigurationType.numeric,
     configValue: LLMConfigNumericValue(value: -1),
   ),
+  LLMConfigName.stream: LLMModelConfiguration(
+    configId: 'stream',
+    configName: 'Enable Streaming Mode',
+    configDescription:
+        'The LLM output will be sent in a stream instead of all at once',
+    configType: LLMModelConfigurationType.boolean,
+    configValue: LLMConfigBooleanValue(value: false),
+  ),
 };
@@ -30,9 +30,9 @@ class AIRequestModel with _$AIRequestModel {
     return AIRequestModel(payload: p, model: model, provider: provider);
   }
 
-  LLMRequestDetails createRequest() {
+  LLMRequestDetails createRequest({bool stream = false}) {
     final controller = model.provider.modelController;
-    return controller.createRequest(model, payload, stream: true);
+    return controller.createRequest(model, payload, stream: stream);
   }
 
   factory AIRequestModel.fromDefaultSaveObject(LLMSaveObject? defaultLLMSO) {
@@ -12,9 +12,12 @@ class GeminiModelController extends ModelController {
       systemPrompt: '',
       userPrompt: '',
       configMap: {
+        //TODO: CHANGES TO THESE DO NOT APPLY TO OLDER REQUESTS!!!!!!
        LLMConfigName.temperature.name:
             defaultLLMConfigurations[LLMConfigName.temperature]!,
         LLMConfigName.top_p.name: defaultLLMConfigurations[LLMConfigName.top_p]!,
+        LLMConfigName.stream.name:
+            defaultLLMConfigurations[LLMConfigName.stream]!,
       },
     ).clone();
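Taken together, the hunks wire the new toggle end to end: the stream entry in defaultLLMConfigurations seeds new requests with the boolean, the collection notifier reads it (defaulting to false), and AIRequestModel.createRequest forwards it to the provider's model controller instead of the previously hard-coded stream: true. A condensed, self-contained sketch of that forwarding path; the Stub* classes below are stand-ins for the apidash models, not their real APIs:

// Stand-in classes for illustration only.
class StubModelController {
  // Mirrors ModelController.createRequest(model, payload, stream: ...).
  String createRequest({bool stream = false}) =>
      stream ? 'streaming request' : 'single-response request';
}

class StubAIRequestModel {
  final Map<String, bool> configMap; // stand-in for payload.configMap
  final StubModelController controller = StubModelController();
  StubAIRequestModel(this.configMap);

  // Mirrors AIRequestModel.createRequest({bool stream = false}).
  String createRequest({bool stream = false}) =>
      controller.createRequest(stream: stream);
}

void main() {
  final request = StubAIRequestModel({'stream': true});
  // Mirrors the notifier hunk: read the toggle, default to false, forward it.
  final streamingMode = request.configMap['stream'] ?? false;
  print(request.createRequest(stream: streamingMode)); // streaming request
}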