mirror of
https://github.com/foss42/apidash.git
synced 2025-12-09 14:40:20 +08:00
Refactor genai package to new modular interface
Reorganized the genai package by removing legacy LLM-related files and introducing a new modular interface under the 'interface' directory. Added provider-specific model classes, centralized constants, and updated the example to use the new API and data structures. Updated exports in genai.dart and improved dependency management.
This commit is contained in:
71
packages/genai/lib/interface/consts.dart
Normal file
71
packages/genai/lib/interface/consts.dart
Normal file
@@ -0,0 +1,71 @@
|
||||
import '../consts.dart';
|
||||
import '../models/models.dart';
|
||||
import 'model_providers/model_providers.dart';
|
||||
|
||||
/// The LLM API providers supported by the genai interface layer.
///
/// Each value maps to a concrete [ModelProvider] singleton via
/// [kModelProvidersMap].
enum ModelAPIProvider { openai, anthropic, gemini, azureopenai, ollama }
|
||||
|
||||
/// Maps each [ModelAPIProvider] to the singleton instance of its
/// provider-specific implementation.
final kModelProvidersMap = {
  ModelAPIProvider.openai: OpenAIModel.instance,
  ModelAPIProvider.anthropic: AnthropicModel.instance,
  ModelAPIProvider.gemini: GeminiModel.instance,
  ModelAPIProvider.azureopenai: AzureOpenAIModel.instance,
  ModelAPIProvider.ollama: OllamaModel.instance,
};
|
||||
|
||||
// Default chat/messages endpoints for each hosted provider.
const kAnthropicUrl = 'https://api.anthropic.com/v1/messages';
const kGeminiUrl = 'https://generativelanguage.googleapis.com/v1beta/models';
const kOpenAIUrl = 'https://api.openai.com/v1/chat/completions';
// Ollama serves an OpenAI-compatible API; the base URL is presumably the
// local host/port defined by `kBaseOllamaUrl` in ../consts.dart — confirm.
const kOllamaUrl = '$kBaseOllamaUrl/v1/chat/completions';
|
||||
|
||||
/// Baseline request payload shared by all providers.
///
/// Each provider's `defaultRequestData` getter customizes this via
/// `copyWith` (at minimum the `url`; Gemini and Ollama also replace the
/// config list).
final kDefaultModelRequestData = ModelRequestData(
  url: '',
  model: '',
  apiKey: '',
  systemPrompt: '',
  userPrompt: '',
  modelConfigs: [
    kDefaultModelConfigTemperature,
    kDefaultModelConfigTopP,
    kDefaultModelConfigMaxTokens,
  ],
  stream: false,
);
|
||||
|
||||
/// Sampling-temperature slider; the record is (min, default, max).
final kDefaultModelConfigTemperature = ModelConfig(
  id: 'temperature',
  name: 'Temperature',
  description: 'The Temperature of the Model',
  type: ConfigType.slider,
  value: ConfigSliderValue(value: (0, 0.5, 1)),
);

/// Nucleus-sampling (top-p) slider; the record is (min, default, max).
final kDefaultModelConfigTopP = ModelConfig(
  id: 'top_p',
  name: 'Top P',
  description: 'The Top P of the Model',
  type: ConfigType.slider,
  value: ConfigSliderValue(value: (0, 0.95, 1)),
);

/// Output-token cap, defaulting to 1024.
final kDefaultModelConfigMaxTokens = ModelConfig(
  id: 'max_tokens',
  name: 'Maximum Tokens',
  description: 'The maximum number of tokens allowed in the output',
  type: ConfigType.numeric,
  value: ConfigNumericValue(value: 1024),
);

/// Streaming toggle, off by default.
///
/// NOTE(review): unlike the three configs above, this is not part of
/// [kDefaultModelRequestData]'s `modelConfigs` — confirm whether callers
/// are expected to add it explicitly.
final kDefaultModelConfigStream = ModelConfig(
  id: 'stream',
  name: 'Enable Streaming Mode',
  description: 'The LLM output will be sent in a stream instead of all at once',
  type: ConfigType.boolean,
  value: ConfigBooleanValue(value: false),
);
|
||||
|
||||
/// Gemini variant of the top-p config: the API expects the camelCase
/// field name `topP` rather than `top_p`.
final kDefaultGeminiModelConfigTopP = kDefaultModelConfigTopP.copyWith(
  id: 'topP',
);

/// Gemini variant of the token cap: the API names it `maxOutputTokens`.
final kDefaultGeminiModelConfigMaxTokens = kDefaultModelConfigMaxTokens
    .copyWith(id: 'maxOutputTokens');
|
||||
2
packages/genai/lib/interface/interface.dart
Normal file
2
packages/genai/lib/interface/interface.dart
Normal file
@@ -0,0 +1,2 @@
|
||||
// Barrel file for the genai interface layer.
// Directives sorted alphabetically per the `directives_ordering` lint.
export 'consts.dart';
export 'model_providers/model_providers.dart';
|
||||
48
packages/genai/lib/interface/model_providers/anthropic.dart
Normal file
48
packages/genai/lib/interface/model_providers/anthropic.dart
Normal file
@@ -0,0 +1,48 @@
|
||||
import 'package:better_networking/better_networking.dart';
|
||||
import '../../models/models.dart';
|
||||
import '../consts.dart';
|
||||
|
||||
/// [ModelProvider] implementation for the Anthropic Messages API.
class AnthropicModel extends ModelProvider {
  /// Shared singleton instance, referenced by [kModelProvidersMap].
  static final instance = AnthropicModel();

  @override
  ModelRequestData get defaultRequestData =>
      kDefaultModelRequestData.copyWith(url: kAnthropicUrl);

  /// Builds the POST request for the Anthropic Messages endpoint.
  ///
  /// Returns `null` when [requestData] is `null`.
  @override
  HttpRequestModel? createRequest(ModelRequestData? requestData) {
    if (requestData == null) {
      return null;
    }
    return HttpRequestModel(
      method: HTTPVerb.post,
      url: requestData.url,
      headers: const [
        // Mandatory versioning header for the Anthropic API.
        NameValueModel(name: "anthropic-version", value: "2023-06-01"),
      ],
      authModel: AuthModel(
        type: APIAuthType.apiKey,
        apikey: AuthApiKeyModel(key: requestData.apiKey),
      ),
      body: kJsonEncoder.convert({
        "model": requestData.model,
        // FIX: the Anthropic Messages API rejects a "system" role inside
        // `messages`; the system prompt must be the top-level `system` field.
        "system": requestData.systemPrompt,
        "messages": [
          // Fall back to a placeholder so an empty user message is never
          // sent (consistent with the OpenAI/Azure providers).
          {
            "role": "user",
            "content": requestData.userPrompt.isNotEmpty
                ? requestData.userPrompt
                : "Generate",
          },
        ],
        ...requestData.getModelConfigMap(),
        if (requestData.stream ?? false) ...{'stream': true},
      }),
    );
  }

  /// Extracts the assistant text from a non-streaming response.
  @override
  String? outputFormatter(Map x) {
    // Null-aware subscript after [0] guards against an empty content list.
    return x['content']?[0]?['text'];
  }

  /// Extracts the text delta from a streaming event payload.
  @override
  String? streamOutputFormatter(Map x) {
    return x['text'];
  }
}
|
||||
@@ -0,0 +1,50 @@
|
||||
import 'package:better_networking/better_networking.dart';
|
||||
import '../../models/models.dart';
|
||||
import '../consts.dart';
|
||||
|
||||
/// [ModelProvider] implementation for Azure-hosted OpenAI deployments.
class AzureOpenAIModel extends ModelProvider {
  /// Shared singleton instance, referenced by [kModelProvidersMap].
  static final instance = AzureOpenAIModel();

  // No default URL: Azure endpoints are deployment-specific.
  @override
  ModelRequestData get defaultRequestData => kDefaultModelRequestData;

  /// Builds the POST request for an Azure OpenAI chat-completions endpoint.
  ///
  /// Returns `null` when [requestData] is `null`; throws when no endpoint
  /// URL has been configured.
  @override
  HttpRequestModel? createRequest(ModelRequestData? requestData) {
    if (requestData == null) return null;
    if (requestData.url.isEmpty) {
      throw Exception('MODEL ENDPOINT IS EMPTY');
    }

    // Substitute a placeholder so an empty user message is never sent.
    final userText = requestData.userPrompt.isNotEmpty
        ? requestData.userPrompt
        : "Generate";

    final payload = <String, dynamic>{
      "model": requestData.model,
      "messages": [
        {"role": "system", "content": requestData.systemPrompt},
        {"role": "user", "content": userText},
      ],
      ...requestData.getModelConfigMap(),
    };
    if (requestData.stream ?? false) {
      payload['stream'] = true;
    }

    return HttpRequestModel(
      method: HTTPVerb.post,
      url: requestData.url,
      authModel: AuthModel(
        type: APIAuthType.apiKey,
        // Azure expects the key in an `api-key` header.
        apikey: AuthApiKeyModel(key: requestData.apiKey, name: 'api-key'),
      ),
      body: kJsonEncoder.convert(payload),
    );
  }

  /// Extracts the assistant text from a non-streaming response.
  @override
  String? outputFormatter(Map x) =>
      x["choices"]?[0]["message"]?["content"]?.trim();

  /// Extracts the text delta from a streaming chunk.
  @override
  String? streamOutputFormatter(Map x) =>
      x["choices"]?[0]["delta"]?["content"];
}
|
||||
72
packages/genai/lib/interface/model_providers/gemini.dart
Normal file
72
packages/genai/lib/interface/model_providers/gemini.dart
Normal file
@@ -0,0 +1,72 @@
|
||||
import 'package:better_networking/better_networking.dart';
|
||||
import '../../models/models.dart';
|
||||
import '../consts.dart';
|
||||
|
||||
/// [ModelProvider] implementation for the Google Gemini API.
class GeminiModel extends ModelProvider {
  /// Shared singleton instance, referenced by [kModelProvidersMap].
  static final instance = GeminiModel();

  @override
  ModelRequestData get defaultRequestData => kDefaultModelRequestData.copyWith(
    url: kGeminiUrl,
    // Gemini uses camelCase config ids (`topP`, `maxOutputTokens`).
    modelConfigs: [
      kDefaultModelConfigTemperature,
      kDefaultGeminiModelConfigTopP,
      kDefaultGeminiModelConfigMaxTokens,
    ],
  );

  /// Builds the POST request for Gemini's `generateContent` (or, when
  /// streaming, `streamGenerateContent`) endpoint.
  ///
  /// Returns `null` when [requestData] is `null`.
  @override
  HttpRequestModel? createRequest(ModelRequestData? requestData) {
    if (requestData == null) {
      return null;
    }
    List<NameValueModel> params = [];
    String endpoint = "${requestData.url}/${requestData.model}:";
    if (requestData.stream ?? false) {
      endpoint += 'streamGenerateContent';
      // `alt=sse` switches the streaming response to server-sent events.
      params.add(const NameValueModel(name: "alt", value: "sse"));
    } else {
      endpoint += 'generateContent';
    }

    return HttpRequestModel(
      method: HTTPVerb.post,
      url: endpoint,
      // FIX: `params` was previously built but never attached to the
      // request, so streaming requests went out without `alt=sse`.
      params: params,
      authModel: AuthModel(
        type: APIAuthType.apiKey,
        // Gemini takes the API key as a `key` query parameter.
        apikey: AuthApiKeyModel(
          key: requestData.apiKey,
          location: 'query',
          name: 'key',
        ),
      ),
      body: kJsonEncoder.convert({
        "contents": [
          {
            "role": "user",
            "parts": [
              {"text": requestData.userPrompt},
            ],
          },
        ],
        // The system prompt is a dedicated top-level field, not a message.
        "systemInstruction": {
          "role": "system",
          "parts": [
            {"text": requestData.systemPrompt},
          ],
        },
        "generationConfig": requestData.getModelConfigMap(),
      }),
    );
  }

  /// Extracts the candidate text from a non-streaming response.
  @override
  String? outputFormatter(Map x) {
    return x['candidates']?[0]?['content']?['parts']?[0]?['text'];
  }

  /// Extracts the candidate text from a streaming chunk (same shape as
  /// the non-streaming response).
  @override
  String? streamOutputFormatter(Map x) {
    return x['candidates']?[0]?['content']?['parts']?[0]?['text'];
  }
}
|
||||
@@ -0,0 +1,5 @@
|
||||
// Barrel file for the provider implementations.
// Directives sorted alphabetically per the `directives_ordering` lint.
export 'anthropic.dart';
export 'azureopenai.dart';
export 'gemini.dart';
export 'ollama.dart';
export 'openai.dart';
|
||||
13
packages/genai/lib/interface/model_providers/ollama.dart
Normal file
13
packages/genai/lib/interface/model_providers/ollama.dart
Normal file
@@ -0,0 +1,13 @@
|
||||
import '../../models/models.dart';
|
||||
import '../consts.dart';
|
||||
import 'openai.dart';
|
||||
|
||||
/// [ModelProvider] for Ollama, which serves an OpenAI-compatible chat
/// endpoint — only the default request data differs from [OpenAIModel].
class OllamaModel extends OpenAIModel {
  /// Shared singleton instance, referenced by [kModelProvidersMap].
  static final instance = OllamaModel();

  @override
  ModelRequestData get defaultRequestData {
    // Only temperature and top-p are surfaced as configs for Ollama.
    return kDefaultModelRequestData.copyWith(
      url: kOllamaUrl,
      modelConfigs: [kDefaultModelConfigTemperature, kDefaultModelConfigTopP],
    );
  }
}
|
||||
49
packages/genai/lib/interface/model_providers/openai.dart
Normal file
49
packages/genai/lib/interface/model_providers/openai.dart
Normal file
@@ -0,0 +1,49 @@
|
||||
import 'package:better_networking/better_networking.dart';
|
||||
import '../../models/models.dart';
|
||||
import '../consts.dart';
|
||||
|
||||
/// [ModelProvider] implementation for the OpenAI chat-completions API.
/// Also serves as the base class for [OllamaModel].
class OpenAIModel extends ModelProvider {
  /// Shared singleton instance, referenced by [kModelProvidersMap].
  static final instance = OpenAIModel();

  @override
  ModelRequestData get defaultRequestData =>
      kDefaultModelRequestData.copyWith(url: kOpenAIUrl);

  /// Builds the POST request for the chat-completions endpoint.
  ///
  /// Returns `null` when [requestData] is `null`.
  @override
  HttpRequestModel? createRequest(ModelRequestData? requestData) {
    if (requestData == null) return null;

    // Substitute a placeholder so an empty user message is never sent.
    final userText = requestData.userPrompt.isNotEmpty
        ? requestData.userPrompt
        : "Generate";

    final payload = <String, dynamic>{
      "model": requestData.model,
      "messages": [
        {"role": "system", "content": requestData.systemPrompt},
        {"role": "user", "content": userText},
      ],
      ...requestData.getModelConfigMap(),
    };
    if (requestData.stream ?? false) {
      payload['stream'] = true;
    }

    return HttpRequestModel(
      method: HTTPVerb.post,
      url: requestData.url,
      authModel: AuthModel(
        type: APIAuthType.bearer,
        bearer: AuthBearerModel(token: requestData.apiKey),
      ),
      body: kJsonEncoder.convert(payload),
    );
  }

  /// Extracts the assistant text from a non-streaming response.
  @override
  String? outputFormatter(Map x) =>
      x["choices"]?[0]["message"]?["content"]?.trim();

  /// Extracts the text delta from a streaming chunk.
  @override
  String? streamOutputFormatter(Map x) =>
      x["choices"]?[0]["delta"]?["content"];
}
|
||||
Reference in New Issue
Block a user