# genai package
This package contains all the code related to generative AI capabilities. It is a foundational package that can be reused across various projects.
## Fetch all available Remote LLMs
```dart
await LLMManager.fetchAvailableLLMs();
```
## Getting LLM Models for a given Provider
```dart
final List<LLMModel> models = LLMProvider.gemini.models;
```
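Taken together, a typical startup flow is to refresh the remote catalogue once and then enumerate models per provider. A minimal sketch, assuming `LLMProvider` is an enum (so `LLMProvider.values` and `.name` are available); the import path is hypothetical:

```dart
// Hypothetical import path, shown only for illustration.
import 'package:genai/genai.dart';

Future<void> listAllModels() async {
  // Refresh the remote model catalogue first.
  await LLMManager.fetchAvailableLLMs();

  // Assumption: LLMProvider is an enum, so .values lists every provider.
  for (final provider in LLMProvider.values) {
    final List<LLMModel> models = provider.models;
    print('${provider.name}: ${models.length} model(s)');
  }
}
```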
## Calling a GenAI Model using the provided helper
```dart
final LLMModel geminiModel =
    LLMProvider.gemini.getLLMByIdentifier('gemini-2.0-flash');

GenerativeAI.callGenerativeModel(
  geminiModel,
  onAnswer: (answer) {
    print(answer);
  },
  onError: (e) {},
  systemPrompt: 'Give a 100 word summary of the provided word. Only give the answer',
  userPrompt: 'Pizza',
  credential: 'AIza.....',
);
```
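Because `callGenerativeModel` delivers its result through callbacks, it can be convenient to bridge it to a `Future` in async code. A minimal sketch using `dart:async`; the helper name is hypothetical and it assumes that, without `stream: true`, `onAnswer` fires exactly once with the full answer:

```dart
import 'dart:async';

/// Hypothetical helper: bridges the callback-based API to a Future.
Future<String> askOnce(
    LLMModel model, String system, String user, String credential) {
  final completer = Completer<String>();
  GenerativeAI.callGenerativeModel(
    model,
    onAnswer: (answer) {
      // Assumption: without stream: true, onAnswer fires once with the full answer.
      if (!completer.isCompleted) completer.complete(answer);
    },
    onError: (e) {
      if (!completer.isCompleted) completer.completeError(e);
    },
    systemPrompt: system,
    userPrompt: user,
    credential: credential,
  );
  return completer.future;
}
```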
## Calling a GenAI Model (with Streaming)
```dart
import 'dart:io';

final LLMModel geminiModel =
    LLMProvider.gemini.getLLMByIdentifier('gemini-2.0-flash');

GenerativeAI.callGenerativeModel(
  geminiModel,
  onAnswer: (chunk) {
    stdout.write(chunk); // each chunk of the streamed answer
  },
  onError: (e) {},
  systemPrompt: 'Give a 100 word summary of the provided word. Only give the answer',
  userPrompt: 'Pizza',
  credential: 'AIza.....',
  stream: true,
);
```
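If the complete answer is needed once streaming finishes, the chunks can be accumulated while they are displayed. A minimal sketch reusing `geminiModel` from the snippet above; it assumes each chunk delivered to `onAnswer` is a `String`, and the function name is hypothetical:

```dart
import 'dart:io';

// Hypothetical helper for illustration; assumes each chunk is a String.
void streamAndCollect(LLMModel geminiModel) {
  final buffer = StringBuffer();

  GenerativeAI.callGenerativeModel(
    geminiModel,
    onAnswer: (chunk) {
      stdout.write(chunk); // show progress as chunks arrive
      buffer.write(chunk); // keep the full answer for later use
    },
    onError: (e) => stderr.writeln('stream failed: $e'),
    systemPrompt: 'Give a 100 word summary of the provided word. Only give the answer',
    userPrompt: 'Pizza',
    credential: 'AIza.....',
    stream: true,
  );
}
```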
## Directly Using a Model (e.g. Gemini)
```dart
final LLMModel model =
    LLMProvider.gemini.getLLMByIdentifier('gemini-2.0-flash');
final ModelController controller = model.provider.modelController;

final payload = controller.inputPayload;
payload.systemPrompt = 'Say YES or NO';
payload.userPrompt = 'The sun sets in the west';
payload.credential = 'AIza....';

final genAIRequest = controller.createRequest(model, payload);
final answer = await GenerativeAI.executeGenAIRequest(model, genAIRequest);
print(answer);
```
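Since `executeGenAIRequest` is awaited, failures such as bad credentials or network errors can be handled with an ordinary try/catch. A minimal sketch reusing `model`, `controller`, and `payload` from the example above, under the assumption that the call throws on failure:

```dart
try {
  final genAIRequest = controller.createRequest(model, payload);
  final answer = await GenerativeAI.executeGenAIRequest(model, genAIRequest);
  print(answer);
} catch (e) {
  // Assumption: executeGenAIRequest throws on request or credential failures.
  print('GenAI request failed: $e');
}
```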