Mirror of https://github.com/MindWorkAI/AI-Studio.git (synced 2025-08-21 01:52:57 +00:00)

Commit a3abdf6133 (parent 3401715e6e)
Added capabilities for DeepSeek, Google, and Mistral models
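
The hunks below all return collections of Capability values. For orientation, here is the enum as it can be reconstructed from the member names used in this commit; this is a sketch only, and the actual definition in the repository may contain more members or use a different layout.

    // Reconstructed for reference: only the members referenced in this commit are listed.
    public enum Capability
    {
        TEXT_INPUT,
        MULTIPLE_IMAGE_INPUT,
        AUDIO_INPUT,
        SPEECH_INPUT,
        VIDEO_INPUT,

        TEXT_OUTPUT,
        IMAGE_OUTPUT,
        SPEECH_OUTPUT,

        ALWAYS_REASONING,
        FUNCTION_CALLING,
    }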
@@ -105,6 +105,27 @@ public sealed class ProviderDeepSeek(ILogger logger) : BaseProvider("https://api
    {
        return Task.FromResult(Enumerable.Empty<Model>());
    }

    public override IReadOnlyCollection<Capability> GetModelCapabilities(Model model)
    {
        var modelName = model.Id.ToLowerInvariant().AsSpan();

        // The reasoner models always use reasoning:
        if(modelName.IndexOf("reasoner") is not -1)
            return
            [
                Capability.TEXT_INPUT,
                Capability.TEXT_OUTPUT,

                Capability.ALWAYS_REASONING,
            ];

        // Default for all other DeepSeek models:
        return
        [
            Capability.TEXT_INPUT,
            Capability.TEXT_OUTPUT,
        ];
    }

    #endregion
@@ -118,7 +118,89 @@ public class ProviderGoogle(ILogger logger) : BaseProvider("https://generativela
                model.Name.StartsWith("models/text-embedding-", StringComparison.InvariantCultureIgnoreCase))
            .Select(n => new Provider.Model(n.Name.Replace("models/", string.Empty), n.DisplayName));
    }

    public override IReadOnlyCollection<Capability> GetModelCapabilities(Provider.Model model)
    {
        var modelName = model.Id.ToLowerInvariant().AsSpan();

        if (modelName.IndexOf("gemini-") is not -1)
        {
            // Reasoning models:
            if (modelName.IndexOf("gemini-2.5") is not -1)
                return
                [
                    Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, Capability.AUDIO_INPUT,
                    Capability.SPEECH_INPUT, Capability.VIDEO_INPUT,

                    Capability.TEXT_OUTPUT,

                    Capability.ALWAYS_REASONING, Capability.FUNCTION_CALLING,
                ];

            // Image generation:
            if(modelName.IndexOf("-2.0-flash-preview-image-") is not -1)
                return
                [
                    Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, Capability.AUDIO_INPUT,
                    Capability.SPEECH_INPUT, Capability.VIDEO_INPUT,

                    Capability.TEXT_OUTPUT, Capability.IMAGE_OUTPUT,
                ];

            // Realtime model:
            if(modelName.IndexOf("-2.0-flash-live-") is not -1)
                return
                [
                    Capability.TEXT_INPUT, Capability.AUDIO_INPUT, Capability.SPEECH_INPUT,
                    Capability.VIDEO_INPUT,

                    Capability.TEXT_OUTPUT, Capability.SPEECH_OUTPUT,

                    Capability.FUNCTION_CALLING,
                ];

            // The 2.0 flash models cannot call functions:
            if(modelName.IndexOf("-2.0-flash-") is not -1)
                return
                [
                    Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, Capability.AUDIO_INPUT,
                    Capability.SPEECH_INPUT, Capability.VIDEO_INPUT,

                    Capability.TEXT_OUTPUT,
                ];

            // The old 1.0 pro vision model:
            if(modelName.IndexOf("pro-vision") is not -1)
                return
                [
                    Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,

                    Capability.TEXT_OUTPUT,
                ];

            // Default for all other Gemini models:
            return
            [
                Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, Capability.AUDIO_INPUT,
                Capability.SPEECH_INPUT, Capability.VIDEO_INPUT,

                Capability.TEXT_OUTPUT,

                Capability.FUNCTION_CALLING,
            ];
        }

        // Default for all other models:
        return
        [
            Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,

            Capability.TEXT_OUTPUT,

            Capability.FUNCTION_CALLING,
        ];
    }

    #endregion

    private async Task<ModelsResponse> LoadModels(CancellationToken token, string? apiKeyProvisional = null)
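
Note that the Gemini branches are ordered from most specific to most general, and the order matters: a live model id has to hit the "-2.0-flash-live-" branch before the generic "-2.0-flash-" branch would drop function calling. A small sketch follows; the model ids and the logger are illustrative assumptions, not values taken from the diff or the Google API.

    // Hypothetical caller; "logger" is a placeholder for the host's ILogger.
    var google = new ProviderGoogle(logger);

    // "gemini-2.5-pro" falls into the "gemini-2.5" branch: always reasoning plus function calling.
    var reasoningCaps = google.GetModelCapabilities(new Provider.Model("gemini-2.5-pro", "Gemini 2.5 Pro"));

    // "gemini-2.0-flash-live-001" matches "-2.0-flash-live-" before the generic "-2.0-flash-" check,
    // so it keeps SPEECH_OUTPUT and FUNCTION_CALLING, which the plain 2.0 flash models do not get.
    var realtimeCaps = google.GetModelCapabilities(new Provider.Model("gemini-2.0-flash-live-001", "Gemini 2.0 Flash Live"));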
@@ -121,7 +121,59 @@ public sealed class ProviderMistral(ILogger logger) : BaseProvider("https://api.
    {
        return Task.FromResult(Enumerable.Empty<Provider.Model>());
    }

    public override IReadOnlyCollection<Capability> GetModelCapabilities(Provider.Model model)
    {
        var modelName = model.Id.ToLowerInvariant().AsSpan();

        // Pixtral models are able to process images:
        if (modelName.IndexOf("pixtral") is not -1)
            return
            [
                Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
                Capability.TEXT_OUTPUT,

                Capability.FUNCTION_CALLING,
            ];

        // Mistral medium:
        if (modelName.IndexOf("mistral-medium-") is not -1)
            return
            [
                Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
                Capability.TEXT_OUTPUT,

                Capability.FUNCTION_CALLING,
            ];

        // Mistral small:
        if (modelName.IndexOf("mistral-small-") is not -1)
            return
            [
                Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
                Capability.TEXT_OUTPUT,

                Capability.FUNCTION_CALLING,
            ];

        // Mistral Saba:
        if (modelName.IndexOf("mistral-saba-") is not -1)
            return
            [
                Capability.TEXT_INPUT,
                Capability.TEXT_OUTPUT,
            ];

        // Default for all other Mistral models:
        return
        [
            Capability.TEXT_INPUT,
            Capability.TEXT_OUTPUT,

            Capability.FUNCTION_CALLING,
        ];
    }

    #endregion

    private async Task<ModelsResponse> LoadModelList(string? apiKeyProvisional, CancellationToken token)
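
One way to pin the new Mistral mapping down is a small table-driven test. This is a sketch only: it assumes xUnit and an ILogger test double, neither of which appears in the diff, the model ids are examples chosen to hit the pixtral, saba, and small branches, and Contains is the LINQ extension (System.Linq).

    [Theory]
    [InlineData("pixtral-large-2411", true)]    // Pixtral branch: image input + function calling
    [InlineData("mistral-saba-2502", false)]    // Saba branch: text only, no function calling
    [InlineData("mistral-small-2503", true)]    // Small branch: image input + function calling
    public void Capabilities_follow_the_model_family(string id, bool expectFunctionCalling)
    {
        var mistral = new ProviderMistral(logger);   // logger: test stub, assumed
        var caps = mistral.GetModelCapabilities(new Provider.Model(id, id));

        Assert.Equal(expectFunctionCalling, caps.Contains(Capability.FUNCTION_CALLING));
    }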