Added capabilities for DeepSeek, Google, and Mistral models

Thorsten Sommer 2025-05-07 21:02:06 +02:00
parent 3401715e6e
commit a3abdf6133
Signed by: tsommer
GPG Key ID: 371BBA77A02C0108
3 changed files with 156 additions and 1 deletion


@@ -105,6 +105,27 @@ public sealed class ProviderDeepSeek(ILogger logger) : BaseProvider("https://api
{
    return Task.FromResult(Enumerable.Empty<Model>());
}

public override IReadOnlyCollection<Capability> GetModelCapabilities(Model model)
{
    var modelName = model.Id.ToLowerInvariant().AsSpan();

    // DeepSeek's reasoner models always produce reasoning output:
    if (modelName.IndexOf("reasoner") is not -1)
        return
        [
            Capability.TEXT_INPUT,
            Capability.TEXT_OUTPUT,
            Capability.ALWAYS_REASONING,
        ];

    // All other DeepSeek models are plain text chat models:
    return
    [
        Capability.TEXT_INPUT,
        Capability.TEXT_OUTPUT,
    ];
}

#endregion
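Aside (not part of the diff): callers can use the new lookup to gate UI features per model. A minimal sketch, assuming a `provider` instance, a user-selected `selectedModel`, and LINQ's `Contains` on the returned collection:

// Hypothetical caller-side check (assumed names; not part of this commit):
// only show a dedicated reasoning pane when the model always emits reasoning output.
var capabilities = provider.GetModelCapabilities(selectedModel);
var showReasoningPane = capabilities.Contains(Capability.ALWAYS_REASONING);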


@@ -118,7 +118,89 @@ public class ProviderGoogle(ILogger logger) : BaseProvider("https://generativela
model.Name.StartsWith("models/text-embedding-", StringComparison.InvariantCultureIgnoreCase))
.Select(n => new Provider.Model(n.Name.Replace("models/", string.Empty), n.DisplayName));
}
public override IReadOnlyCollection<Capability> GetModelCapabilities(Provider.Model model)
{
    var modelName = model.Id.ToLowerInvariant().AsSpan();

    if (modelName.IndexOf("gemini-") is not -1)
    {
        // Note: check the more specific 2.0 flash variants before the generic -2.0-flash- match.

        // Reasoning models:
        if (modelName.IndexOf("gemini-2.5") is not -1)
            return
            [
                Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, Capability.AUDIO_INPUT,
                Capability.SPEECH_INPUT, Capability.VIDEO_INPUT,
                Capability.TEXT_OUTPUT,
                Capability.ALWAYS_REASONING, Capability.FUNCTION_CALLING,
            ];

        // Image generation:
        if (modelName.IndexOf("-2.0-flash-preview-image-") is not -1)
            return
            [
                Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, Capability.AUDIO_INPUT,
                Capability.SPEECH_INPUT, Capability.VIDEO_INPUT,
                Capability.TEXT_OUTPUT, Capability.IMAGE_OUTPUT,
            ];

        // Realtime model:
        if (modelName.IndexOf("-2.0-flash-live-") is not -1)
            return
            [
                Capability.TEXT_INPUT, Capability.AUDIO_INPUT, Capability.SPEECH_INPUT,
                Capability.VIDEO_INPUT,
                Capability.TEXT_OUTPUT, Capability.SPEECH_OUTPUT,
                Capability.FUNCTION_CALLING,
            ];

        // The 2.0 flash models cannot call functions:
        if (modelName.IndexOf("-2.0-flash-") is not -1)
            return
            [
                Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, Capability.AUDIO_INPUT,
                Capability.SPEECH_INPUT, Capability.VIDEO_INPUT,
                Capability.TEXT_OUTPUT,
            ];

        // The old 1.0 pro vision model:
        if (modelName.IndexOf("pro-vision") is not -1)
            return
            [
                Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
                Capability.TEXT_OUTPUT,
            ];

        // Default for all other Gemini models:
        return
        [
            Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, Capability.AUDIO_INPUT,
            Capability.SPEECH_INPUT, Capability.VIDEO_INPUT,
            Capability.TEXT_OUTPUT,
            Capability.FUNCTION_CALLING,
        ];
    }

    // Default for all other models:
    return
    [
        Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
        Capability.TEXT_OUTPUT,
        Capability.FUNCTION_CALLING,
    ];
}

#endregion
private async Task<ModelsResponse> LoadModels(CancellationToken token, string? apiKeyProvisional = null)
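A note on the branch order above: the `-2.0-flash-preview-image-` and `-2.0-flash-live-` checks must come before the broader `-2.0-flash-` match, because those ids contain that substring as well. The mapping lends itself to a couple of quick checks; the sketch below is not part of this commit and assumes xUnit, that the `ILogger` parameter is Microsoft's (so `NullLogger.Instance` satisfies it), and that `Provider.Model`'s first constructor argument is the id consulted by `GetModelCapabilities`:

// Hypothetical xUnit checks for the Gemini capability mapping (assumed ids and setup; not part of this commit):
using Microsoft.Extensions.Logging.Abstractions;
using Xunit;

public sealed class ProviderGoogleCapabilityTests
{
    // Assumption: NullLogger.Instance is accepted by the provider's primary constructor.
    private static ProviderGoogle CreateProvider() => new(NullLogger.Instance);

    [Fact]
    public void Gemini25_ReportsAlwaysReasoning()
    {
        var caps = CreateProvider().GetModelCapabilities(new Provider.Model("gemini-2.5-pro", "Gemini 2.5 Pro"));
        Assert.Contains(Capability.ALWAYS_REASONING, caps);
    }

    [Fact]
    public void Gemini20Flash_DoesNotReportFunctionCalling()
    {
        var caps = CreateProvider().GetModelCapabilities(new Provider.Model("gemini-2.0-flash-001", "Gemini 2.0 Flash"));
        Assert.DoesNotContain(Capability.FUNCTION_CALLING, caps);
    }
}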


@@ -121,7 +121,59 @@ public sealed class ProviderMistral(ILogger logger) : BaseProvider("https://api.
{
    return Task.FromResult(Enumerable.Empty<Provider.Model>());
}

public override IReadOnlyCollection<Capability> GetModelCapabilities(Provider.Model model)
{
    var modelName = model.Id.ToLowerInvariant().AsSpan();

    // Pixtral models are able to process images:
    if (modelName.IndexOf("pixtral") is not -1)
        return
        [
            Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
            Capability.TEXT_OUTPUT,
            Capability.FUNCTION_CALLING,
        ];

    // Mistral Medium:
    if (modelName.IndexOf("mistral-medium-") is not -1)
        return
        [
            Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
            Capability.TEXT_OUTPUT,
            Capability.FUNCTION_CALLING,
        ];

    // Mistral Small:
    if (modelName.IndexOf("mistral-small-") is not -1)
        return
        [
            Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
            Capability.TEXT_OUTPUT,
            Capability.FUNCTION_CALLING,
        ];

    // Mistral Saba (text in, text out; no function calling):
    if (modelName.IndexOf("mistral-saba-") is not -1)
        return
        [
            Capability.TEXT_INPUT,
            Capability.TEXT_OUTPUT,
        ];

    // Default for all other Mistral models:
    return
    [
        Capability.TEXT_INPUT,
        Capability.TEXT_OUTPUT,
        Capability.FUNCTION_CALLING,
    ];
}

#endregion
private async Task<ModelsResponse> LoadModelList(string? apiKeyProvisional, CancellationToken token)
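All three overrides share the same pattern: lowercase the model id, match substrings on the span, and return a capability set. That also makes caller-side filtering straightforward; a minimal sketch (assumed `provider` and `models` variables; not part of this commit):

// Hypothetical filtering via LINQ (assumed names; not part of this commit):
// keep only models that accept image input, e.g. for a vision-enabled chat selector.
var visionModels = models
    .Where(m => provider.GetModelCapabilities(m).Contains(Capability.MULTIPLE_IMAGE_INPUT))
    .ToList();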