// File: app/MindWork AI Studio/Settings/ProviderExtensions.Alibaba.cs
using AIStudio.Provider;

namespace AIStudio.Settings;

public static partial class ProviderExtensions
{
    /// <summary>
    /// Derives the capability set of an Alibaba Cloud model from its model ID.
    /// </summary>
    /// <param name="model">The model whose ID is inspected (matched case-insensitively).</param>
    /// <returns>The list of capabilities this model is assumed to support.</returns>
    public static List<Capability> GetModelCapabilitiesAlibaba(Model model)
    {
        var modelName = model.Id.ToLowerInvariant().AsSpan();

        // Qwen models:
        if (modelName.StartsWith("qwen"))
        {
            // Omni models accept text, images, audio, speech, and video, and can answer with speech.
            // Checked first so that e.g. a "qwen3...omni" ID is treated as omni, not plain Qwen 3:
            if (modelName.IndexOf("omni") is not -1)
                return
                [
                    Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
                    Capability.AUDIO_INPUT, Capability.SPEECH_INPUT,
                    Capability.VIDEO_INPUT,

                    Capability.TEXT_OUTPUT, Capability.SPEECH_OUTPUT,

                    Capability.CHAT_COMPLETION_API,
                ];

            // Qwen 3 models support optional reasoning and function calling:
            if (modelName.StartsWith("qwen3"))
                return
                [
                    Capability.TEXT_INPUT,
                    Capability.TEXT_OUTPUT,

                    Capability.OPTIONAL_REASONING, Capability.FUNCTION_CALLING,
                    Capability.CHAT_COMPLETION_API,
                ];

            // Vision-language (VL) models additionally process images:
            if (modelName.IndexOf("-vl-") is not -1)
                return
                [
                    Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
                    Capability.TEXT_OUTPUT,

                    Capability.CHAT_COMPLETION_API,
                ];
        }

        // QwQ models: always-reasoning text models with function calling:
        if (modelName.StartsWith("qwq"))
        {
            return
            [
                Capability.TEXT_INPUT,
                Capability.TEXT_OUTPUT,

                Capability.ALWAYS_REASONING, Capability.FUNCTION_CALLING,
                Capability.CHAT_COMPLETION_API,
            ];
        }

        // QVQ models: always-reasoning vision models:
        if (modelName.StartsWith("qvq"))
        {
            return
            [
                Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
                Capability.TEXT_OUTPUT,

                Capability.ALWAYS_REASONING,
                Capability.CHAT_COMPLETION_API,
            ];
        }

        // Default to text input and output:
        return
        [
            Capability.TEXT_INPUT,
            Capability.TEXT_OUTPUT,

            Capability.FUNCTION_CALLING,
            Capability.CHAT_COMPLETION_API,
        ];
    }
}
// File: app/MindWork AI Studio/Settings/ProviderExtensions.Anthropic.cs
using AIStudio.Provider;

namespace AIStudio.Settings;

public static partial class ProviderExtensions
{
    /// <summary>
    /// Derives the capability set of an Anthropic model from its model ID.
    /// </summary>
    /// <param name="model">The model whose ID is inspected (matched case-insensitively).</param>
    /// <returns>The list of capabilities this model is assumed to support.</returns>
    public static List<Capability> GetModelCapabilitiesAnthropic(Model model)
    {
        var modelName = model.Id.ToLowerInvariant().AsSpan();

        // Claude 4.x models and Claude 3.7 support optional (extended thinking) reasoning:
        if (modelName.StartsWith("claude-opus-4") || modelName.StartsWith("claude-sonnet-4") || modelName.StartsWith("claude-3-7"))
            return
            [
                Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
                Capability.TEXT_OUTPUT,

                Capability.OPTIONAL_REASONING, Capability.FUNCTION_CALLING,
                Capability.CHAT_COMPLETION_API,
            ];

        // All other 3.x models are able to process text and images as input:
        if (modelName.StartsWith("claude-3-"))
            return
            [
                Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
                Capability.TEXT_OUTPUT,

                Capability.FUNCTION_CALLING,
                Capability.CHAT_COMPLETION_API,
            ];

        // Any other model is able to process text only:
        return
        [
            Capability.TEXT_INPUT,
            Capability.TEXT_OUTPUT,
            Capability.FUNCTION_CALLING,
            Capability.CHAT_COMPLETION_API,
        ];
    }
}

// File: app/MindWork AI Studio/Settings/ProviderExtensions.DeepSeek.cs
public static partial class ProviderExtensions
{
    /// <summary>
    /// Derives the capability set of a DeepSeek model from its model ID.
    /// </summary>
    /// <param name="model">The model whose ID is inspected (matched case-insensitively).</param>
    /// <returns>The list of capabilities this model is assumed to support.</returns>
    public static List<Capability> GetModelCapabilitiesDeepSeek(Model model)
    {
        var modelName = model.Id.ToLowerInvariant().AsSpan();

        // The reasoner models (e.g., deepseek-reasoner) always reason:
        if (modelName.IndexOf("reasoner") is not -1)
            return
            [
                Capability.TEXT_INPUT,
                Capability.TEXT_OUTPUT,

                Capability.ALWAYS_REASONING,
                Capability.CHAT_COMPLETION_API,
            ];

        // Default: plain text chat:
        return
        [
            Capability.TEXT_INPUT,
            Capability.TEXT_OUTPUT,
            Capability.CHAT_COMPLETION_API,
        ];
    }
}

// File: app/MindWork AI Studio/Settings/ProviderExtensions.Google.cs
public static partial class ProviderExtensions
{
    /// <summary>
    /// Derives the capability set of a Google model from its model ID.
    /// </summary>
    /// <param name="model">The model whose ID is inspected (matched case-insensitively).</param>
    /// <returns>The list of capabilities this model is assumed to support.</returns>
    public static List<Capability> GetModelCapabilitiesGoogle(Model model)
    {
        var modelName = model.Id.ToLowerInvariant().AsSpan();

        if (modelName.IndexOf("gemini-") is not -1)
        {
            // Reasoning models:
            if (modelName.IndexOf("gemini-2.5") is not -1)
                return
                [
                    Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, Capability.AUDIO_INPUT,
                    Capability.SPEECH_INPUT, Capability.VIDEO_INPUT,

                    Capability.TEXT_OUTPUT,

                    Capability.ALWAYS_REASONING, Capability.FUNCTION_CALLING,
                    Capability.CHAT_COMPLETION_API,
                ];

            // Image generation:
            if (modelName.IndexOf("-2.0-flash-preview-image-") is not -1)
                return
                [
                    Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, Capability.AUDIO_INPUT,
                    Capability.SPEECH_INPUT, Capability.VIDEO_INPUT,

                    Capability.TEXT_OUTPUT, Capability.IMAGE_OUTPUT,
                    Capability.CHAT_COMPLETION_API,
                ];

            // Realtime model:
            if (modelName.IndexOf("-2.0-flash-live-") is not -1)
                return
                [
                    Capability.TEXT_INPUT, Capability.AUDIO_INPUT, Capability.SPEECH_INPUT,
                    Capability.VIDEO_INPUT,

                    Capability.TEXT_OUTPUT, Capability.SPEECH_OUTPUT,

                    Capability.FUNCTION_CALLING,
                    Capability.CHAT_COMPLETION_API,
                ];

            // The 2.0 flash models cannot call functions:
            if (modelName.IndexOf("-2.0-flash-") is not -1)
                return
                [
                    Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, Capability.AUDIO_INPUT,
                    Capability.SPEECH_INPUT, Capability.VIDEO_INPUT,

                    Capability.TEXT_OUTPUT,
                    Capability.CHAT_COMPLETION_API,
                ];

            // The old 1.0 pro vision model:
            if (modelName.IndexOf("pro-vision") is not -1)
                return
                [
                    Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,

                    Capability.TEXT_OUTPUT,
                    Capability.CHAT_COMPLETION_API,
                ];

            // Default to all other Gemini models:
            return
            [
                Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, Capability.AUDIO_INPUT,
                Capability.SPEECH_INPUT, Capability.VIDEO_INPUT,

                Capability.TEXT_OUTPUT,

                Capability.FUNCTION_CALLING,
                Capability.CHAT_COMPLETION_API,
            ];
        }

        // Default for all other models:
        return
        [
            Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,

            Capability.TEXT_OUTPUT,

            Capability.FUNCTION_CALLING,
            Capability.CHAT_COMPLETION_API,
        ];
    }
}

// File: app/MindWork AI Studio/Settings/ProviderExtensions.Mistral.cs
public static partial class ProviderExtensions
{
    /// <summary>
    /// Derives the capability set of a Mistral model from its model ID.
    /// </summary>
    /// <param name="model">The model whose ID is inspected (matched case-insensitively).</param>
    /// <returns>The list of capabilities this model is assumed to support.</returns>
    public static List<Capability> GetModelCapabilitiesMistral(Model model)
    {
        var modelName = model.Id.ToLowerInvariant().AsSpan();

        // Pixtral, Mistral medium, and Mistral small models process images and call functions:
        if (modelName.IndexOf("pixtral") is not -1 ||
            modelName.IndexOf("mistral-medium-") is not -1 ||
            modelName.IndexOf("mistral-small-") is not -1)
            return
            [
                Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
                Capability.TEXT_OUTPUT,

                Capability.FUNCTION_CALLING,
                Capability.CHAT_COMPLETION_API,
            ];

        // Mistral Saba: text only, no function calling:
        if (modelName.IndexOf("mistral-saba-") is not -1)
            return
            [
                Capability.TEXT_INPUT,
                Capability.TEXT_OUTPUT,
                Capability.CHAT_COMPLETION_API,
            ];

        // Default: treat like an open-source model:
        return GetModelCapabilitiesOpenSource(model);
    }
}

// File: app/MindWork AI Studio/Settings/ProviderExtensions.OpenAI.cs
public static partial class ProviderExtensions
{
    /// <summary>
    /// Derives the capability set of an OpenAI model from its model ID.
    /// </summary>
    /// <param name="model">The model whose ID is inspected (matched case-insensitively).</param>
    /// <returns>The list of capabilities this model is assumed to support.</returns>
    public static List<Capability> GetModelCapabilitiesOpenAI(Model model)
    {
        var modelName = model.Id.ToLowerInvariant().AsSpan();

        // The dedicated web-search preview models:
        if (modelName is "gpt-4o-search-preview" || modelName is "gpt-4o-mini-search-preview")
            return
            [
                Capability.TEXT_INPUT,
                Capability.TEXT_OUTPUT,

                Capability.WEB_SEARCH,
                Capability.CHAT_COMPLETION_API,
            ];

        // o1-mini: reasoning, no function calling, chat completion only:
        if (modelName.StartsWith("o1-mini"))
            return
            [
                Capability.TEXT_INPUT,
                Capability.TEXT_OUTPUT,

                Capability.ALWAYS_REASONING,
                Capability.CHAT_COMPLETION_API,
            ];

        // NOTE(review): gpt-3.5-turbo is mapped to the Responses API while all other
        // gpt-3.5 models below get the Chat Completion API — this looks inverted; confirm intent.
        if (modelName is "gpt-3.5-turbo")
            return
            [
                Capability.TEXT_INPUT,
                Capability.TEXT_OUTPUT,
                Capability.RESPONSES_API,
            ];

        if (modelName.StartsWith("gpt-3.5"))
            return
            [
                Capability.TEXT_INPUT,
                Capability.TEXT_OUTPUT,
                Capability.CHAT_COMPLETION_API,
            ];

        if (modelName.StartsWith("chatgpt-4o-"))
            return
            [
                Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
                Capability.TEXT_OUTPUT,
                Capability.RESPONSES_API,
            ];

        // o3-mini: reasoning + function calling, text only:
        if (modelName.StartsWith("o3-mini"))
            return
            [
                Capability.TEXT_INPUT,
                Capability.TEXT_OUTPUT,

                Capability.ALWAYS_REASONING, Capability.FUNCTION_CALLING,
                Capability.RESPONSES_API,
            ];

        // o4-mini and the remaining o3 models (o3-mini was handled above):
        if (modelName.StartsWith("o4-mini") || modelName.StartsWith("o3"))
            return
            [
                Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
                Capability.TEXT_OUTPUT,

                Capability.ALWAYS_REASONING, Capability.FUNCTION_CALLING,
                Capability.WEB_SEARCH,
                Capability.RESPONSES_API,
            ];

        // The remaining o1 models (o1-mini was handled above):
        if (modelName.StartsWith("o1"))
            return
            [
                Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
                Capability.TEXT_OUTPUT,

                Capability.ALWAYS_REASONING, Capability.FUNCTION_CALLING,
                Capability.RESPONSES_API,
            ];

        if (modelName.StartsWith("gpt-4-turbo"))
            return
            [
                Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
                Capability.TEXT_OUTPUT,

                Capability.FUNCTION_CALLING,
                Capability.RESPONSES_API,
            ];

        // Plain GPT-4 models: text only:
        if (modelName is "gpt-4" || modelName.StartsWith("gpt-4-"))
            return
            [
                Capability.TEXT_INPUT,
                Capability.TEXT_OUTPUT,
                Capability.RESPONSES_API,
            ];

        // gpt-5-nano: like the other GPT-5 models, but without web search:
        if (modelName.StartsWith("gpt-5-nano"))
            return
            [
                Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
                Capability.TEXT_OUTPUT,

                Capability.FUNCTION_CALLING, Capability.ALWAYS_REASONING,
                Capability.RESPONSES_API,
            ];

        if (modelName is "gpt-5" || modelName.StartsWith("gpt-5-"))
            return
            [
                Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
                Capability.TEXT_OUTPUT,

                Capability.FUNCTION_CALLING, Capability.ALWAYS_REASONING,
                Capability.WEB_SEARCH,
                Capability.RESPONSES_API,
            ];

        // Default for all other OpenAI models (e.g., gpt-4o):
        return
        [
            Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
            Capability.TEXT_OUTPUT,

            Capability.FUNCTION_CALLING,
            Capability.RESPONSES_API,
        ];
    }
}
// File: app/MindWork AI Studio/Settings/ProviderExtensions.Perplexity.cs
using AIStudio.Provider;

namespace AIStudio.Settings;

public static partial class ProviderExtensions
{
    /// <summary>
    /// Derives the capability set of a Perplexity model from its model ID.
    /// </summary>
    /// <param name="model">The model whose ID is inspected (matched case-insensitively).</param>
    /// <returns>The list of capabilities this model is assumed to support.</returns>
    public static List<Capability> GetModelCapabilitiesPerplexity(Model model)
    {
        var modelName = model.Id.ToLowerInvariant().AsSpan();

        // Reasoning and deep-research models always reason:
        if (modelName.IndexOf("reasoning") is not -1 ||
            modelName.IndexOf("deep-research") is not -1)
            return
            [
                Capability.TEXT_INPUT,
                Capability.MULTIPLE_IMAGE_INPUT,

                Capability.TEXT_OUTPUT,
                Capability.IMAGE_OUTPUT,

                Capability.ALWAYS_REASONING,
                Capability.WEB_SEARCH,
                Capability.CHAT_COMPLETION_API,
            ];

        // All Perplexity models provide web search:
        return
        [
            Capability.TEXT_INPUT,
            Capability.MULTIPLE_IMAGE_INPUT,

            Capability.TEXT_OUTPUT,
            Capability.IMAGE_OUTPUT,

            Capability.WEB_SEARCH,
            Capability.CHAT_COMPLETION_API,
        ];
    }
}

// File: app/MindWork AI Studio/Settings/ProviderExtensions.cs
public static partial class ProviderExtensions
{
    /// <summary>
    /// Determines the capabilities of the model configured for the given provider,
    /// dispatching to the provider-specific capability detection.
    /// </summary>
    /// <param name="provider">The configured provider whose model is inspected.</param>
    /// <returns>The list of capabilities; empty for unknown providers.</returns>
    public static List<Capability> GetModelCapabilities(this Provider provider) => provider.UsedLLMProvider switch
    {
        LLMProviders.OPEN_AI => GetModelCapabilitiesOpenAI(provider.Model),
        LLMProviders.MISTRAL => GetModelCapabilitiesMistral(provider.Model),
        LLMProviders.ANTHROPIC => GetModelCapabilitiesAnthropic(provider.Model),
        LLMProviders.GOOGLE => GetModelCapabilitiesGoogle(provider.Model),
        LLMProviders.X => GetModelCapabilitiesOpenSource(provider.Model),
        LLMProviders.DEEP_SEEK => GetModelCapabilitiesDeepSeek(provider.Model),
        LLMProviders.ALIBABA_CLOUD => GetModelCapabilitiesAlibaba(provider.Model),
        LLMProviders.PERPLEXITY => GetModelCapabilitiesPerplexity(provider.Model),

        // Hosting services for open-source (open-weight) models:
        LLMProviders.GROQ => GetModelCapabilitiesOpenSource(provider.Model),
        LLMProviders.FIREWORKS => GetModelCapabilitiesOpenSource(provider.Model),
        LLMProviders.HUGGINGFACE => GetModelCapabilitiesOpenSource(provider.Model),

        // Academic/research providers:
        LLMProviders.HELMHOLTZ => GetModelCapabilitiesOpenSource(provider.Model),
        LLMProviders.GWDG => GetModelCapabilitiesOpenSource(provider.Model),

        LLMProviders.SELF_HOSTED => GetModelCapabilitiesOpenSource(provider.Model),

        // Unknown provider: no capabilities can be assumed.
        _ => []
    };
}