From 9ef1a047f047504ff591037864e6dd65b863e81f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Peer=20Sch=C3=BCtt?= Date: Thu, 13 Nov 2025 18:13:16 +0100 Subject: [PATCH] Added expert mode to providers (#562) Co-authored-by: Thorsten Sommer --- .../Assistants/I18N/allTexts.lua | 19 ++- .../Settings/SettingsPanelProviders.razor.cs | 1 + .../Dialogs/ProviderDialog.razor | 12 ++ .../Dialogs/ProviderDialog.razor.cs | 23 +++ .../Plugins/configuration/plugin.lua | 15 +- .../plugin.lua | 19 ++- .../plugin.lua | 19 ++- .../AlibabaCloud/ProviderAlibabaCloud.cs | 82 +--------- .../Provider/Anthropic/ChatRequest.cs | 8 +- .../Provider/Anthropic/ProviderAnthropic.cs | 49 +----- .../Provider/BaseProvider.cs | 95 ++++++++--- .../Provider/DeepSeek/ProviderDeepSeek.cs | 26 +-- .../Provider/Fireworks/ChatRequest.cs | 9 +- .../Provider/Fireworks/ProviderFireworks.cs | 6 +- .../Provider/GWDG/ProviderGWDG.cs | 6 +- .../Provider/Google/ChatRequest.cs | 8 +- .../Provider/Google/ProviderGoogle.cs | 92 +---------- .../Provider/Groq/ChatRequest.cs | 8 +- .../Provider/Groq/ProviderGroq.cs | 6 +- .../Provider/Helmholtz/ProviderHelmholtz.cs | 6 +- .../HuggingFace/ProviderHuggingFace.cs | 6 +- app/MindWork AI Studio/Provider/IProvider.cs | 11 +- .../Provider/LLMProvidersExtensions.cs | 32 ++-- .../Provider/Mistral/ChatRequest.cs | 9 +- .../Provider/Mistral/ProviderMistral.cs | 56 +------ app/MindWork AI Studio/Provider/NoProvider.cs | 3 + .../OpenAI/ChatCompletionAPIRequest.cs | 6 + .../Provider/OpenAI/ProviderOpenAI.cs | 148 ++---------------- .../Provider/OpenAI/ResponsesAPIRequest.cs | 6 + .../Provider/Perplexity/ProviderPerplexity.cs | 36 +---- .../Provider/SelfHosted/ChatRequest.cs | 9 +- .../Provider/SelfHosted/ProviderSelfHosted.cs | 8 +- .../Provider/X/ProviderX.cs | 6 +- app/MindWork AI Studio/Settings/Provider.cs | 12 +- .../Settings/ProviderExtensions.Alibaba.cs | 84 ++++++++++ .../Settings/ProviderExtensions.Anthropic.cs | 49 ++++++ .../Settings/ProviderExtensions.DeepSeek.cs | 28 ++++ 
.../Settings/ProviderExtensions.Google.cs | 95 +++++++++++ .../Settings/ProviderExtensions.Mistral.cs | 56 +++++++ .../Settings/ProviderExtensions.OpenAI.cs | 144 +++++++++++++++++ .../ProviderExtensions.OpenSource.cs} | 8 +- .../Settings/ProviderExtensions.Perplexity.cs | 38 +++++ .../Settings/ProviderExtensions.cs | 29 ++++ .../wwwroot/changelog/v0.9.53.md | 3 +- 44 files changed, 860 insertions(+), 531 deletions(-) create mode 100644 app/MindWork AI Studio/Settings/ProviderExtensions.Alibaba.cs create mode 100644 app/MindWork AI Studio/Settings/ProviderExtensions.Anthropic.cs create mode 100644 app/MindWork AI Studio/Settings/ProviderExtensions.DeepSeek.cs create mode 100644 app/MindWork AI Studio/Settings/ProviderExtensions.Google.cs create mode 100644 app/MindWork AI Studio/Settings/ProviderExtensions.Mistral.cs create mode 100644 app/MindWork AI Studio/Settings/ProviderExtensions.OpenAI.cs rename app/MindWork AI Studio/{Provider/CapabilitiesOpenSource.cs => Settings/ProviderExtensions.OpenSource.cs} (97%) create mode 100644 app/MindWork AI Studio/Settings/ProviderExtensions.Perplexity.cs create mode 100644 app/MindWork AI Studio/Settings/ProviderExtensions.cs diff --git a/app/MindWork AI Studio/Assistants/I18N/allTexts.lua b/app/MindWork AI Studio/Assistants/I18N/allTexts.lua index f0ccad7f..8ac523db 100644 --- a/app/MindWork AI Studio/Assistants/I18N/allTexts.lua +++ b/app/MindWork AI Studio/Assistants/I18N/allTexts.lua @@ -3028,9 +3028,15 @@ UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROFILEDIALOG::T900713019"] = "Cancel" -- The profile name must be unique; the chosen name is already in use. UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROFILEDIALOG::T911748898"] = "The profile name must be unique; the chosen name is already in use." +-- Please be aware: This section is for experts only. You are responsible for verifying the correctness of the additional parameters you provide to the API call. 
By default, AI Studio uses the OpenAI-compatible chat completions API, when that it is supported by the underlying service and model. +UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T1017509792"] = "Please be aware: This section is for experts only. You are responsible for verifying the correctness of the additional parameters you provide to the API call. By default, AI Studio uses the OpenAI-compatible chat completions API, when that it is supported by the underlying service and model." + -- Hugging Face Inference Provider UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T1085481431"] = "Hugging Face Inference Provider" +-- Hide Expert Settings +UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T1108876344"] = "Hide Expert Settings" + -- Failed to store the API key in the operating system. The message was: {0}. Please try again. UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T1122745046"] = "Failed to store the API key in the operating system. The message was: {0}. Please try again." @@ -3043,6 +3049,9 @@ UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T1356621346"] = "Create acco -- Load models UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T15352225"] = "Load models" +-- Add the parameters in proper JSON formatting, e.g., "temperature": 0.5. Remove trailing commas. The usual surrounding curly brackets {} must not be used, though. +UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T1689135032"] = "Add the parameters in proper JSON formatting, e.g., \"temperature\": 0.5. Remove trailing commas. The usual surrounding curly brackets {} must not be used, though." 
+ -- Hostname UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T1727440780"] = "Hostname" @@ -3064,12 +3073,18 @@ UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T2331453405"] = "(Optional) -- Add UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T2646845972"] = "Add" +-- Additional API parameters +UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T2728244552"] = "Additional API parameters" + -- No models loaded or available. UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T2810182573"] = "No models loaded or available." -- Instance Name UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T2842060373"] = "Instance Name" +-- Show Expert Settings +UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T3361153305"] = "Show Expert Settings" + -- Show available models UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T3763891899"] = "Show available models" @@ -4813,8 +4828,8 @@ UI_TEXT_CONTENT["AISTUDIO::PROVIDER::BASEPROVIDER::T1674355816"] = "Tried to com -- Tried to stream the LLM provider '{0}' answer. Was not able to read the stream. The message is: '{1}' UI_TEXT_CONTENT["AISTUDIO::PROVIDER::BASEPROVIDER::T1856278860"] = "Tried to stream the LLM provider '{0}' answer. Was not able to read the stream. The message is: '{1}'" --- Tried to communicate with the LLM provider '{0}'. Even after {1} retries, there were some problems with the request. The provider message is: '{2}' -UI_TEXT_CONTENT["AISTUDIO::PROVIDER::BASEPROVIDER::T2249520705"] = "Tried to communicate with the LLM provider '{0}'. Even after {1} retries, there were some problems with the request. The provider message is: '{2}'" +-- Tried to communicate with the LLM provider '{0}'. Even after {1} retries, there were some problems with the request. The provider message is: '{2}'. +UI_TEXT_CONTENT["AISTUDIO::PROVIDER::BASEPROVIDER::T2181034173"] = "Tried to communicate with the LLM provider '{0}'. Even after {1} retries, there were some problems with the request. The provider message is: '{2}'." 
-- Tried to communicate with the LLM provider '{0}'. Something was not found. The provider message is: '{1}' UI_TEXT_CONTENT["AISTUDIO::PROVIDER::BASEPROVIDER::T2780552614"] = "Tried to communicate with the LLM provider '{0}'. Something was not found. The provider message is: '{1}'" diff --git a/app/MindWork AI Studio/Components/Settings/SettingsPanelProviders.razor.cs b/app/MindWork AI Studio/Components/Settings/SettingsPanelProviders.razor.cs index 1af88e3e..af506e5e 100644 --- a/app/MindWork AI Studio/Components/Settings/SettingsPanelProviders.razor.cs +++ b/app/MindWork AI Studio/Components/Settings/SettingsPanelProviders.razor.cs @@ -72,6 +72,7 @@ public partial class SettingsPanelProviders : SettingsPanelBase { x => x.IsEditing, true }, { x => x.DataHost, provider.Host }, { x => x.HFInferenceProviderId, provider.HFInferenceProvider }, + { x => x.AdditionalJsonApiParameters, provider.AdditionalJsonApiParameters }, }; var dialogReference = await this.DialogService.ShowAsync(T("Edit LLM Provider"), dialogParameters, DialogOptions.FULLSCREEN); diff --git a/app/MindWork AI Studio/Dialogs/ProviderDialog.razor b/app/MindWork AI Studio/Dialogs/ProviderDialog.razor index 9da54aaf..e4c22f18 100644 --- a/app/MindWork AI Studio/Dialogs/ProviderDialog.razor +++ b/app/MindWork AI Studio/Dialogs/ProviderDialog.razor @@ -130,6 +130,18 @@ UserAttributes="@SPELLCHECK_ATTRIBUTES" /> + + + @(this.showExpertSettings ? T("Hide Expert Settings") : T("Show Expert Settings")) + + + + + @T("Please be aware: This section is for experts only. You are responsible for verifying the correctness of the additional parameters you provide to the API call. 
By default, AI Studio uses the OpenAI-compatible chat completions API, when that it is supported by the underlying service and model.") + + + + diff --git a/app/MindWork AI Studio/Dialogs/ProviderDialog.razor.cs b/app/MindWork AI Studio/Dialogs/ProviderDialog.razor.cs index 43a47330..0c3d2b1f 100644 --- a/app/MindWork AI Studio/Dialogs/ProviderDialog.razor.cs +++ b/app/MindWork AI Studio/Dialogs/ProviderDialog.razor.cs @@ -78,6 +78,9 @@ public partial class ProviderDialog : MSGComponentBase, ISecretId [Parameter] public bool IsEditing { get; init; } + [Parameter] + public string AdditionalJsonApiParameters { get; set; } = string.Empty; + [Inject] private RustService RustService { get; init; } = null!; @@ -94,6 +97,7 @@ public partial class ProviderDialog : MSGComponentBase, ISecretId private string dataManuallyModel = string.Empty; private string dataAPIKeyStorageIssue = string.Empty; private string dataEditingPreviousInstanceName = string.Empty; + private bool showExpertSettings; // We get the form reference from Blazor code to validate it manually: private MudForm form = null!; @@ -135,6 +139,7 @@ public partial class ProviderDialog : MSGComponentBase, ISecretId Hostname = cleanedHostname.EndsWith('/') ? 
cleanedHostname[..^1] : cleanedHostname, Host = this.DataHost, HFInferenceProvider = this.HFInferenceProviderId, + AdditionalJsonApiParameters = this.AdditionalJsonApiParameters, }; } @@ -149,6 +154,8 @@ public partial class ProviderDialog : MSGComponentBase, ISecretId #pragma warning disable MWAIS0001 this.UsedInstanceNames = this.SettingsManager.ConfigurationData.Providers.Select(x => x.InstanceName.ToLowerInvariant()).ToList(); #pragma warning restore MWAIS0001 + + this.showExpertSettings = !string.IsNullOrWhiteSpace(this.AdditionalJsonApiParameters); // When editing, we need to load the data: if(this.IsEditing) @@ -268,4 +275,20 @@ public partial class ProviderDialog : MSGComponentBase, ISecretId LLMProviders.SELF_HOSTED => T("(Optional) API Key"), _ => T("API Key"), }; + + private void ToggleExpertSettings() => this.showExpertSettings = !this.showExpertSettings; + + private void OnInputChangeExpertSettings() + { + this.AdditionalJsonApiParameters = this.AdditionalJsonApiParameters.Trim().TrimEnd(',', ' '); + } + + private string GetExpertStyles => this.showExpertSettings ? 
"border-2 border-dashed rounded pa-2" : string.Empty; + + private static string GetPlaceholderExpertSettings => + """ + "temperature": 0.5, + "top_p": 0.9, + "frequency_penalty": 0.0 + """; } \ No newline at end of file diff --git a/app/MindWork AI Studio/Plugins/configuration/plugin.lua b/app/MindWork AI Studio/Plugins/configuration/plugin.lua index cff866f7..77921f38 100644 --- a/app/MindWork AI Studio/Plugins/configuration/plugin.lua +++ b/app/MindWork AI Studio/Plugins/configuration/plugin.lua @@ -47,15 +47,24 @@ DEPRECATION_MESSAGE = "" CONFIG = {} CONFIG["LLM_PROVIDERS"] = {} --- An example of a configuration for a self-hosted ollama server: +-- An example of a configuration for a self-hosted server: CONFIG["LLM_PROVIDERS"][#CONFIG["LLM_PROVIDERS"]+1] = { ["Id"] = "00000000-0000-0000-0000-000000000000", ["InstanceName"] = "", ["UsedLLMProvider"] = "SELF_HOSTED", + + -- Allowed values for Host are: LM_STUDIO, LLAMACPP, OLLAMA, and VLLM ["Host"] = "OLLAMA", - ["Hostname"] = "", + ["Hostname"] = "", + + -- Optional: Additional parameters for the API. + -- Please refer to the documentation of the selected host for details. + -- Might be something like ... \"temperature\": 0.5 ... for one parameter. + -- Could be something like ... \"temperature\": 0.5, \"max_tokens\": 1000 ... for multiple parameters. + -- Please do not add the enclosing curly braces {} here. Also, no trailing comma is allowed. 
+ ["AdditionalJsonApiParameters"] = "", ["Model"] = { - ["Id"] = "", + ["Id"] = "", ["DisplayName"] = "", } } diff --git a/app/MindWork AI Studio/Plugins/languages/de-de-43065dbc-78d0-45b7-92be-f14c2926e2dc/plugin.lua b/app/MindWork AI Studio/Plugins/languages/de-de-43065dbc-78d0-45b7-92be-f14c2926e2dc/plugin.lua index 7cf2c578..c4815d73 100644 --- a/app/MindWork AI Studio/Plugins/languages/de-de-43065dbc-78d0-45b7-92be-f14c2926e2dc/plugin.lua +++ b/app/MindWork AI Studio/Plugins/languages/de-de-43065dbc-78d0-45b7-92be-f14c2926e2dc/plugin.lua @@ -3030,9 +3030,15 @@ UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROFILEDIALOG::T900713019"] = "Abbrechen" -- The profile name must be unique; the chosen name is already in use. UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROFILEDIALOG::T911748898"] = "Der Profilname muss eindeutig sein; der ausgewählte Name wird bereits verwendet." +-- Please be aware: This section is for experts only. You are responsible for verifying the correctness of the additional parameters you provide to the API call. By default, AI Studio uses the OpenAI-compatible chat completions API, when that it is supported by the underlying service and model. +UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T1017509792"] = "Bitte beachten Sie: Dieser Bereich ist nur für Expertinnen und Experten. Sie sind dafür verantwortlich, die Korrektheit der zusätzlichen Parameter zu überprüfen, die Sie beim API‑Aufruf angeben. Standardmäßig verwendet AI Studio die OpenAI‑kompatible Chat Completions-API, sofern diese vom zugrunde liegenden Dienst und Modell unterstützt wird." + -- Hugging Face Inference Provider UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T1085481431"] = "Hugging Face Inferenz-Anbieter" +-- Hide Expert Settings +UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T1108876344"] = "Experten-Einstellungen ausblenden" + -- Failed to store the API key in the operating system. The message was: {0}. Please try again. 
UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T1122745046"] = "Der API-Schlüssel konnte nicht im Betriebssystem gespeichert werden. Die Meldung war: {0}. Bitte versuchen Sie es erneut." @@ -3045,6 +3051,9 @@ UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T1356621346"] = "Konto erste -- Load models UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T15352225"] = "Modelle laden" +-- Add the parameters in proper JSON formatting, e.g., "temperature": 0.5. Remove trailing commas. The usual surrounding curly brackets {} must not be used, though. +UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T1689135032"] = "Fügen Sie die Parameter in korrekter JSON-Formatierung hinzu, z. B. \"temperature\": 0.5. Entfernen Sie abschließende Kommas. Die üblichen äußeren geschweiften Klammern {} dürfen dabei jedoch nicht verwendet werden." + -- Hostname UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T1727440780"] = "Hostname" @@ -3066,12 +3075,18 @@ UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T2331453405"] = "(Optional) -- Add UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T2646845972"] = "Hinzufügen" +-- Additional API parameters +UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T2728244552"] = "Zusätzliche API-Parameter" + -- No models loaded or available. UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T2810182573"] = "Keine Modelle geladen oder verfügbar." -- Instance Name UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T2842060373"] = "Instanzname" +-- Show Expert Settings +UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T3361153305"] = "Experten-Einstellungen anzeigen" + -- Show available models UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T3763891899"] = "Verfügbare Modelle anzeigen" @@ -4815,8 +4830,8 @@ UI_TEXT_CONTENT["AISTUDIO::PROVIDER::BASEPROVIDER::T1674355816"] = "Es wurde ver -- Tried to stream the LLM provider '{0}' answer. Was not able to read the stream. 
The message is: '{1}' UI_TEXT_CONTENT["AISTUDIO::PROVIDER::BASEPROVIDER::T1856278860"] = "Der Versuch, die Antwort des LLM-Anbieters '{0}' zu streamen, ist fehlgeschlagen. Der Stream konnte nicht gelesen werden. Die Meldung lautet: '{1}'" --- Tried to communicate with the LLM provider '{0}'. Even after {1} retries, there were some problems with the request. The provider message is: '{2}' -UI_TEXT_CONTENT["AISTUDIO::PROVIDER::BASEPROVIDER::T2249520705"] = "Es wurde versucht, mit dem LLM-Anbieter '{0}' zu kommunizieren. Auch nach {1} Versuchen gab es Probleme mit der Anfrage. Die Nachricht des Anbieters lautet: '{2}'" +-- Tried to communicate with the LLM provider '{0}'. Even after {1} retries, there were some problems with the request. The provider message is: '{2}'. +UI_TEXT_CONTENT["AISTUDIO::PROVIDER::BASEPROVIDER::T2181034173"] = "Versuchte, mit dem LLM-Anbieter '{0}' zu kommunizieren. Auch nach {1} Wiederholungsversuchen gab es Probleme mit der Anfrage. Die Meldung des Anbieters lautet: '{2}'." -- Tried to communicate with the LLM provider '{0}'. Something was not found. The provider message is: '{1}' UI_TEXT_CONTENT["AISTUDIO::PROVIDER::BASEPROVIDER::T2780552614"] = "Es wurde versucht, mit dem LLM-Anbieter '{0}' zu kommunizieren. Etwas wurde nicht gefunden. Die Meldung des Anbieters lautet: '{1}'" diff --git a/app/MindWork AI Studio/Plugins/languages/en-us-97dfb1ba-50c4-4440-8dfa-6575daf543c8/plugin.lua b/app/MindWork AI Studio/Plugins/languages/en-us-97dfb1ba-50c4-4440-8dfa-6575daf543c8/plugin.lua index e22f8587..e968c5bc 100644 --- a/app/MindWork AI Studio/Plugins/languages/en-us-97dfb1ba-50c4-4440-8dfa-6575daf543c8/plugin.lua +++ b/app/MindWork AI Studio/Plugins/languages/en-us-97dfb1ba-50c4-4440-8dfa-6575daf543c8/plugin.lua @@ -3030,9 +3030,15 @@ UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROFILEDIALOG::T900713019"] = "Cancel" -- The profile name must be unique; the chosen name is already in use. 
UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROFILEDIALOG::T911748898"] = "The profile name must be unique; the chosen name is already in use." +-- Please be aware: This section is for experts only. You are responsible for verifying the correctness of the additional parameters you provide to the API call. By default, AI Studio uses the OpenAI-compatible chat completions API, when that it is supported by the underlying service and model. +UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T1017509792"] = "Please be aware: This section is for experts only. You are responsible for verifying the correctness of the additional parameters you provide to the API call. By default, AI Studio uses the OpenAI-compatible chat completions API, when that it is supported by the underlying service and model." + -- Hugging Face Inference Provider UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T1085481431"] = "Hugging Face Inference Provider" +-- Hide Expert Settings +UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T1108876344"] = "Hide Expert Settings" + -- Failed to store the API key in the operating system. The message was: {0}. Please try again. UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T1122745046"] = "Failed to store the API key in the operating system. The message was: {0}. Please try again." @@ -3045,6 +3051,9 @@ UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T1356621346"] = "Create acco -- Load models UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T15352225"] = "Load models" +-- Add the parameters in proper JSON formatting, e.g., "temperature": 0.5. Remove trailing commas. The usual surrounding curly brackets {} must not be used, though. +UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T1689135032"] = "Add the parameters in proper JSON formatting, e.g., \"temperature\": 0.5. Remove trailing commas. The usual surrounding curly brackets {} must not be used, though." 
+ -- Hostname UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T1727440780"] = "Hostname" @@ -3066,12 +3075,18 @@ UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T2331453405"] = "(Optional) -- Add UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T2646845972"] = "Add" +-- Additional API parameters +UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T2728244552"] = "Additional API parameters" + -- No models loaded or available. UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T2810182573"] = "No models loaded or available." -- Instance Name UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T2842060373"] = "Instance Name" +-- Show Expert Settings +UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T3361153305"] = "Show Expert Settings" + -- Show available models UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T3763891899"] = "Show available models" @@ -4815,8 +4830,8 @@ UI_TEXT_CONTENT["AISTUDIO::PROVIDER::BASEPROVIDER::T1674355816"] = "Tried to com -- Tried to stream the LLM provider '{0}' answer. Was not able to read the stream. The message is: '{1}' UI_TEXT_CONTENT["AISTUDIO::PROVIDER::BASEPROVIDER::T1856278860"] = "Tried to stream the LLM provider '{0}' answer. Was not able to read the stream. The message is: '{1}'" --- Tried to communicate with the LLM provider '{0}'. Even after {1} retries, there were some problems with the request. The provider message is: '{2}' -UI_TEXT_CONTENT["AISTUDIO::PROVIDER::BASEPROVIDER::T2249520705"] = "Tried to communicate with the LLM provider '{0}'. Even after {1} retries, there were some problems with the request. The provider message is: '{2}'" +-- Tried to communicate with the LLM provider '{0}'. Even after {1} retries, there were some problems with the request. The provider message is: '{2}'. +UI_TEXT_CONTENT["AISTUDIO::PROVIDER::BASEPROVIDER::T2181034173"] = "Tried to communicate with the LLM provider '{0}'. Even after {1} retries, there were some problems with the request. The provider message is: '{2}'." 
-- Tried to communicate with the LLM provider '{0}'. Something was not found. The provider message is: '{1}' UI_TEXT_CONTENT["AISTUDIO::PROVIDER::BASEPROVIDER::T2780552614"] = "Tried to communicate with the LLM provider '{0}'. Something was not found. The provider message is: '{1}'" diff --git a/app/MindWork AI Studio/Provider/AlibabaCloud/ProviderAlibabaCloud.cs b/app/MindWork AI Studio/Provider/AlibabaCloud/ProviderAlibabaCloud.cs index cbf87c65..e7d3e523 100644 --- a/app/MindWork AI Studio/Provider/AlibabaCloud/ProviderAlibabaCloud.cs +++ b/app/MindWork AI Studio/Provider/AlibabaCloud/ProviderAlibabaCloud.cs @@ -36,6 +36,9 @@ public sealed class ProviderAlibabaCloud() : BaseProvider("https://dashscope-int Content = chatThread.PrepareSystemPrompt(settingsManager, chatThread), }; + // Parse the API parameters: + var apiParameters = this.ParseAdditionalApiParameters(); + // Prepare the AlibabaCloud HTTP chat request: var alibabaCloudChatRequest = JsonSerializer.Serialize(new ChatCompletionAPIRequest { @@ -63,6 +66,7 @@ public sealed class ProviderAlibabaCloud() : BaseProvider("https://dashscope-int } }).ToList()], Stream = true, + AdditionalApiParameters = apiParameters }, JSON_SERIALIZER_OPTIONS); async Task RequestBuilder() @@ -141,84 +145,6 @@ public sealed class ProviderAlibabaCloud() : BaseProvider("https://dashscope-int return this.LoadModels(["text-embedding-"], token, apiKeyProvisional).ContinueWith(t => t.Result.Concat(additionalModels).OrderBy(x => x.Id).AsEnumerable(), token); } - /// - public override IReadOnlyCollection GetModelCapabilities(Model model) - { - var modelName = model.Id.ToLowerInvariant().AsSpan(); - - // Qwen models: - if (modelName.StartsWith("qwen")) - { - // Check for omni models: - if (modelName.IndexOf("omni") is not -1) - return - [ - Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, - Capability.AUDIO_INPUT, Capability.SPEECH_INPUT, - Capability.VIDEO_INPUT, - - Capability.TEXT_OUTPUT, Capability.SPEECH_OUTPUT, - - 
Capability.CHAT_COMPLETION_API, - ]; - - // Check for Qwen 3: - if(modelName.StartsWith("qwen3")) - return - [ - Capability.TEXT_INPUT, - Capability.TEXT_OUTPUT, - - Capability.OPTIONAL_REASONING, Capability.FUNCTION_CALLING, - Capability.CHAT_COMPLETION_API, - ]; - - if(modelName.IndexOf("-vl-") is not -1) - return - [ - Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, - Capability.TEXT_OUTPUT, - - Capability.CHAT_COMPLETION_API, - ]; - } - - // QwQ models: - if (modelName.StartsWith("qwq")) - { - return - [ - Capability.TEXT_INPUT, - Capability.TEXT_OUTPUT, - - Capability.ALWAYS_REASONING, Capability.FUNCTION_CALLING, - Capability.CHAT_COMPLETION_API, - ]; - } - - // QVQ models: - if (modelName.StartsWith("qvq")) - { - return - [ - Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, - Capability.TEXT_OUTPUT, - - Capability.ALWAYS_REASONING, - Capability.CHAT_COMPLETION_API, - ]; - } - - // Default to text input and output: - return - [ - Capability.TEXT_INPUT, - Capability.TEXT_OUTPUT, - - Capability.FUNCTION_CALLING, - Capability.CHAT_COMPLETION_API, - ]; - } #endregion diff --git a/app/MindWork AI Studio/Provider/Anthropic/ChatRequest.cs b/app/MindWork AI Studio/Provider/Anthropic/ChatRequest.cs index 0a15098e..f7103bd7 100644 --- a/app/MindWork AI Studio/Provider/Anthropic/ChatRequest.cs +++ b/app/MindWork AI Studio/Provider/Anthropic/ChatRequest.cs @@ -1,3 +1,4 @@ +using System.Text.Json.Serialization; using AIStudio.Provider.OpenAI; namespace AIStudio.Provider.Anthropic; @@ -16,4 +17,9 @@ public readonly record struct ChatRequest( int MaxTokens, bool Stream, string System -); \ No newline at end of file +) +{ + // Attention: The "required" modifier is not supported for [JsonExtensionData]. 
+ [JsonExtensionData] + public IDictionary AdditionalApiParameters { get; init; } = new Dictionary(); +} \ No newline at end of file diff --git a/app/MindWork AI Studio/Provider/Anthropic/ProviderAnthropic.cs b/app/MindWork AI Studio/Provider/Anthropic/ProviderAnthropic.cs index 96c9306b..f2c88f52 100644 --- a/app/MindWork AI Studio/Provider/Anthropic/ProviderAnthropic.cs +++ b/app/MindWork AI Studio/Provider/Anthropic/ProviderAnthropic.cs @@ -26,6 +26,9 @@ public sealed class ProviderAnthropic() : BaseProvider("https://api.anthropic.co var requestedSecret = await RUST_SERVICE.GetAPIKey(this); if(!requestedSecret.Success) yield break; + + // Parse the API parameters: + var apiParameters = this.ParseAdditionalApiParameters("system"); // Prepare the Anthropic HTTP chat request: var chatRequest = JsonSerializer.Serialize(new ChatRequest @@ -52,10 +55,11 @@ public sealed class ProviderAnthropic() : BaseProvider("https://api.anthropic.co }).ToList()], System = chatThread.PrepareSystemPrompt(settingsManager, chatThread), - MaxTokens = 4_096, + MaxTokens = apiParameters.TryGetValue("max_tokens", out var value) && value is int intValue ? 
intValue : 4_096, // Right now, we only support streaming completions: Stream = true, + AdditionalApiParameters = apiParameters }, JSON_SERIALIZER_OPTIONS); async Task RequestBuilder() @@ -113,49 +117,6 @@ public sealed class ProviderAnthropic() : BaseProvider("https://api.anthropic.co { return Task.FromResult(Enumerable.Empty()); } - - public override IReadOnlyCollection GetModelCapabilities(Model model) - { - var modelName = model.Id.ToLowerInvariant().AsSpan(); - - // Claude 4.x models: - if(modelName.StartsWith("claude-opus-4") || modelName.StartsWith("claude-sonnet-4")) - return [ - Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, - Capability.TEXT_OUTPUT, - - Capability.OPTIONAL_REASONING, Capability.FUNCTION_CALLING, - Capability.CHAT_COMPLETION_API, - ]; - - // Claude 3.7 is able to do reasoning: - if(modelName.StartsWith("claude-3-7")) - return [ - Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, - Capability.TEXT_OUTPUT, - - Capability.OPTIONAL_REASONING, Capability.FUNCTION_CALLING, - Capability.CHAT_COMPLETION_API, - ]; - - // All other 3.x models are able to process text and images as input: - if(modelName.StartsWith("claude-3-")) - return [ - Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, - Capability.TEXT_OUTPUT, - - Capability.FUNCTION_CALLING, - Capability.CHAT_COMPLETION_API, - ]; - - // Any other model is able to process text only: - return [ - Capability.TEXT_INPUT, - Capability.TEXT_OUTPUT, - Capability.FUNCTION_CALLING, - Capability.CHAT_COMPLETION_API, - ]; - } #endregion diff --git a/app/MindWork AI Studio/Provider/BaseProvider.cs b/app/MindWork AI Studio/Provider/BaseProvider.cs index 6a28f832..00b4aa26 100644 --- a/app/MindWork AI Studio/Provider/BaseProvider.cs +++ b/app/MindWork AI Studio/Provider/BaseProvider.cs @@ -40,7 +40,8 @@ public abstract class BaseProvider : IProvider, ISecretId protected static readonly JsonSerializerOptions JSON_SERIALIZER_OPTIONS = new() { PropertyNamingPolicy = 
JsonNamingPolicy.SnakeCaseLower, - Converters = { new AnnotationConverter() } + Converters = { new AnnotationConverter() }, + AllowTrailingCommas = false }; /// @@ -63,7 +64,10 @@ public abstract class BaseProvider : IProvider, ISecretId /// public abstract string InstanceName { get; set; } - + + /// + public string AdditionalJsonApiParameters { get; init; } = string.Empty; + /// public abstract IAsyncEnumerable StreamChatCompletion(Model chatModel, ChatThread chatThread, SettingsManager settingsManager, CancellationToken token = default); @@ -78,9 +82,6 @@ public abstract class BaseProvider : IProvider, ISecretId /// public abstract Task> GetEmbeddingModels(string? apiKeyProvisional = null, CancellationToken token = default); - - /// - public abstract IReadOnlyCollection GetModelCapabilities(Model model); #endregion @@ -129,8 +130,7 @@ public abstract class BaseProvider : IProvider, ISecretId if (nextResponse.StatusCode is HttpStatusCode.Forbidden) { await MessageBus.INSTANCE.SendError(new(Icons.Material.Filled.Block, string.Format(TB("Tried to communicate with the LLM provider '{0}'. You might not be able to use this provider from your location. The provider message is: '{1}'"), this.InstanceName, nextResponse.ReasonPhrase))); - this.logger.LogError($"Failed request with status code {nextResponse.StatusCode} (message = '{nextResponse.ReasonPhrase}')."); - this.logger.LogDebug($"Error body: {errorBody}"); + this.logger.LogError("Failed request with status code {ResposeStatusCode} (message = '{ResponseReasonPhrase}', error body = '{ErrorBody}').", nextResponse.StatusCode, nextResponse.ReasonPhrase, errorBody); errorMessage = nextResponse.ReasonPhrase; break; } @@ -138,8 +138,7 @@ public abstract class BaseProvider : IProvider, ISecretId if(nextResponse.StatusCode is HttpStatusCode.BadRequest) { await MessageBus.INSTANCE.SendError(new(Icons.Material.Filled.CloudOff, string.Format(TB("Tried to communicate with the LLM provider '{0}'. 
The required message format might be changed. The provider message is: '{1}'"), this.InstanceName, nextResponse.ReasonPhrase))); - this.logger.LogError($"Failed request with status code {nextResponse.StatusCode} (message = '{nextResponse.ReasonPhrase}')."); - this.logger.LogDebug($"Error body: {errorBody}"); + this.logger.LogError("Failed request with status code {ResposeStatusCode} (message = '{ResponseReasonPhrase}', error body = '{ErrorBody}').", nextResponse.StatusCode, nextResponse.ReasonPhrase, errorBody); errorMessage = nextResponse.ReasonPhrase; break; } @@ -147,8 +146,7 @@ public abstract class BaseProvider : IProvider, ISecretId if(nextResponse.StatusCode is HttpStatusCode.NotFound) { await MessageBus.INSTANCE.SendError(new(Icons.Material.Filled.CloudOff, string.Format(TB("Tried to communicate with the LLM provider '{0}'. Something was not found. The provider message is: '{1}'"), this.InstanceName, nextResponse.ReasonPhrase))); - this.logger.LogError($"Failed request with status code {nextResponse.StatusCode} (message = '{nextResponse.ReasonPhrase}')."); - this.logger.LogDebug($"Error body: {errorBody}"); + this.logger.LogError("Failed request with status code {ResposeStatusCode} (message = '{ResponseReasonPhrase}', error body = '{ErrorBody}').", nextResponse.StatusCode, nextResponse.ReasonPhrase, errorBody); errorMessage = nextResponse.ReasonPhrase; break; } @@ -156,8 +154,7 @@ public abstract class BaseProvider : IProvider, ISecretId if(nextResponse.StatusCode is HttpStatusCode.Unauthorized) { await MessageBus.INSTANCE.SendError(new(Icons.Material.Filled.Key, string.Format(TB("Tried to communicate with the LLM provider '{0}'. The API key might be invalid. 
The provider message is: '{1}'"), this.InstanceName, nextResponse.ReasonPhrase))); - this.logger.LogError($"Failed request with status code {nextResponse.StatusCode} (message = '{nextResponse.ReasonPhrase}')."); - this.logger.LogDebug($"Error body: {errorBody}"); + this.logger.LogError("Failed request with status code {ResponseStatusCode} (message = '{ResponseReasonPhrase}', error body = '{ErrorBody}').", nextResponse.StatusCode, nextResponse.ReasonPhrase, errorBody); errorMessage = nextResponse.ReasonPhrase; break; } @@ -165,8 +162,7 @@ public abstract class BaseProvider : IProvider, ISecretId if(nextResponse.StatusCode is HttpStatusCode.InternalServerError) { await MessageBus.INSTANCE.SendError(new(Icons.Material.Filled.CloudOff, string.Format(TB("Tried to communicate with the LLM provider '{0}'. The server might be down or having issues. The provider message is: '{1}'"), this.InstanceName, nextResponse.ReasonPhrase))); - this.logger.LogError($"Failed request with status code {nextResponse.StatusCode} (message = '{nextResponse.ReasonPhrase}')."); - this.logger.LogDebug($"Error body: {errorBody}"); + this.logger.LogError("Failed request with status code {ResponseStatusCode} (message = '{ResponseReasonPhrase}', error body = '{ErrorBody}').", nextResponse.StatusCode, nextResponse.ReasonPhrase, errorBody); errorMessage = nextResponse.ReasonPhrase; break; } @@ -174,8 +170,7 @@ public abstract class BaseProvider : IProvider, ISecretId if(nextResponse.StatusCode is HttpStatusCode.ServiceUnavailable) { await MessageBus.INSTANCE.SendError(new(Icons.Material.Filled.CloudOff, string.Format(TB("Tried to communicate with the LLM provider '{0}'. The provider is overloaded. 
The message is: '{1}'"), this.InstanceName, nextResponse.ReasonPhrase))); - this.logger.LogError($"Failed request with status code {nextResponse.StatusCode} (message = '{nextResponse.ReasonPhrase}')."); - this.logger.LogDebug($"Error body: {errorBody}"); + this.logger.LogError("Failed request with status code {ResponseStatusCode} (message = '{ResponseReasonPhrase}', error body = '{ErrorBody}').", nextResponse.StatusCode, nextResponse.ReasonPhrase, errorBody); errorMessage = nextResponse.ReasonPhrase; break; } @@ -185,13 +180,13 @@ public abstract class BaseProvider : IProvider, ISecretId if(timeSeconds > 90) timeSeconds = 90; - this.logger.LogDebug($"Failed request with status code {nextResponse.StatusCode} (message = '{errorMessage}'). Retrying in {timeSeconds:0.00} seconds."); + this.logger.LogDebug("Failed request with status code {ResponseStatusCode} (message = '{ErrorMessage}'). Retrying in {TimeSeconds:0.00} seconds.", nextResponse.StatusCode, errorMessage, timeSeconds); await Task.Delay(TimeSpan.FromSeconds(timeSeconds), token); } if(retry >= MAX_RETRIES || !string.IsNullOrWhiteSpace(errorMessage)) { - await MessageBus.INSTANCE.SendError(new(Icons.Material.Filled.CloudOff, string.Format(TB("Tried to communicate with the LLM provider '{0}'. Even after {1} retries, there were some problems with the request. The provider message is: '{2}'"), this.InstanceName, MAX_RETRIES, errorMessage))); + await MessageBus.INSTANCE.SendError(new DataErrorMessage(Icons.Material.Filled.CloudOff, string.Format(TB("Tried to communicate with the LLM provider '{0}'. Even after {1} retries, there were some problems with the request. The provider message is: '{2}'."), this.InstanceName, MAX_RETRIES, errorMessage))); return new HttpRateLimitedStreamResult(false, true, errorMessage ?? 
$"Failed after {MAX_RETRIES} retries; no provider message available", response); } @@ -522,4 +517,66 @@ public abstract class BaseProvider : IProvider, ISecretId streamReader.Dispose(); } + + /// + /// Parse and convert API parameters from a provided JSON string into a dictionary, + /// optionally merging additional parameters and removing specific keys. + /// + /// Optional list of keys to remove from the final dictionary + /// (case-insensitive). The parameters stream, model, and messages are removed by default. + protected IDictionary ParseAdditionalApiParameters( + params List keysToRemove) + { + if(string.IsNullOrWhiteSpace(this.AdditionalJsonApiParameters)) + return new Dictionary(); + + try + { + // Wrap the user-provided parameters in curly brackets to form a valid JSON object: + var json = $"{{{this.AdditionalJsonApiParameters}}}"; + var jsonDoc = JsonSerializer.Deserialize(json, JSON_SERIALIZER_OPTIONS); + var dict = ConvertToDictionary(jsonDoc); + + // Some keys are always removed because we set them: + keysToRemove.Add("stream"); + keysToRemove.Add("model"); + keysToRemove.Add("messages"); + + // Remove the specified keys (case-insensitive): + var removeSet = new HashSet(keysToRemove, StringComparer.OrdinalIgnoreCase); + foreach (var key in removeSet) + dict.Remove(key); + + return dict; + } + catch (JsonException ex) + { + this.logger.LogError("Failed to parse additional API parameters: {ExceptionMessage}", ex.Message); + return new Dictionary(); + } + } + + private static IDictionary ConvertToDictionary(JsonElement element) + { + return element.EnumerateObject() + .ToDictionary( + p => p.Name, + p => ConvertJsonValue(p.Value) ?? string.Empty + ); + } + + private static object? ConvertJsonValue(JsonElement element) => element.ValueKind switch + { + JsonValueKind.String => element.GetString(), + JsonValueKind.Number => element.TryGetInt32(out var i) ? i : + element.TryGetInt64(out var l) ? l : + element.TryGetDouble(out var d) ? 
d : + element.GetDecimal(), + JsonValueKind.True or JsonValueKind.False => element.GetBoolean(), + JsonValueKind.Null => string.Empty, + JsonValueKind.Object => ConvertToDictionary(element), + JsonValueKind.Array => element.EnumerateArray().Select(ConvertJsonValue).ToList(), + + _ => string.Empty, + }; } \ No newline at end of file diff --git a/app/MindWork AI Studio/Provider/DeepSeek/ProviderDeepSeek.cs b/app/MindWork AI Studio/Provider/DeepSeek/ProviderDeepSeek.cs index 67eac538..1063262c 100644 --- a/app/MindWork AI Studio/Provider/DeepSeek/ProviderDeepSeek.cs +++ b/app/MindWork AI Studio/Provider/DeepSeek/ProviderDeepSeek.cs @@ -36,6 +36,9 @@ public sealed class ProviderDeepSeek() : BaseProvider("https://api.deepseek.com/ Content = chatThread.PrepareSystemPrompt(settingsManager, chatThread), }; + // Parse the API parameters: + var apiParameters = this.ParseAdditionalApiParameters(); + // Prepare the DeepSeek HTTP chat request: var deepSeekChatRequest = JsonSerializer.Serialize(new ChatCompletionAPIRequest { @@ -63,6 +66,7 @@ public sealed class ProviderDeepSeek() : BaseProvider("https://api.deepseek.com/ } }).ToList()], Stream = true, + AdditionalApiParameters = apiParameters }, JSON_SERIALIZER_OPTIONS); async Task RequestBuilder() @@ -108,28 +112,6 @@ public sealed class ProviderDeepSeek() : BaseProvider("https://api.deepseek.com/ return Task.FromResult(Enumerable.Empty()); } - public override IReadOnlyCollection GetModelCapabilities(Model model) - { - var modelName = model.Id.ToLowerInvariant().AsSpan(); - - if(modelName.IndexOf("reasoner") is not -1) - return - [ - Capability.TEXT_INPUT, - Capability.TEXT_OUTPUT, - - Capability.ALWAYS_REASONING, - Capability.CHAT_COMPLETION_API, - ]; - - return - [ - Capability.TEXT_INPUT, - Capability.TEXT_OUTPUT, - Capability.CHAT_COMPLETION_API, - ]; - } - #endregion diff --git a/app/MindWork AI Studio/Provider/Fireworks/ChatRequest.cs b/app/MindWork AI Studio/Provider/Fireworks/ChatRequest.cs index a0e5a7ab..55154ece 
100644 --- a/app/MindWork AI Studio/Provider/Fireworks/ChatRequest.cs +++ b/app/MindWork AI Studio/Provider/Fireworks/ChatRequest.cs @@ -1,3 +1,5 @@ +using System.Text.Json.Serialization; + namespace AIStudio.Provider.Fireworks; /// @@ -10,4 +12,9 @@ public readonly record struct ChatRequest( string Model, IList Messages, bool Stream -); \ No newline at end of file +) +{ + // Attention: The "required" modifier is not supported for [JsonExtensionData]. + [JsonExtensionData] + public IDictionary AdditionalApiParameters { get; init; } = new Dictionary(); +} \ No newline at end of file diff --git a/app/MindWork AI Studio/Provider/Fireworks/ProviderFireworks.cs b/app/MindWork AI Studio/Provider/Fireworks/ProviderFireworks.cs index cf66169d..a02c692c 100644 --- a/app/MindWork AI Studio/Provider/Fireworks/ProviderFireworks.cs +++ b/app/MindWork AI Studio/Provider/Fireworks/ProviderFireworks.cs @@ -36,6 +36,9 @@ public class ProviderFireworks() : BaseProvider("https://api.fireworks.ai/infere Content = chatThread.PrepareSystemPrompt(settingsManager, chatThread), }; + // Parse the API parameters: + var apiParameters = this.ParseAdditionalApiParameters(); + // Prepare the Fireworks HTTP chat request: var fireworksChatRequest = JsonSerializer.Serialize(new ChatRequest { @@ -65,6 +68,7 @@ public class ProviderFireworks() : BaseProvider("https://api.fireworks.ai/infere // Right now, we only support streaming completions: Stream = true, + AdditionalApiParameters = apiParameters }, JSON_SERIALIZER_OPTIONS); async Task RequestBuilder() @@ -110,7 +114,5 @@ public class ProviderFireworks() : BaseProvider("https://api.fireworks.ai/infere return Task.FromResult(Enumerable.Empty()); } - public override IReadOnlyCollection GetModelCapabilities(Model model) => CapabilitiesOpenSource.GetCapabilities(model); - #endregion } \ No newline at end of file diff --git a/app/MindWork AI Studio/Provider/GWDG/ProviderGWDG.cs b/app/MindWork AI Studio/Provider/GWDG/ProviderGWDG.cs index 
e497fcf2..2a56bfd4 100644 --- a/app/MindWork AI Studio/Provider/GWDG/ProviderGWDG.cs +++ b/app/MindWork AI Studio/Provider/GWDG/ProviderGWDG.cs @@ -36,6 +36,9 @@ public sealed class ProviderGWDG() : BaseProvider("https://chat-ai.academiccloud Content = chatThread.PrepareSystemPrompt(settingsManager, chatThread), }; + // Parse the API parameters: + var apiParameters = this.ParseAdditionalApiParameters(); + // Prepare the GWDG HTTP chat request: var gwdgChatRequest = JsonSerializer.Serialize(new ChatCompletionAPIRequest { @@ -63,6 +66,7 @@ public sealed class ProviderGWDG() : BaseProvider("https://chat-ai.academiccloud } }).ToList()], Stream = true, + AdditionalApiParameters = apiParameters }, JSON_SERIALIZER_OPTIONS); async Task RequestBuilder() @@ -110,8 +114,6 @@ public sealed class ProviderGWDG() : BaseProvider("https://chat-ai.academiccloud return models.Where(model => model.Id.StartsWith("e5-", StringComparison.InvariantCultureIgnoreCase)); } - public override IReadOnlyCollection GetModelCapabilities(Model model) => CapabilitiesOpenSource.GetCapabilities(model); - #endregion private async Task> LoadModels(CancellationToken token, string? apiKeyProvisional = null) diff --git a/app/MindWork AI Studio/Provider/Google/ChatRequest.cs b/app/MindWork AI Studio/Provider/Google/ChatRequest.cs index 36b4abde..4fcd03cc 100644 --- a/app/MindWork AI Studio/Provider/Google/ChatRequest.cs +++ b/app/MindWork AI Studio/Provider/Google/ChatRequest.cs @@ -1,3 +1,4 @@ +using System.Text.Json.Serialization; using AIStudio.Provider.OpenAI; namespace AIStudio.Provider.Google; @@ -12,4 +13,9 @@ public readonly record struct ChatRequest( string Model, IList Messages, bool Stream -); \ No newline at end of file +) +{ + // Attention: The "required" modifier is not supported for [JsonExtensionData]. 
+ [JsonExtensionData] + public IDictionary AdditionalApiParameters { get; init; } = new Dictionary(); +} \ No newline at end of file diff --git a/app/MindWork AI Studio/Provider/Google/ProviderGoogle.cs b/app/MindWork AI Studio/Provider/Google/ProviderGoogle.cs index 8586baf7..8dcf0c96 100644 --- a/app/MindWork AI Studio/Provider/Google/ProviderGoogle.cs +++ b/app/MindWork AI Studio/Provider/Google/ProviderGoogle.cs @@ -36,6 +36,9 @@ public class ProviderGoogle() : BaseProvider("https://generativelanguage.googlea Content = chatThread.PrepareSystemPrompt(settingsManager, chatThread), }; + // Parse the API parameters: + var apiParameters = this.ParseAdditionalApiParameters(); + // Prepare the Google HTTP chat request: var geminiChatRequest = JsonSerializer.Serialize(new ChatRequest { @@ -65,6 +68,7 @@ public class ProviderGoogle() : BaseProvider("https://generativelanguage.googlea // Right now, we only support streaming completions: Stream = true, + AdditionalApiParameters = apiParameters }, JSON_SERIALIZER_OPTIONS); async Task RequestBuilder() @@ -122,94 +126,6 @@ public class ProviderGoogle() : BaseProvider("https://generativelanguage.googlea .Select(n => new Provider.Model(n.Name.Replace("models/", string.Empty), n.DisplayName)); } - public override IReadOnlyCollection GetModelCapabilities(Provider.Model model) - { - var modelName = model.Id.ToLowerInvariant().AsSpan(); - - if (modelName.IndexOf("gemini-") is not -1) - { - // Reasoning models: - if (modelName.IndexOf("gemini-2.5") is not -1) - return - [ - Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, Capability.AUDIO_INPUT, - Capability.SPEECH_INPUT, Capability.VIDEO_INPUT, - - Capability.TEXT_OUTPUT, - - Capability.ALWAYS_REASONING, Capability.FUNCTION_CALLING, - Capability.CHAT_COMPLETION_API, - ]; - - // Image generation: - if(modelName.IndexOf("-2.0-flash-preview-image-") is not -1) - return - [ - Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, Capability.AUDIO_INPUT, - 
Capability.SPEECH_INPUT, Capability.VIDEO_INPUT, - - Capability.TEXT_OUTPUT, Capability.IMAGE_OUTPUT, - Capability.CHAT_COMPLETION_API, - ]; - - // Realtime model: - if(modelName.IndexOf("-2.0-flash-live-") is not -1) - return - [ - Capability.TEXT_INPUT, Capability.AUDIO_INPUT, Capability.SPEECH_INPUT, - Capability.VIDEO_INPUT, - - Capability.TEXT_OUTPUT, Capability.SPEECH_OUTPUT, - - Capability.FUNCTION_CALLING, - Capability.CHAT_COMPLETION_API, - ]; - - // The 2.0 flash models cannot call functions: - if(modelName.IndexOf("-2.0-flash-") is not -1) - return - [ - Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, Capability.AUDIO_INPUT, - Capability.SPEECH_INPUT, Capability.VIDEO_INPUT, - - Capability.TEXT_OUTPUT, - Capability.CHAT_COMPLETION_API, - ]; - - // The old 1.0 pro vision model: - if(modelName.IndexOf("pro-vision") is not -1) - return - [ - Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, - - Capability.TEXT_OUTPUT, - Capability.CHAT_COMPLETION_API, - ]; - - // Default to all other Gemini models: - return - [ - Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, Capability.AUDIO_INPUT, - Capability.SPEECH_INPUT, Capability.VIDEO_INPUT, - - Capability.TEXT_OUTPUT, - - Capability.FUNCTION_CALLING, - Capability.CHAT_COMPLETION_API, - ]; - } - - // Default for all other models: - return - [ - Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, - - Capability.TEXT_OUTPUT, - - Capability.FUNCTION_CALLING, - Capability.CHAT_COMPLETION_API, - ]; - } #endregion diff --git a/app/MindWork AI Studio/Provider/Groq/ChatRequest.cs b/app/MindWork AI Studio/Provider/Groq/ChatRequest.cs index 76d23b93..e45683fe 100644 --- a/app/MindWork AI Studio/Provider/Groq/ChatRequest.cs +++ b/app/MindWork AI Studio/Provider/Groq/ChatRequest.cs @@ -1,3 +1,4 @@ +using System.Text.Json.Serialization; using AIStudio.Provider.OpenAI; namespace AIStudio.Provider.Groq; @@ -14,4 +15,9 @@ public readonly record struct ChatRequest( IList Messages, bool Stream, int Seed -); 
\ No newline at end of file +) +{ + // Attention: The "required" modifier is not supported for [JsonExtensionData]. + [JsonExtensionData] + public IDictionary AdditionalApiParameters { get; init; } = new Dictionary(); +} \ No newline at end of file diff --git a/app/MindWork AI Studio/Provider/Groq/ProviderGroq.cs b/app/MindWork AI Studio/Provider/Groq/ProviderGroq.cs index 88652a15..5cc7b3df 100644 --- a/app/MindWork AI Studio/Provider/Groq/ProviderGroq.cs +++ b/app/MindWork AI Studio/Provider/Groq/ProviderGroq.cs @@ -36,6 +36,9 @@ public class ProviderGroq() : BaseProvider("https://api.groq.com/openai/v1/", LO Content = chatThread.PrepareSystemPrompt(settingsManager, chatThread), }; + // Parse the API parameters: + var apiParameters = this.ParseAdditionalApiParameters(); + // Prepare the OpenAI HTTP chat request: var groqChatRequest = JsonSerializer.Serialize(new ChatRequest { @@ -65,6 +68,7 @@ public class ProviderGroq() : BaseProvider("https://api.groq.com/openai/v1/", LO // Right now, we only support streaming completions: Stream = true, + AdditionalApiParameters = apiParameters }, JSON_SERIALIZER_OPTIONS); async Task RequestBuilder() @@ -110,8 +114,6 @@ public class ProviderGroq() : BaseProvider("https://api.groq.com/openai/v1/", LO return Task.FromResult(Enumerable.Empty()); } - public override IReadOnlyCollection GetModelCapabilities(Model model) => CapabilitiesOpenSource.GetCapabilities(model); - #endregion private async Task> LoadModels(CancellationToken token, string? 
apiKeyProvisional = null) diff --git a/app/MindWork AI Studio/Provider/Helmholtz/ProviderHelmholtz.cs b/app/MindWork AI Studio/Provider/Helmholtz/ProviderHelmholtz.cs index acc0daba..f0b69bb4 100644 --- a/app/MindWork AI Studio/Provider/Helmholtz/ProviderHelmholtz.cs +++ b/app/MindWork AI Studio/Provider/Helmholtz/ProviderHelmholtz.cs @@ -36,6 +36,9 @@ public sealed class ProviderHelmholtz() : BaseProvider("https://api.helmholtz-bl Content = chatThread.PrepareSystemPrompt(settingsManager, chatThread), }; + // Parse the API parameters: + var apiParameters = this.ParseAdditionalApiParameters(); + // Prepare the Helmholtz HTTP chat request: var helmholtzChatRequest = JsonSerializer.Serialize(new ChatCompletionAPIRequest { @@ -63,6 +66,7 @@ public sealed class ProviderHelmholtz() : BaseProvider("https://api.helmholtz-bl } }).ToList()], Stream = true, + AdditionalApiParameters = apiParameters }, JSON_SERIALIZER_OPTIONS); async Task RequestBuilder() @@ -114,8 +118,6 @@ public sealed class ProviderHelmholtz() : BaseProvider("https://api.helmholtz-bl model.Id.Contains("gritlm", StringComparison.InvariantCultureIgnoreCase)); } - public override IReadOnlyCollection GetModelCapabilities(Model model) => CapabilitiesOpenSource.GetCapabilities(model); - #endregion private async Task> LoadModels(CancellationToken token, string? 
apiKeyProvisional = null) diff --git a/app/MindWork AI Studio/Provider/HuggingFace/ProviderHuggingFace.cs b/app/MindWork AI Studio/Provider/HuggingFace/ProviderHuggingFace.cs index 12721d78..6cfb8027 100644 --- a/app/MindWork AI Studio/Provider/HuggingFace/ProviderHuggingFace.cs +++ b/app/MindWork AI Studio/Provider/HuggingFace/ProviderHuggingFace.cs @@ -41,6 +41,9 @@ public sealed class ProviderHuggingFace : BaseProvider Content = chatThread.PrepareSystemPrompt(settingsManager, chatThread), }; + // Parse the API parameters: + var apiParameters = this.ParseAdditionalApiParameters(); + // Prepare the HuggingFace HTTP chat request: var huggingfaceChatRequest = JsonSerializer.Serialize(new ChatCompletionAPIRequest { @@ -68,6 +71,7 @@ public sealed class ProviderHuggingFace : BaseProvider } }).ToList()], Stream = true, + AdditionalApiParameters = apiParameters }, JSON_SERIALIZER_OPTIONS); async Task RequestBuilder() @@ -113,7 +117,5 @@ public sealed class ProviderHuggingFace : BaseProvider return Task.FromResult(Enumerable.Empty()); } - public override IReadOnlyCollection GetModelCapabilities(Model model) => CapabilitiesOpenSource.GetCapabilities(model); - #endregion } \ No newline at end of file diff --git a/app/MindWork AI Studio/Provider/IProvider.cs b/app/MindWork AI Studio/Provider/IProvider.cs index cede6ca4..e883fec8 100644 --- a/app/MindWork AI Studio/Provider/IProvider.cs +++ b/app/MindWork AI Studio/Provider/IProvider.cs @@ -19,6 +19,11 @@ public interface IProvider /// public string InstanceName { get; } + /// + /// The additional API parameters. + /// + public string AdditionalJsonApiParameters { get; } + /// /// Starts a chat completion stream. /// @@ -64,10 +69,4 @@ public interface IProvider /// The list of embedding models. public Task> GetEmbeddingModels(string? apiKeyProvisional = null, CancellationToken token = default); - /// - /// Get the capabilities of a model. - /// - /// The model to get the capabilities for. 
- /// The capabilities of the model. - public IReadOnlyCollection GetModelCapabilities(Model model); } \ No newline at end of file diff --git a/app/MindWork AI Studio/Provider/LLMProvidersExtensions.cs b/app/MindWork AI Studio/Provider/LLMProvidersExtensions.cs index d095c937..52fe30a1 100644 --- a/app/MindWork AI Studio/Provider/LLMProvidersExtensions.cs +++ b/app/MindWork AI Studio/Provider/LLMProvidersExtensions.cs @@ -144,7 +144,7 @@ public static class LLMProvidersExtensions /// The provider instance. public static IProvider CreateProvider(this AIStudio.Settings.Provider providerSettings) { - return providerSettings.UsedLLMProvider.CreateProvider(providerSettings.InstanceName, providerSettings.Host, providerSettings.Hostname, providerSettings.Model, providerSettings.HFInferenceProvider); + return providerSettings.UsedLLMProvider.CreateProvider(providerSettings.InstanceName, providerSettings.Host, providerSettings.Hostname, providerSettings.Model, providerSettings.HFInferenceProvider, providerSettings.AdditionalJsonApiParameters); } /// @@ -157,29 +157,29 @@ public static class LLMProvidersExtensions return embeddingProviderSettings.UsedLLMProvider.CreateProvider(embeddingProviderSettings.Name, embeddingProviderSettings.Host, embeddingProviderSettings.Hostname, embeddingProviderSettings.Model, HFInferenceProvider.NONE); } - private static IProvider CreateProvider(this LLMProviders provider, string instanceName, Host host, string hostname, Model model, HFInferenceProvider inferenceProvider) + private static IProvider CreateProvider(this LLMProviders provider, string instanceName, Host host, string hostname, Model model, HFInferenceProvider inferenceProvider, string expertProviderApiParameter = "") { try { return provider switch { - LLMProviders.OPEN_AI => new ProviderOpenAI { InstanceName = instanceName }, - LLMProviders.ANTHROPIC => new ProviderAnthropic { InstanceName = instanceName }, - LLMProviders.MISTRAL => new ProviderMistral { InstanceName = instanceName 
}, - LLMProviders.GOOGLE => new ProviderGoogle { InstanceName = instanceName }, - LLMProviders.X => new ProviderX { InstanceName = instanceName }, - LLMProviders.DEEP_SEEK => new ProviderDeepSeek { InstanceName = instanceName }, - LLMProviders.ALIBABA_CLOUD => new ProviderAlibabaCloud { InstanceName = instanceName }, - LLMProviders.PERPLEXITY => new ProviderPerplexity { InstanceName = instanceName }, + LLMProviders.OPEN_AI => new ProviderOpenAI { InstanceName = instanceName, AdditionalJsonApiParameters = expertProviderApiParameter }, + LLMProviders.ANTHROPIC => new ProviderAnthropic { InstanceName = instanceName, AdditionalJsonApiParameters = expertProviderApiParameter }, + LLMProviders.MISTRAL => new ProviderMistral { InstanceName = instanceName, AdditionalJsonApiParameters = expertProviderApiParameter }, + LLMProviders.GOOGLE => new ProviderGoogle { InstanceName = instanceName, AdditionalJsonApiParameters = expertProviderApiParameter }, + LLMProviders.X => new ProviderX { InstanceName = instanceName, AdditionalJsonApiParameters = expertProviderApiParameter }, + LLMProviders.DEEP_SEEK => new ProviderDeepSeek { InstanceName = instanceName, AdditionalJsonApiParameters = expertProviderApiParameter }, + LLMProviders.ALIBABA_CLOUD => new ProviderAlibabaCloud { InstanceName = instanceName, AdditionalJsonApiParameters = expertProviderApiParameter }, + LLMProviders.PERPLEXITY => new ProviderPerplexity { InstanceName = instanceName, AdditionalJsonApiParameters = expertProviderApiParameter }, - LLMProviders.GROQ => new ProviderGroq { InstanceName = instanceName }, - LLMProviders.FIREWORKS => new ProviderFireworks { InstanceName = instanceName }, - LLMProviders.HUGGINGFACE => new ProviderHuggingFace(inferenceProvider, model) { InstanceName = instanceName }, + LLMProviders.GROQ => new ProviderGroq { InstanceName = instanceName, AdditionalJsonApiParameters = expertProviderApiParameter }, + LLMProviders.FIREWORKS => new ProviderFireworks { InstanceName = instanceName, 
AdditionalJsonApiParameters = expertProviderApiParameter }, + LLMProviders.HUGGINGFACE => new ProviderHuggingFace(inferenceProvider, model) { InstanceName = instanceName, AdditionalJsonApiParameters = expertProviderApiParameter }, - LLMProviders.SELF_HOSTED => new ProviderSelfHosted(host, hostname) { InstanceName = instanceName }, + LLMProviders.SELF_HOSTED => new ProviderSelfHosted(host, hostname) { InstanceName = instanceName, AdditionalJsonApiParameters = expertProviderApiParameter }, - LLMProviders.HELMHOLTZ => new ProviderHelmholtz { InstanceName = instanceName }, - LLMProviders.GWDG => new ProviderGWDG { InstanceName = instanceName }, + LLMProviders.HELMHOLTZ => new ProviderHelmholtz { InstanceName = instanceName, AdditionalJsonApiParameters = expertProviderApiParameter }, + LLMProviders.GWDG => new ProviderGWDG { InstanceName = instanceName, AdditionalJsonApiParameters = expertProviderApiParameter }, _ => new NoProvider(), }; diff --git a/app/MindWork AI Studio/Provider/Mistral/ChatRequest.cs b/app/MindWork AI Studio/Provider/Mistral/ChatRequest.cs index 913f8085..b12dd15d 100644 --- a/app/MindWork AI Studio/Provider/Mistral/ChatRequest.cs +++ b/app/MindWork AI Studio/Provider/Mistral/ChatRequest.cs @@ -1,3 +1,5 @@ +using System.Text.Json.Serialization; + namespace AIStudio.Provider.Mistral; /// @@ -14,4 +16,9 @@ public readonly record struct ChatRequest( bool Stream, int RandomSeed, bool SafePrompt = false -); \ No newline at end of file +) +{ + // Attention: The "required" modifier is not supported for [JsonExtensionData]. 
+ [JsonExtensionData] + public IDictionary AdditionalApiParameters { get; init; } = new Dictionary(); +} \ No newline at end of file diff --git a/app/MindWork AI Studio/Provider/Mistral/ProviderMistral.cs b/app/MindWork AI Studio/Provider/Mistral/ProviderMistral.cs index 29b34d87..01b0db11 100644 --- a/app/MindWork AI Studio/Provider/Mistral/ProviderMistral.cs +++ b/app/MindWork AI Studio/Provider/Mistral/ProviderMistral.cs @@ -34,6 +34,9 @@ public sealed class ProviderMistral() : BaseProvider("https://api.mistral.ai/v1/ Content = chatThread.PrepareSystemPrompt(settingsManager, chatThread), }; + // Parse the API parameters: + var apiParameters = this.ParseAdditionalApiParameters(); + // Prepare the Mistral HTTP chat request: var mistralChatRequest = JsonSerializer.Serialize(new ChatRequest { @@ -63,7 +66,8 @@ public sealed class ProviderMistral() : BaseProvider("https://api.mistral.ai/v1/ // Right now, we only support streaming completions: Stream = true, - SafePrompt = false, + SafePrompt = apiParameters.TryGetValue("safe_prompt", out var value) && value is true, + AdditionalApiParameters = apiParameters }, JSON_SERIALIZER_OPTIONS); async Task RequestBuilder() @@ -122,56 +126,6 @@ public sealed class ProviderMistral() : BaseProvider("https://api.mistral.ai/v1/ return Task.FromResult(Enumerable.Empty()); } - public override IReadOnlyCollection GetModelCapabilities(Provider.Model model) - { - var modelName = model.Id.ToLowerInvariant().AsSpan(); - - // Pixtral models are able to do process images: - if (modelName.IndexOf("pixtral") is not -1) - return - [ - Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, - Capability.TEXT_OUTPUT, - - Capability.FUNCTION_CALLING, - Capability.CHAT_COMPLETION_API, - ]; - - // Mistral medium: - if (modelName.IndexOf("mistral-medium-") is not -1) - return - [ - Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, - Capability.TEXT_OUTPUT, - - Capability.FUNCTION_CALLING, - Capability.CHAT_COMPLETION_API, - ]; - - // Mistral 
small: - if (modelName.IndexOf("mistral-small-") is not -1) - return - [ - Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, - Capability.TEXT_OUTPUT, - - Capability.FUNCTION_CALLING, - Capability.CHAT_COMPLETION_API, - ]; - - // Mistral saba: - if (modelName.IndexOf("mistral-saba-") is not -1) - return - [ - Capability.TEXT_INPUT, - Capability.TEXT_OUTPUT, - Capability.CHAT_COMPLETION_API, - ]; - - // Default: - return CapabilitiesOpenSource.GetCapabilities(model); - } - #endregion private async Task LoadModelList(string? apiKeyProvisional, CancellationToken token) diff --git a/app/MindWork AI Studio/Provider/NoProvider.cs b/app/MindWork AI Studio/Provider/NoProvider.cs index b06ce2e0..b87820ca 100644 --- a/app/MindWork AI Studio/Provider/NoProvider.cs +++ b/app/MindWork AI Studio/Provider/NoProvider.cs @@ -13,6 +13,9 @@ public class NoProvider : IProvider public string InstanceName { get; set; } = "None"; + /// + public string AdditionalJsonApiParameters { get; init; } = string.Empty; + public Task> GetTextModels(string? apiKeyProvisional = null, CancellationToken token = default) => Task.FromResult>([]); public Task> GetImageModels(string? apiKeyProvisional = null, CancellationToken token = default) => Task.FromResult>([]); diff --git a/app/MindWork AI Studio/Provider/OpenAI/ChatCompletionAPIRequest.cs b/app/MindWork AI Studio/Provider/OpenAI/ChatCompletionAPIRequest.cs index 21236284..51805910 100644 --- a/app/MindWork AI Studio/Provider/OpenAI/ChatCompletionAPIRequest.cs +++ b/app/MindWork AI Studio/Provider/OpenAI/ChatCompletionAPIRequest.cs @@ -1,3 +1,5 @@ +using System.Text.Json.Serialization; + namespace AIStudio.Provider.OpenAI; /// @@ -15,4 +17,8 @@ public record ChatCompletionAPIRequest( public ChatCompletionAPIRequest() : this(string.Empty, [], true) { } + + // Attention: The "required" modifier is not supported for [JsonExtensionData]. 
+ [JsonExtensionData] + public IDictionary AdditionalApiParameters { get; init; } = new Dictionary(); } \ No newline at end of file diff --git a/app/MindWork AI Studio/Provider/OpenAI/ProviderOpenAI.cs b/app/MindWork AI Studio/Provider/OpenAI/ProviderOpenAI.cs index 406c1f53..be38cedc 100644 --- a/app/MindWork AI Studio/Provider/OpenAI/ProviderOpenAI.cs +++ b/app/MindWork AI Studio/Provider/OpenAI/ProviderOpenAI.cs @@ -59,7 +59,7 @@ public sealed class ProviderOpenAI() : BaseProvider("https://api.openai.com/v1/" }; // Read the model capabilities: - var modelCapabilities = this.GetModelCapabilities(chatModel); + var modelCapabilities = ProviderExtensions.GetModelCapabilitiesOpenAI(chatModel); // Check if we are using the Responses API or the Chat Completion API: var usingResponsesAPI = modelCapabilities.Contains(Capability.RESPONSES_API); @@ -85,6 +85,10 @@ public sealed class ProviderOpenAI() : BaseProvider("https://api.openai.com/v1/" _ => [] }; + + // Parse the API parameters: + var apiParameters = this.ParseAdditionalApiParameters("input", "store", "tools"); + // // Create the request: either for the Responses API or the Chat Completion API // @@ -119,6 +123,7 @@ public sealed class ProviderOpenAI() : BaseProvider("https://api.openai.com/v1/" // Right now, we only support streaming completions: Stream = true, + AdditionalApiParameters = apiParameters }, JSON_SERIALIZER_OPTIONS), // Responses API request: @@ -157,6 +162,9 @@ public sealed class ProviderOpenAI() : BaseProvider("https://api.openai.com/v1/" // Tools we want to use: Tools = tools, + // Additional API parameters: + AdditionalApiParameters = apiParameters + }, JSON_SERIALIZER_OPTIONS), }; @@ -215,144 +223,6 @@ public sealed class ProviderOpenAI() : BaseProvider("https://api.openai.com/v1/" return this.LoadModels(["text-embedding-"], token, apiKeyProvisional); } - public override IReadOnlyCollection GetModelCapabilities(Model model) - { - var modelName = model.Id.ToLowerInvariant().AsSpan(); - - if 
(modelName is "gpt-4o-search-preview") - return - [ - Capability.TEXT_INPUT, - Capability.TEXT_OUTPUT, - - Capability.WEB_SEARCH, - Capability.CHAT_COMPLETION_API, - ]; - - if (modelName is "gpt-4o-mini-search-preview") - return - [ - Capability.TEXT_INPUT, - Capability.TEXT_OUTPUT, - - Capability.WEB_SEARCH, - Capability.CHAT_COMPLETION_API, - ]; - - if (modelName.StartsWith("o1-mini")) - return - [ - Capability.TEXT_INPUT, - Capability.TEXT_OUTPUT, - - Capability.ALWAYS_REASONING, - Capability.CHAT_COMPLETION_API, - ]; - - if(modelName is "gpt-3.5-turbo") - return - [ - Capability.TEXT_INPUT, - Capability.TEXT_OUTPUT, - Capability.RESPONSES_API, - ]; - - if(modelName.StartsWith("gpt-3.5")) - return - [ - Capability.TEXT_INPUT, - Capability.TEXT_OUTPUT, - Capability.CHAT_COMPLETION_API, - ]; - - if (modelName.StartsWith("chatgpt-4o-")) - return - [ - Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, - Capability.TEXT_OUTPUT, - Capability.RESPONSES_API, - ]; - - if (modelName.StartsWith("o3-mini")) - return - [ - Capability.TEXT_INPUT, - Capability.TEXT_OUTPUT, - - Capability.ALWAYS_REASONING, Capability.FUNCTION_CALLING, - Capability.RESPONSES_API, - ]; - - if (modelName.StartsWith("o4-mini") || modelName.StartsWith("o3")) - return - [ - Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, - Capability.TEXT_OUTPUT, - - Capability.ALWAYS_REASONING, Capability.FUNCTION_CALLING, - Capability.WEB_SEARCH, - Capability.RESPONSES_API, - ]; - - if (modelName.StartsWith("o1")) - return - [ - Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, - Capability.TEXT_OUTPUT, - - Capability.ALWAYS_REASONING, Capability.FUNCTION_CALLING, - Capability.RESPONSES_API, - ]; - - if(modelName.StartsWith("gpt-4-turbo")) - return - [ - Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, - Capability.TEXT_OUTPUT, - - Capability.FUNCTION_CALLING, - Capability.RESPONSES_API, - ]; - - if(modelName is "gpt-4" || modelName.StartsWith("gpt-4-")) - return - [ - 
Capability.TEXT_INPUT, - Capability.TEXT_OUTPUT, - Capability.RESPONSES_API, - ]; - - if(modelName.StartsWith("gpt-5-nano")) - return - [ - Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, - Capability.TEXT_OUTPUT, - - Capability.FUNCTION_CALLING, Capability.ALWAYS_REASONING, - Capability.RESPONSES_API, - ]; - - if(modelName is "gpt-5" || modelName.StartsWith("gpt-5-")) - return - [ - Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, - Capability.TEXT_OUTPUT, - - Capability.FUNCTION_CALLING, Capability.ALWAYS_REASONING, - Capability.WEB_SEARCH, - Capability.RESPONSES_API, - ]; - - return - [ - Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, - Capability.TEXT_OUTPUT, - - Capability.FUNCTION_CALLING, - Capability.RESPONSES_API, - ]; - } - #endregion private async Task> LoadModels(string[] prefixes, CancellationToken token, string? apiKeyProvisional = null) diff --git a/app/MindWork AI Studio/Provider/OpenAI/ResponsesAPIRequest.cs b/app/MindWork AI Studio/Provider/OpenAI/ResponsesAPIRequest.cs index b5525b8f..e3c9541b 100644 --- a/app/MindWork AI Studio/Provider/OpenAI/ResponsesAPIRequest.cs +++ b/app/MindWork AI Studio/Provider/OpenAI/ResponsesAPIRequest.cs @@ -1,3 +1,5 @@ +using System.Text.Json.Serialization; + namespace AIStudio.Provider.OpenAI; /// @@ -18,4 +20,8 @@ public record ResponsesAPIRequest( public ResponsesAPIRequest() : this(string.Empty, [], true, false, []) { } + + // Attention: The "required" modifier is not supported for [JsonExtensionData]. 
+ [JsonExtensionData] + public IDictionary AdditionalApiParameters { get; init; } = new Dictionary(); } \ No newline at end of file diff --git a/app/MindWork AI Studio/Provider/Perplexity/ProviderPerplexity.cs b/app/MindWork AI Studio/Provider/Perplexity/ProviderPerplexity.cs index 4c65e5f4..a15a7e3a 100644 --- a/app/MindWork AI Studio/Provider/Perplexity/ProviderPerplexity.cs +++ b/app/MindWork AI Studio/Provider/Perplexity/ProviderPerplexity.cs @@ -45,6 +45,9 @@ public sealed class ProviderPerplexity() : BaseProvider("https://api.perplexity. Content = chatThread.PrepareSystemPrompt(settingsManager, chatThread), }; + // Parse the API parameters: + var apiParameters = this.ParseAdditionalApiParameters(); + // Prepare the Perplexity HTTP chat request: var perplexityChatRequest = JsonSerializer.Serialize(new ChatCompletionAPIRequest { @@ -72,6 +75,7 @@ public sealed class ProviderPerplexity() : BaseProvider("https://api.perplexity. } }).ToList()], Stream = true, + AdditionalApiParameters = apiParameters }, JSON_SERIALIZER_OPTIONS); async Task RequestBuilder() @@ -117,38 +121,6 @@ public sealed class ProviderPerplexity() : BaseProvider("https://api.perplexity. 
return Task.FromResult(Enumerable.Empty()); } - public override IReadOnlyCollection GetModelCapabilities(Model model) - { - var modelName = model.Id.ToLowerInvariant().AsSpan(); - - if(modelName.IndexOf("reasoning") is not -1 || - modelName.IndexOf("deep-research") is not -1) - return - [ - Capability.TEXT_INPUT, - Capability.MULTIPLE_IMAGE_INPUT, - - Capability.TEXT_OUTPUT, - Capability.IMAGE_OUTPUT, - - Capability.ALWAYS_REASONING, - Capability.WEB_SEARCH, - Capability.CHAT_COMPLETION_API, - ]; - - return - [ - Capability.TEXT_INPUT, - Capability.MULTIPLE_IMAGE_INPUT, - - Capability.TEXT_OUTPUT, - Capability.IMAGE_OUTPUT, - - Capability.WEB_SEARCH, - Capability.CHAT_COMPLETION_API, - ]; - } - #endregion private Task> LoadModels() => Task.FromResult>(KNOWN_MODELS); diff --git a/app/MindWork AI Studio/Provider/SelfHosted/ChatRequest.cs b/app/MindWork AI Studio/Provider/SelfHosted/ChatRequest.cs index db05e365..4791692c 100644 --- a/app/MindWork AI Studio/Provider/SelfHosted/ChatRequest.cs +++ b/app/MindWork AI Studio/Provider/SelfHosted/ChatRequest.cs @@ -1,3 +1,5 @@ +using System.Text.Json.Serialization; + namespace AIStudio.Provider.SelfHosted; /// @@ -10,4 +12,9 @@ public readonly record struct ChatRequest( string Model, IList Messages, bool Stream -); \ No newline at end of file +) +{ + // Attention: The "required" modifier is not supported for [JsonExtensionData]. 
+ [JsonExtensionData] + public IDictionary AdditionalApiParameters { get; init; } = new Dictionary(); +} \ No newline at end of file diff --git a/app/MindWork AI Studio/Provider/SelfHosted/ProviderSelfHosted.cs b/app/MindWork AI Studio/Provider/SelfHosted/ProviderSelfHosted.cs index df64be6b..abb15532 100644 --- a/app/MindWork AI Studio/Provider/SelfHosted/ProviderSelfHosted.cs +++ b/app/MindWork AI Studio/Provider/SelfHosted/ProviderSelfHosted.cs @@ -32,6 +32,9 @@ public sealed class ProviderSelfHosted(Host host, string hostname) : BaseProvide Content = chatThread.PrepareSystemPrompt(settingsManager, chatThread), }; + // Parse the API parameters: + var apiParameters = this.ParseAdditionalApiParameters(); + // Prepare the OpenAI HTTP chat request: var providerChatRequest = JsonSerializer.Serialize(new ChatRequest { @@ -60,7 +63,8 @@ public sealed class ProviderSelfHosted(Host host, string hostname) : BaseProvide }).ToList()], // Right now, we only support streaming completions: - Stream = true + Stream = true, + AdditionalApiParameters = apiParameters }, JSON_SERIALIZER_OPTIONS); async Task RequestBuilder() @@ -142,8 +146,6 @@ public sealed class ProviderSelfHosted(Host host, string hostname) : BaseProvide } } - public override IReadOnlyCollection GetModelCapabilities(Provider.Model model) => CapabilitiesOpenSource.GetCapabilities(model); - #endregion private async Task> LoadModels(string[] ignorePhrases, string[] filterPhrases, CancellationToken token, string? 
apiKeyProvisional = null) diff --git a/app/MindWork AI Studio/Provider/X/ProviderX.cs b/app/MindWork AI Studio/Provider/X/ProviderX.cs index d649eecd..b1743c53 100644 --- a/app/MindWork AI Studio/Provider/X/ProviderX.cs +++ b/app/MindWork AI Studio/Provider/X/ProviderX.cs @@ -36,6 +36,9 @@ public sealed class ProviderX() : BaseProvider("https://api.x.ai/v1/", LOGGER) Content = chatThread.PrepareSystemPrompt(settingsManager, chatThread), }; + // Parse the API parameters: + var apiParameters = this.ParseAdditionalApiParameters(); + // Prepare the xAI HTTP chat request: var xChatRequest = JsonSerializer.Serialize(new ChatCompletionAPIRequest { @@ -65,6 +68,7 @@ public sealed class ProviderX() : BaseProvider("https://api.x.ai/v1/", LOGGER) // Right now, we only support streaming completions: Stream = true, + AdditionalApiParameters = apiParameters }, JSON_SERIALIZER_OPTIONS); async Task RequestBuilder() @@ -111,8 +115,6 @@ public sealed class ProviderX() : BaseProvider("https://api.x.ai/v1/", LOGGER) return Task.FromResult>([]); } - public override IReadOnlyCollection GetModelCapabilities(Model model) => CapabilitiesOpenSource.GetCapabilities(model); - #endregion private async Task> LoadModels(string[] prefixes, CancellationToken token, string? 
apiKeyProvisional = null) diff --git a/app/MindWork AI Studio/Settings/Provider.cs b/app/MindWork AI Studio/Settings/Provider.cs index 94a1a747..59bef6f3 100644 --- a/app/MindWork AI Studio/Settings/Provider.cs +++ b/app/MindWork AI Studio/Settings/Provider.cs @@ -31,7 +31,8 @@ public sealed record Provider( Guid EnterpriseConfigurationPluginId = default, string Hostname = "http://localhost:1234", Host Host = Host.NONE, - HFInferenceProvider HFInferenceProvider = HFInferenceProvider.NONE) : ConfigurationBaseObject, ISecretId + HFInferenceProvider HFInferenceProvider = HFInferenceProvider.NONE, + string AdditionalJsonApiParameters = "") : ConfigurationBaseObject, ISecretId { private static readonly ILogger LOGGER = Program.LOGGER_FACTORY.CreateLogger(); @@ -132,6 +133,12 @@ public sealed record Provider( LOGGER.LogWarning($"The configured provider {idx} does not contain a valid model configuration."); return false; } + + if (!table.TryGetValue("AdditionalJsonApiParameters", out var additionalJsonApiParametersValue) || !additionalJsonApiParametersValue.TryRead(out var additionalJsonApiParameters)) + { + LOGGER.LogWarning($"The configured provider {idx} does not contain valid additional JSON API parameters."); + return false; + } provider = new Provider { @@ -144,7 +151,8 @@ public sealed record Provider( IsEnterpriseConfiguration = true, EnterpriseConfigurationPluginId = configPluginId, Hostname = hostname, - Host = host + Host = host, + AdditionalJsonApiParameters = additionalJsonApiParameters, }; return true; diff --git a/app/MindWork AI Studio/Settings/ProviderExtensions.Alibaba.cs b/app/MindWork AI Studio/Settings/ProviderExtensions.Alibaba.cs new file mode 100644 index 00000000..c7e1a20e --- /dev/null +++ b/app/MindWork AI Studio/Settings/ProviderExtensions.Alibaba.cs @@ -0,0 +1,84 @@ +using AIStudio.Provider; + +namespace AIStudio.Settings; + +public static partial class ProviderExtensions +{ + public static List GetModelCapabilitiesAlibaba(Model model) + { + 
var modelName = model.Id.ToLowerInvariant().AsSpan(); + + // Qwen models: + if (modelName.StartsWith("qwen")) + { + // Check for omni models: + if (modelName.IndexOf("omni") is not -1) + return + [ + Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, + Capability.AUDIO_INPUT, Capability.SPEECH_INPUT, + Capability.VIDEO_INPUT, + + Capability.TEXT_OUTPUT, Capability.SPEECH_OUTPUT, + + Capability.CHAT_COMPLETION_API, + ]; + + // Check for Qwen 3: + if(modelName.StartsWith("qwen3")) + return + [ + Capability.TEXT_INPUT, + Capability.TEXT_OUTPUT, + + Capability.OPTIONAL_REASONING, Capability.FUNCTION_CALLING, + Capability.CHAT_COMPLETION_API, + ]; + + if(modelName.IndexOf("-vl-") is not -1) + return + [ + Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, + Capability.TEXT_OUTPUT, + + Capability.CHAT_COMPLETION_API, + ]; + } + + // QwQ models: + if (modelName.StartsWith("qwq")) + { + return + [ + Capability.TEXT_INPUT, + Capability.TEXT_OUTPUT, + + Capability.ALWAYS_REASONING, Capability.FUNCTION_CALLING, + Capability.CHAT_COMPLETION_API, + ]; + } + + // QVQ models: + if (modelName.StartsWith("qvq")) + { + return + [ + Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, + Capability.TEXT_OUTPUT, + + Capability.ALWAYS_REASONING, + Capability.CHAT_COMPLETION_API, + ]; + } + + // Default to text input and output: + return + [ + Capability.TEXT_INPUT, + Capability.TEXT_OUTPUT, + + Capability.FUNCTION_CALLING, + Capability.CHAT_COMPLETION_API, + ]; + } +} \ No newline at end of file diff --git a/app/MindWork AI Studio/Settings/ProviderExtensions.Anthropic.cs b/app/MindWork AI Studio/Settings/ProviderExtensions.Anthropic.cs new file mode 100644 index 00000000..3bf2f0b5 --- /dev/null +++ b/app/MindWork AI Studio/Settings/ProviderExtensions.Anthropic.cs @@ -0,0 +1,49 @@ +using AIStudio.Provider; + +namespace AIStudio.Settings; + +public static partial class ProviderExtensions +{ + public static List GetModelCapabilitiesAnthropic(Model model) + { + var modelName = 
model.Id.ToLowerInvariant().AsSpan(); + + // Claude 4.x models: + if(modelName.StartsWith("claude-opus-4") || modelName.StartsWith("claude-sonnet-4")) + return [ + Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, + Capability.TEXT_OUTPUT, + + Capability.OPTIONAL_REASONING, Capability.FUNCTION_CALLING, + Capability.CHAT_COMPLETION_API, + ]; + + // Claude 3.7 is able to do reasoning: + if(modelName.StartsWith("claude-3-7")) + return [ + Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, + Capability.TEXT_OUTPUT, + + Capability.OPTIONAL_REASONING, Capability.FUNCTION_CALLING, + Capability.CHAT_COMPLETION_API, + ]; + + // All other 3.x models are able to process text and images as input: + if(modelName.StartsWith("claude-3-")) + return [ + Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, + Capability.TEXT_OUTPUT, + + Capability.FUNCTION_CALLING, + Capability.CHAT_COMPLETION_API, + ]; + + // Any other model is able to process text only: + return [ + Capability.TEXT_INPUT, + Capability.TEXT_OUTPUT, + Capability.FUNCTION_CALLING, + Capability.CHAT_COMPLETION_API, + ]; + } +} \ No newline at end of file diff --git a/app/MindWork AI Studio/Settings/ProviderExtensions.DeepSeek.cs b/app/MindWork AI Studio/Settings/ProviderExtensions.DeepSeek.cs new file mode 100644 index 00000000..0d3428e9 --- /dev/null +++ b/app/MindWork AI Studio/Settings/ProviderExtensions.DeepSeek.cs @@ -0,0 +1,28 @@ +using AIStudio.Provider; + +namespace AIStudio.Settings; + +public static partial class ProviderExtensions +{ + public static List GetModelCapabilitiesDeepSeek(Model model) + { + var modelName = model.Id.ToLowerInvariant().AsSpan(); + + if(modelName.IndexOf("reasoner") is not -1) + return + [ + Capability.TEXT_INPUT, + Capability.TEXT_OUTPUT, + + Capability.ALWAYS_REASONING, + Capability.CHAT_COMPLETION_API, + ]; + + return + [ + Capability.TEXT_INPUT, + Capability.TEXT_OUTPUT, + Capability.CHAT_COMPLETION_API, + ]; + } +} \ No newline at end of file diff --git 
a/app/MindWork AI Studio/Settings/ProviderExtensions.Google.cs b/app/MindWork AI Studio/Settings/ProviderExtensions.Google.cs new file mode 100644 index 00000000..379370c1 --- /dev/null +++ b/app/MindWork AI Studio/Settings/ProviderExtensions.Google.cs @@ -0,0 +1,95 @@ +using AIStudio.Provider; + +namespace AIStudio.Settings; + +public static partial class ProviderExtensions +{ + public static List GetModelCapabilitiesGoogle(Model model) + { + var modelName = model.Id.ToLowerInvariant().AsSpan(); + + if (modelName.IndexOf("gemini-") is not -1) + { + // Reasoning models: + if (modelName.IndexOf("gemini-2.5") is not -1) + return + [ + Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, Capability.AUDIO_INPUT, + Capability.SPEECH_INPUT, Capability.VIDEO_INPUT, + + Capability.TEXT_OUTPUT, + + Capability.ALWAYS_REASONING, Capability.FUNCTION_CALLING, + Capability.CHAT_COMPLETION_API, + ]; + + // Image generation: + if(modelName.IndexOf("-2.0-flash-preview-image-") is not -1) + return + [ + Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, Capability.AUDIO_INPUT, + Capability.SPEECH_INPUT, Capability.VIDEO_INPUT, + + Capability.TEXT_OUTPUT, Capability.IMAGE_OUTPUT, + Capability.CHAT_COMPLETION_API, + ]; + + // Realtime model: + if(modelName.IndexOf("-2.0-flash-live-") is not -1) + return + [ + Capability.TEXT_INPUT, Capability.AUDIO_INPUT, Capability.SPEECH_INPUT, + Capability.VIDEO_INPUT, + + Capability.TEXT_OUTPUT, Capability.SPEECH_OUTPUT, + + Capability.FUNCTION_CALLING, + Capability.CHAT_COMPLETION_API, + ]; + + // The 2.0 flash models cannot call functions: + if(modelName.IndexOf("-2.0-flash-") is not -1) + return + [ + Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, Capability.AUDIO_INPUT, + Capability.SPEECH_INPUT, Capability.VIDEO_INPUT, + + Capability.TEXT_OUTPUT, + Capability.CHAT_COMPLETION_API, + ]; + + // The old 1.0 pro vision model: + if(modelName.IndexOf("pro-vision") is not -1) + return + [ + Capability.TEXT_INPUT, 
Capability.MULTIPLE_IMAGE_INPUT, + + Capability.TEXT_OUTPUT, + Capability.CHAT_COMPLETION_API, + ]; + + // Default to all other Gemini models: + return + [ + Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, Capability.AUDIO_INPUT, + Capability.SPEECH_INPUT, Capability.VIDEO_INPUT, + + Capability.TEXT_OUTPUT, + + Capability.FUNCTION_CALLING, + Capability.CHAT_COMPLETION_API, + ]; + } + + // Default for all other models: + return + [ + Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, + + Capability.TEXT_OUTPUT, + + Capability.FUNCTION_CALLING, + Capability.CHAT_COMPLETION_API, + ]; + } +} \ No newline at end of file diff --git a/app/MindWork AI Studio/Settings/ProviderExtensions.Mistral.cs b/app/MindWork AI Studio/Settings/ProviderExtensions.Mistral.cs new file mode 100644 index 00000000..c778e2b0 --- /dev/null +++ b/app/MindWork AI Studio/Settings/ProviderExtensions.Mistral.cs @@ -0,0 +1,56 @@ +using AIStudio.Provider; + +namespace AIStudio.Settings; + +public static partial class ProviderExtensions +{ + public static List GetModelCapabilitiesMistral(Model model) + { + var modelName = model.Id.ToLowerInvariant().AsSpan(); + + // Pixtral models are able to do process images: + if (modelName.IndexOf("pixtral") is not -1) + return + [ + Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, + Capability.TEXT_OUTPUT, + + Capability.FUNCTION_CALLING, + Capability.CHAT_COMPLETION_API, + ]; + + // Mistral medium: + if (modelName.IndexOf("mistral-medium-") is not -1) + return + [ + Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, + Capability.TEXT_OUTPUT, + + Capability.FUNCTION_CALLING, + Capability.CHAT_COMPLETION_API, + ]; + + // Mistral small: + if (modelName.IndexOf("mistral-small-") is not -1) + return + [ + Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, + Capability.TEXT_OUTPUT, + + Capability.FUNCTION_CALLING, + Capability.CHAT_COMPLETION_API, + ]; + + // Mistral saba: + if (modelName.IndexOf("mistral-saba-") is not -1) + return + [ + 
Capability.TEXT_INPUT, + Capability.TEXT_OUTPUT, + Capability.CHAT_COMPLETION_API, + ]; + + // Default: + return GetModelCapabilitiesOpenSource(model); + } +} \ No newline at end of file diff --git a/app/MindWork AI Studio/Settings/ProviderExtensions.OpenAI.cs b/app/MindWork AI Studio/Settings/ProviderExtensions.OpenAI.cs new file mode 100644 index 00000000..c3352b2e --- /dev/null +++ b/app/MindWork AI Studio/Settings/ProviderExtensions.OpenAI.cs @@ -0,0 +1,144 @@ +using AIStudio.Provider; + +namespace AIStudio.Settings; + +public static partial class ProviderExtensions +{ + public static List GetModelCapabilitiesOpenAI(Model model) + { + var modelName = model.Id.ToLowerInvariant().AsSpan(); + + if (modelName is "gpt-4o-search-preview") + return + [ + Capability.TEXT_INPUT, + Capability.TEXT_OUTPUT, + + Capability.WEB_SEARCH, + Capability.CHAT_COMPLETION_API, + ]; + + if (modelName is "gpt-4o-mini-search-preview") + return + [ + Capability.TEXT_INPUT, + Capability.TEXT_OUTPUT, + + Capability.WEB_SEARCH, + Capability.CHAT_COMPLETION_API, + ]; + + if (modelName.StartsWith("o1-mini")) + return + [ + Capability.TEXT_INPUT, + Capability.TEXT_OUTPUT, + + Capability.ALWAYS_REASONING, + Capability.CHAT_COMPLETION_API, + ]; + + if(modelName is "gpt-3.5-turbo") + return + [ + Capability.TEXT_INPUT, + Capability.TEXT_OUTPUT, + Capability.RESPONSES_API, + ]; + + if(modelName.StartsWith("gpt-3.5")) + return + [ + Capability.TEXT_INPUT, + Capability.TEXT_OUTPUT, + Capability.CHAT_COMPLETION_API, + ]; + + if (modelName.StartsWith("chatgpt-4o-")) + return + [ + Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, + Capability.TEXT_OUTPUT, + Capability.RESPONSES_API, + ]; + + if (modelName.StartsWith("o3-mini")) + return + [ + Capability.TEXT_INPUT, + Capability.TEXT_OUTPUT, + + Capability.ALWAYS_REASONING, Capability.FUNCTION_CALLING, + Capability.RESPONSES_API, + ]; + + if (modelName.StartsWith("o4-mini") || modelName.StartsWith("o3")) + return + [ + Capability.TEXT_INPUT, 
Capability.MULTIPLE_IMAGE_INPUT, + Capability.TEXT_OUTPUT, + + Capability.ALWAYS_REASONING, Capability.FUNCTION_CALLING, + Capability.WEB_SEARCH, + Capability.RESPONSES_API, + ]; + + if (modelName.StartsWith("o1")) + return + [ + Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, + Capability.TEXT_OUTPUT, + + Capability.ALWAYS_REASONING, Capability.FUNCTION_CALLING, + Capability.RESPONSES_API, + ]; + + if(modelName.StartsWith("gpt-4-turbo")) + return + [ + Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, + Capability.TEXT_OUTPUT, + + Capability.FUNCTION_CALLING, + Capability.RESPONSES_API, + ]; + + if(modelName is "gpt-4" || modelName.StartsWith("gpt-4-")) + return + [ + Capability.TEXT_INPUT, + Capability.TEXT_OUTPUT, + Capability.RESPONSES_API, + ]; + + if(modelName.StartsWith("gpt-5-nano")) + return + [ + Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, + Capability.TEXT_OUTPUT, + + Capability.FUNCTION_CALLING, Capability.ALWAYS_REASONING, + Capability.RESPONSES_API, + ]; + + if(modelName is "gpt-5" || modelName.StartsWith("gpt-5-")) + return + [ + Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, + Capability.TEXT_OUTPUT, + + Capability.FUNCTION_CALLING, Capability.ALWAYS_REASONING, + Capability.WEB_SEARCH, + Capability.RESPONSES_API, + ]; + + return + [ + Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, + Capability.TEXT_OUTPUT, + + Capability.FUNCTION_CALLING, + Capability.RESPONSES_API, + ]; + } +} \ No newline at end of file diff --git a/app/MindWork AI Studio/Provider/CapabilitiesOpenSource.cs b/app/MindWork AI Studio/Settings/ProviderExtensions.OpenSource.cs similarity index 97% rename from app/MindWork AI Studio/Provider/CapabilitiesOpenSource.cs rename to app/MindWork AI Studio/Settings/ProviderExtensions.OpenSource.cs index 1444ec34..ac934d86 100644 --- a/app/MindWork AI Studio/Provider/CapabilitiesOpenSource.cs +++ b/app/MindWork AI Studio/Settings/ProviderExtensions.OpenSource.cs @@ -1,8 +1,10 @@ -namespace 
AIStudio.Provider; +using AIStudio.Provider; -public static class CapabilitiesOpenSource +namespace AIStudio.Settings; + +public static partial class ProviderExtensions { - public static IReadOnlyCollection GetCapabilities(Model model) + public static List GetModelCapabilitiesOpenSource(Model model) { var modelName = model.Id.ToLowerInvariant().AsSpan(); diff --git a/app/MindWork AI Studio/Settings/ProviderExtensions.Perplexity.cs b/app/MindWork AI Studio/Settings/ProviderExtensions.Perplexity.cs new file mode 100644 index 00000000..da873009 --- /dev/null +++ b/app/MindWork AI Studio/Settings/ProviderExtensions.Perplexity.cs @@ -0,0 +1,38 @@ +using AIStudio.Provider; + +namespace AIStudio.Settings; + +public static partial class ProviderExtensions +{ + public static List GetModelCapabilitiesPerplexity(Model model) + { + var modelName = model.Id.ToLowerInvariant().AsSpan(); + + if(modelName.IndexOf("reasoning") is not -1 || + modelName.IndexOf("deep-research") is not -1) + return + [ + Capability.TEXT_INPUT, + Capability.MULTIPLE_IMAGE_INPUT, + + Capability.TEXT_OUTPUT, + Capability.IMAGE_OUTPUT, + + Capability.ALWAYS_REASONING, + Capability.WEB_SEARCH, + Capability.CHAT_COMPLETION_API, + ]; + + return + [ + Capability.TEXT_INPUT, + Capability.MULTIPLE_IMAGE_INPUT, + + Capability.TEXT_OUTPUT, + Capability.IMAGE_OUTPUT, + + Capability.WEB_SEARCH, + Capability.CHAT_COMPLETION_API, + ]; + } +} \ No newline at end of file diff --git a/app/MindWork AI Studio/Settings/ProviderExtensions.cs b/app/MindWork AI Studio/Settings/ProviderExtensions.cs new file mode 100644 index 00000000..002295c3 --- /dev/null +++ b/app/MindWork AI Studio/Settings/ProviderExtensions.cs @@ -0,0 +1,29 @@ +using AIStudio.Provider; + +namespace AIStudio.Settings; + +public static partial class ProviderExtensions +{ + public static List GetModelCapabilities(this Provider provider) => provider.UsedLLMProvider switch + { + LLMProviders.OPEN_AI => GetModelCapabilitiesOpenAI(provider.Model), + 
LLMProviders.MISTRAL => GetModelCapabilitiesMistral(provider.Model), + LLMProviders.ANTHROPIC => GetModelCapabilitiesAnthropic(provider.Model), + LLMProviders.GOOGLE => GetModelCapabilitiesGoogle(provider.Model), + LLMProviders.X => GetModelCapabilitiesOpenSource(provider.Model), + LLMProviders.DEEP_SEEK => GetModelCapabilitiesDeepSeek(provider.Model), + LLMProviders.ALIBABA_CLOUD => GetModelCapabilitiesAlibaba(provider.Model), + LLMProviders.PERPLEXITY => GetModelCapabilitiesPerplexity(provider.Model), + + LLMProviders.GROQ => GetModelCapabilitiesOpenSource(provider.Model), + LLMProviders.FIREWORKS => GetModelCapabilitiesOpenSource(provider.Model), + LLMProviders.HUGGINGFACE => GetModelCapabilitiesOpenSource(provider.Model), + + LLMProviders.HELMHOLTZ => GetModelCapabilitiesOpenSource(provider.Model), + LLMProviders.GWDG => GetModelCapabilitiesOpenSource(provider.Model), + + LLMProviders.SELF_HOSTED => GetModelCapabilitiesOpenSource(provider.Model), + + _ => [] + }; +} \ No newline at end of file diff --git a/app/MindWork AI Studio/wwwroot/changelog/v0.9.53.md b/app/MindWork AI Studio/wwwroot/changelog/v0.9.53.md index 55f4a879..0e38272a 100644 --- a/app/MindWork AI Studio/wwwroot/changelog/v0.9.53.md +++ b/app/MindWork AI Studio/wwwroot/changelog/v0.9.53.md @@ -1,3 +1,4 @@ # v0.9.53, build 228 (2025-10-xx xx:xx UTC) +- Added expert settings to the provider dialog to enable setting additional parameters. Also, additional parameters can be configured by configuration plugins for enterprise scenarios. Thanks to Peer (`peerschuett`) for this contribution. - Added the ability to export AI responses from the chat into Microsoft Word files. Thank you, Sabrina (`Sabrina-devops`), for your first contribution. -- Removed the character limit for profiles \ No newline at end of file +- Removed the character limit for profiles. \ No newline at end of file