Added expert mode to providers (#562)

Co-authored-by: Thorsten Sommer <SommerEngineering@users.noreply.github.com>
Peer Schütt, 2025-11-13 18:13:16 +01:00, committed by GitHub
parent d6f5dc143d
commit 9ef1a047f0
GPG Key ID: B5690EEEBB952194 (no known key found for this signature in database)
44 changed files with 860 additions and 531 deletions

View File

@@ -3028,9 +3028,15 @@ UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROFILEDIALOG::T900713019"] = "Cancel"
 -- The profile name must be unique; the chosen name is already in use.
 UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROFILEDIALOG::T911748898"] = "The profile name must be unique; the chosen name is already in use."
 
+-- Please be aware: This section is for experts only. You are responsible for verifying the correctness of the additional parameters you provide to the API call. By default, AI Studio uses the OpenAI-compatible chat completions API, when it is supported by the underlying service and model.
+UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T1017509792"] = "Please be aware: This section is for experts only. You are responsible for verifying the correctness of the additional parameters you provide to the API call. By default, AI Studio uses the OpenAI-compatible chat completions API, when it is supported by the underlying service and model."
+
 -- Hugging Face Inference Provider
 UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T1085481431"] = "Hugging Face Inference Provider"
 
+-- Hide Expert Settings
+UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T1108876344"] = "Hide Expert Settings"
+
 -- Failed to store the API key in the operating system. The message was: {0}. Please try again.
 UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T1122745046"] = "Failed to store the API key in the operating system. The message was: {0}. Please try again."
@@ -3043,6 +3049,9 @@ UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T1356621346"] = "Create acco
 -- Load models
 UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T15352225"] = "Load models"
 
+-- Add the parameters in proper JSON formatting, e.g., "temperature": 0.5. Remove trailing commas. The usual surrounding curly brackets {} must not be used, though.
+UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T1689135032"] = "Add the parameters in proper JSON formatting, e.g., \"temperature\": 0.5. Remove trailing commas. The usual surrounding curly brackets {} must not be used, though."
+
 -- Hostname
 UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T1727440780"] = "Hostname"
@@ -3064,12 +3073,18 @@ UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T2331453405"] = "(Optional)
 -- Add
 UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T2646845972"] = "Add"
 
+-- Additional API parameters
+UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T2728244552"] = "Additional API parameters"
+
 -- No models loaded or available.
 UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T2810182573"] = "No models loaded or available."
 
 -- Instance Name
 UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T2842060373"] = "Instance Name"
 
+-- Show Expert Settings
+UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T3361153305"] = "Show Expert Settings"
+
 -- Show available models
 UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T3763891899"] = "Show available models"
@@ -4813,8 +4828,8 @@ UI_TEXT_CONTENT["AISTUDIO::PROVIDER::BASEPROVIDER::T1674355816"] = "Tried to com
 -- Tried to stream the LLM provider '{0}' answer. Was not able to read the stream. The message is: '{1}'
 UI_TEXT_CONTENT["AISTUDIO::PROVIDER::BASEPROVIDER::T1856278860"] = "Tried to stream the LLM provider '{0}' answer. Was not able to read the stream. The message is: '{1}'"
 
--- Tried to communicate with the LLM provider '{0}'. Even after {1} retries, there were some problems with the request. The provider message is: '{2}'
-UI_TEXT_CONTENT["AISTUDIO::PROVIDER::BASEPROVIDER::T2249520705"] = "Tried to communicate with the LLM provider '{0}'. Even after {1} retries, there were some problems with the request. The provider message is: '{2}'"
+-- Tried to communicate with the LLM provider '{0}'. Even after {1} retries, there were some problems with the request. The provider message is: '{2}'.
+UI_TEXT_CONTENT["AISTUDIO::PROVIDER::BASEPROVIDER::T2181034173"] = "Tried to communicate with the LLM provider '{0}'. Even after {1} retries, there were some problems with the request. The provider message is: '{2}'."
 
 -- Tried to communicate with the LLM provider '{0}'. Something was not found. The provider message is: '{1}'
 UI_TEXT_CONTENT["AISTUDIO::PROVIDER::BASEPROVIDER::T2780552614"] = "Tried to communicate with the LLM provider '{0}'. Something was not found. The provider message is: '{1}'"

View File

@@ -72,6 +72,7 @@ public partial class SettingsPanelProviders : SettingsPanelBase
             { x => x.IsEditing, true },
             { x => x.DataHost, provider.Host },
             { x => x.HFInferenceProviderId, provider.HFInferenceProvider },
+            { x => x.AdditionalJsonApiParameters, provider.AdditionalJsonApiParameters },
         };
 
         var dialogReference = await this.DialogService.ShowAsync<ProviderDialog>(T("Edit LLM Provider"), dialogParameters, DialogOptions.FULLSCREEN);

View File

@@ -130,6 +130,18 @@
             UserAttributes="@SPELLCHECK_ATTRIBUTES"
         />
+        <MudStack>
+            <MudButton OnClick="@this.ToggleExpertSettings">
+                @(this.showExpertSettings ? T("Hide Expert Settings") : T("Show Expert Settings"))
+            </MudButton>
+            <MudDivider />
+            <MudCollapse Expanded="@this.showExpertSettings" Class="@this.GetExpertStyles">
+                <MudJustifiedText Class="mb-5">
+                    @T("Please be aware: This section is for experts only. You are responsible for verifying the correctness of the additional parameters you provide to the API call. By default, AI Studio uses the OpenAI-compatible chat completions API, when it is supported by the underlying service and model.")
+                </MudJustifiedText>
+                <MudTextField T="string" Label=@T("Additional API parameters") Variant="Variant.Outlined" Lines="4" AutoGrow="true" MaxLines="10" HelperText=@T("""Add the parameters in proper JSON formatting, e.g., "temperature": 0.5. Remove trailing commas. The usual surrounding curly brackets {} must not be used, though.""") Placeholder="@GetPlaceholderExpertSettings" @bind-Value="@this.AdditionalJsonApiParameters" OnBlur="@this.OnInputChangeExpertSettings"/>
+            </MudCollapse>
+        </MudStack>
     </MudForm>
 
     <Issues IssuesData="@this.dataIssues"/>
 </DialogContent>

View File

@@ -78,6 +78,9 @@ public partial class ProviderDialog : MSGComponentBase, ISecretId
     [Parameter]
     public bool IsEditing { get; init; }
 
+    [Parameter]
+    public string AdditionalJsonApiParameters { get; set; } = string.Empty;
+
     [Inject]
     private RustService RustService { get; init; } = null!;
@@ -94,6 +97,7 @@ public partial class ProviderDialog : MSGComponentBase, ISecretId
     private string dataManuallyModel = string.Empty;
     private string dataAPIKeyStorageIssue = string.Empty;
    private string dataEditingPreviousInstanceName = string.Empty;
+    private bool showExpertSettings;
 
     // We get the form reference from Blazor code to validate it manually:
     private MudForm form = null!;
@@ -135,6 +139,7 @@ public partial class ProviderDialog : MSGComponentBase, ISecretId
             Hostname = cleanedHostname.EndsWith('/') ? cleanedHostname[..^1] : cleanedHostname,
             Host = this.DataHost,
             HFInferenceProvider = this.HFInferenceProviderId,
+            AdditionalJsonApiParameters = this.AdditionalJsonApiParameters,
         };
     }
@@ -149,6 +154,8 @@ public partial class ProviderDialog : MSGComponentBase, ISecretId
         #pragma warning disable MWAIS0001
         this.UsedInstanceNames = this.SettingsManager.ConfigurationData.Providers.Select(x => x.InstanceName.ToLowerInvariant()).ToList();
         #pragma warning restore MWAIS0001
+
+        this.showExpertSettings = !string.IsNullOrWhiteSpace(this.AdditionalJsonApiParameters);
 
         // When editing, we need to load the data:
         if(this.IsEditing)
@@ -268,4 +275,20 @@ public partial class ProviderDialog : MSGComponentBase, ISecretId
         LLMProviders.SELF_HOSTED => T("(Optional) API Key"),
         _ => T("API Key"),
     };
+
+    private void ToggleExpertSettings() => this.showExpertSettings = !this.showExpertSettings;
+
+    private void OnInputChangeExpertSettings()
+    {
+        this.AdditionalJsonApiParameters = this.AdditionalJsonApiParameters.Trim().TrimEnd(',', ' ');
+    }
+
+    private string GetExpertStyles => this.showExpertSettings ? "border-2 border-dashed rounded pa-2" : string.Empty;
+
+    private static string GetPlaceholderExpertSettings =>
+        """
+        "temperature": 0.5,
+        "top_p": 0.9,
+        "frequency_penalty": 0.0
+        """;
 }
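
Note that OnInputChangeExpertSettings only trims the raw value on blur; it does not validate the JSON (that happens later, in BaseProvider). A quick standalone sketch, with an assumed sample input, of what this normalization does:

    // Assumed sample input with stray whitespace and a trailing comma:
    var raw = "  \"temperature\": 0.5, \"top_p\": 0.9,  ";

    // Same normalization as OnInputChangeExpertSettings:
    var normalized = raw.Trim().TrimEnd(',', ' ');

    // Prints: "temperature": 0.5, "top_p": 0.9
    Console.WriteLine(normalized);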

View File

@@ -47,15 +47,24 @@ DEPRECATION_MESSAGE = ""
 CONFIG = {}
 CONFIG["LLM_PROVIDERS"] = {}
 
--- An example of a configuration for a self-hosted ollama server:
+-- An example of a configuration for a self-hosted server:
 CONFIG["LLM_PROVIDERS"][#CONFIG["LLM_PROVIDERS"]+1] = {
     ["Id"] = "00000000-0000-0000-0000-000000000000",
     ["InstanceName"] = "<user-friendly name for the combination of server and model>",
     ["UsedLLMProvider"] = "SELF_HOSTED",
+
+    -- Allowed values for Host are: LM_STUDIO, LLAMACPP, OLLAMA, and VLLM
     ["Host"] = "OLLAMA",
-    ["Hostname"] = "<https address of the ollama server>",
+    ["Hostname"] = "<https address of the server>",
+
+    -- Optional: Additional parameters for the API.
+    -- Please refer to the documentation of the selected host for details.
+    -- Might be something like ... \"temperature\": 0.5 ... for one parameter.
+    -- Could be something like ... \"temperature\": 0.5, \"max_tokens\": 1000 ... for multiple parameters.
+    -- Please do not add the enclosing curly braces {} here. Also, no trailing comma is allowed.
+    ["AdditionalJsonApiParameters"] = "",
     ["Model"] = {
-        ["Id"] = "<the ollama model ID>",
+        ["Id"] = "<the model ID>",
         ["DisplayName"] = "<user-friendly name of the model>",
     }
 }

View File

@@ -3030,9 +3030,15 @@ UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROFILEDIALOG::T900713019"] = "Abbrechen"
 -- The profile name must be unique; the chosen name is already in use.
 UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROFILEDIALOG::T911748898"] = "Der Profilname muss eindeutig sein; der ausgewählte Name wird bereits verwendet."
 
+-- Please be aware: This section is for experts only. You are responsible for verifying the correctness of the additional parameters you provide to the API call. By default, AI Studio uses the OpenAI-compatible chat completions API, when it is supported by the underlying service and model.
+UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T1017509792"] = "Bitte beachten Sie: Dieser Bereich ist nur für Expertinnen und Experten. Sie sind dafür verantwortlich, die Korrektheit der zusätzlichen Parameter zu überprüfen, die Sie beim API-Aufruf angeben. Standardmäßig verwendet AI Studio die OpenAI-kompatible Chat Completions-API, sofern diese vom zugrunde liegenden Dienst und Modell unterstützt wird."
+
 -- Hugging Face Inference Provider
 UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T1085481431"] = "Hugging Face Inferenz-Anbieter"
 
+-- Hide Expert Settings
+UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T1108876344"] = "Experten-Einstellungen ausblenden"
+
 -- Failed to store the API key in the operating system. The message was: {0}. Please try again.
 UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T1122745046"] = "Der API-Schlüssel konnte nicht im Betriebssystem gespeichert werden. Die Meldung war: {0}. Bitte versuchen Sie es erneut."
@@ -3045,6 +3051,9 @@ UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T1356621346"] = "Konto erste
 -- Load models
 UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T15352225"] = "Modelle laden"
 
+-- Add the parameters in proper JSON formatting, e.g., "temperature": 0.5. Remove trailing commas. The usual surrounding curly brackets {} must not be used, though.
+UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T1689135032"] = "Fügen Sie die Parameter in korrekter JSON-Formatierung hinzu, z. B. \"temperature\": 0.5. Entfernen Sie abschließende Kommas. Die üblichen äußeren geschweiften Klammern {} dürfen dabei jedoch nicht verwendet werden."
+
 -- Hostname
 UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T1727440780"] = "Hostname"
@@ -3066,12 +3075,18 @@ UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T2331453405"] = "(Optional)
 -- Add
 UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T2646845972"] = "Hinzufügen"
 
+-- Additional API parameters
+UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T2728244552"] = "Zusätzliche API-Parameter"
+
 -- No models loaded or available.
 UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T2810182573"] = "Keine Modelle geladen oder verfügbar."
 
 -- Instance Name
 UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T2842060373"] = "Instanzname"
 
+-- Show Expert Settings
+UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T3361153305"] = "Experten-Einstellungen anzeigen"
+
 -- Show available models
 UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T3763891899"] = "Verfügbare Modelle anzeigen"
@@ -4815,8 +4830,8 @@ UI_TEXT_CONTENT["AISTUDIO::PROVIDER::BASEPROVIDER::T1674355816"] = "Es wurde ver
 -- Tried to stream the LLM provider '{0}' answer. Was not able to read the stream. The message is: '{1}'
 UI_TEXT_CONTENT["AISTUDIO::PROVIDER::BASEPROVIDER::T1856278860"] = "Der Versuch, die Antwort des LLM-Anbieters '{0}' zu streamen, ist fehlgeschlagen. Der Stream konnte nicht gelesen werden. Die Meldung lautet: '{1}'"
 
--- Tried to communicate with the LLM provider '{0}'. Even after {1} retries, there were some problems with the request. The provider message is: '{2}'
-UI_TEXT_CONTENT["AISTUDIO::PROVIDER::BASEPROVIDER::T2249520705"] = "Es wurde versucht, mit dem LLM-Anbieter '{0}' zu kommunizieren. Auch nach {1} Versuchen gab es Probleme mit der Anfrage. Die Nachricht des Anbieters lautet: '{2}'"
+-- Tried to communicate with the LLM provider '{0}'. Even after {1} retries, there were some problems with the request. The provider message is: '{2}'.
+UI_TEXT_CONTENT["AISTUDIO::PROVIDER::BASEPROVIDER::T2181034173"] = "Versuchte, mit dem LLM-Anbieter '{0}' zu kommunizieren. Auch nach {1} Wiederholungsversuchen gab es Probleme mit der Anfrage. Die Meldung des Anbieters lautet: '{2}'."
 
 -- Tried to communicate with the LLM provider '{0}'. Something was not found. The provider message is: '{1}'
 UI_TEXT_CONTENT["AISTUDIO::PROVIDER::BASEPROVIDER::T2780552614"] = "Es wurde versucht, mit dem LLM-Anbieter '{0}' zu kommunizieren. Etwas wurde nicht gefunden. Die Meldung des Anbieters lautet: '{1}'"

View File

@@ -3030,9 +3030,15 @@ UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROFILEDIALOG::T900713019"] = "Cancel"
 -- The profile name must be unique; the chosen name is already in use.
 UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROFILEDIALOG::T911748898"] = "The profile name must be unique; the chosen name is already in use."
 
+-- Please be aware: This section is for experts only. You are responsible for verifying the correctness of the additional parameters you provide to the API call. By default, AI Studio uses the OpenAI-compatible chat completions API, when it is supported by the underlying service and model.
+UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T1017509792"] = "Please be aware: This section is for experts only. You are responsible for verifying the correctness of the additional parameters you provide to the API call. By default, AI Studio uses the OpenAI-compatible chat completions API, when it is supported by the underlying service and model."
+
 -- Hugging Face Inference Provider
 UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T1085481431"] = "Hugging Face Inference Provider"
 
+-- Hide Expert Settings
+UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T1108876344"] = "Hide Expert Settings"
+
 -- Failed to store the API key in the operating system. The message was: {0}. Please try again.
 UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T1122745046"] = "Failed to store the API key in the operating system. The message was: {0}. Please try again."
@@ -3045,6 +3051,9 @@ UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T1356621346"] = "Create acco
 -- Load models
 UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T15352225"] = "Load models"
 
+-- Add the parameters in proper JSON formatting, e.g., "temperature": 0.5. Remove trailing commas. The usual surrounding curly brackets {} must not be used, though.
+UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T1689135032"] = "Add the parameters in proper JSON formatting, e.g., \"temperature\": 0.5. Remove trailing commas. The usual surrounding curly brackets {} must not be used, though."
+
 -- Hostname
 UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T1727440780"] = "Hostname"
@@ -3066,12 +3075,18 @@ UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T2331453405"] = "(Optional)
 -- Add
 UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T2646845972"] = "Add"
 
+-- Additional API parameters
+UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T2728244552"] = "Additional API parameters"
+
 -- No models loaded or available.
 UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T2810182573"] = "No models loaded or available."
 
 -- Instance Name
 UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T2842060373"] = "Instance Name"
 
+-- Show Expert Settings
+UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T3361153305"] = "Show Expert Settings"
+
 -- Show available models
 UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T3763891899"] = "Show available models"
@@ -4815,8 +4830,8 @@ UI_TEXT_CONTENT["AISTUDIO::PROVIDER::BASEPROVIDER::T1674355816"] = "Tried to com
 -- Tried to stream the LLM provider '{0}' answer. Was not able to read the stream. The message is: '{1}'
 UI_TEXT_CONTENT["AISTUDIO::PROVIDER::BASEPROVIDER::T1856278860"] = "Tried to stream the LLM provider '{0}' answer. Was not able to read the stream. The message is: '{1}'"
 
--- Tried to communicate with the LLM provider '{0}'. Even after {1} retries, there were some problems with the request. The provider message is: '{2}'
-UI_TEXT_CONTENT["AISTUDIO::PROVIDER::BASEPROVIDER::T2249520705"] = "Tried to communicate with the LLM provider '{0}'. Even after {1} retries, there were some problems with the request. The provider message is: '{2}'"
+-- Tried to communicate with the LLM provider '{0}'. Even after {1} retries, there were some problems with the request. The provider message is: '{2}'.
+UI_TEXT_CONTENT["AISTUDIO::PROVIDER::BASEPROVIDER::T2181034173"] = "Tried to communicate with the LLM provider '{0}'. Even after {1} retries, there were some problems with the request. The provider message is: '{2}'."
 
 -- Tried to communicate with the LLM provider '{0}'. Something was not found. The provider message is: '{1}'
 UI_TEXT_CONTENT["AISTUDIO::PROVIDER::BASEPROVIDER::T2780552614"] = "Tried to communicate with the LLM provider '{0}'. Something was not found. The provider message is: '{1}'"

View File

@@ -36,6 +36,9 @@ public sealed class ProviderAlibabaCloud() : BaseProvider("https://dashscope-int
             Content = chatThread.PrepareSystemPrompt(settingsManager, chatThread),
         };
 
+        // Parse the API parameters:
+        var apiParameters = this.ParseAdditionalApiParameters();
+
         // Prepare the AlibabaCloud HTTP chat request:
         var alibabaCloudChatRequest = JsonSerializer.Serialize(new ChatCompletionAPIRequest
         {
@@ -63,6 +66,7 @@ public sealed class ProviderAlibabaCloud() : BaseProvider("https://dashscope-int
                 }
             }).ToList()],
             Stream = true,
+            AdditionalApiParameters = apiParameters
         }, JSON_SERIALIZER_OPTIONS);
 
         async Task<HttpRequestMessage> RequestBuilder()
@@ -141,84 +145,6 @@ public sealed class ProviderAlibabaCloud() : BaseProvider("https://dashscope-int
         return this.LoadModels(["text-embedding-"], token, apiKeyProvisional).ContinueWith(t => t.Result.Concat(additionalModels).OrderBy(x => x.Id).AsEnumerable(), token);
     }
 
-    /// <inheritdoc />
-    public override IReadOnlyCollection<Capability> GetModelCapabilities(Model model)
-    {
-        var modelName = model.Id.ToLowerInvariant().AsSpan();
-
-        // Qwen models:
-        if (modelName.StartsWith("qwen"))
-        {
-            // Check for omni models:
-            if (modelName.IndexOf("omni") is not -1)
-                return
-                [
-                    Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
-                    Capability.AUDIO_INPUT, Capability.SPEECH_INPUT,
-                    Capability.VIDEO_INPUT,
-                    Capability.TEXT_OUTPUT, Capability.SPEECH_OUTPUT,
-                    Capability.CHAT_COMPLETION_API,
-                ];
-
-            // Check for Qwen 3:
-            if(modelName.StartsWith("qwen3"))
-                return
-                [
-                    Capability.TEXT_INPUT,
-                    Capability.TEXT_OUTPUT,
-                    Capability.OPTIONAL_REASONING, Capability.FUNCTION_CALLING,
-                    Capability.CHAT_COMPLETION_API,
-                ];
-
-            if(modelName.IndexOf("-vl-") is not -1)
-                return
-                [
-                    Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
-                    Capability.TEXT_OUTPUT,
-                    Capability.CHAT_COMPLETION_API,
-                ];
-        }
-
-        // QwQ models:
-        if (modelName.StartsWith("qwq"))
-        {
-            return
-            [
-                Capability.TEXT_INPUT,
-                Capability.TEXT_OUTPUT,
-                Capability.ALWAYS_REASONING, Capability.FUNCTION_CALLING,
-                Capability.CHAT_COMPLETION_API,
-            ];
-        }
-
-        // QVQ models:
-        if (modelName.StartsWith("qvq"))
-        {
-            return
-            [
-                Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
-                Capability.TEXT_OUTPUT,
-                Capability.ALWAYS_REASONING,
-                Capability.CHAT_COMPLETION_API,
-            ];
-        }
-
-        // Default to text input and output:
-        return
-        [
-            Capability.TEXT_INPUT,
-            Capability.TEXT_OUTPUT,
-            Capability.FUNCTION_CALLING,
-            Capability.CHAT_COMPLETION_API,
-        ];
-    }
-
     #endregion

View File

@@ -1,3 +1,4 @@
+using System.Text.Json.Serialization;
 using AIStudio.Provider.OpenAI;
 
 namespace AIStudio.Provider.Anthropic;
@@ -16,4 +17,9 @@ public readonly record struct ChatRequest(
     int MaxTokens,
     bool Stream,
     string System
-);
+)
+{
+    // Attention: The "required" modifier is not supported for [JsonExtensionData].
+    [JsonExtensionData]
+    public IDictionary<string, object> AdditionalApiParameters { get; init; } = new Dictionary<string, object>();
+}
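
For context on the pattern used by these ChatRequest records: [JsonExtensionData] tells System.Text.Json to serialize the dictionary's entries as additional top-level properties of the request object, which is how the user's extra parameters end up next to the regular fields. A minimal self-contained sketch (the type and values here are illustrative, not taken from the repository):

    using System;
    using System.Collections.Generic;
    using System.Text.Json;
    using System.Text.Json.Serialization;

    // Illustrative request type following the same pattern as the ChatRequest records:
    public record struct DemoRequest(string Model, bool Stream)
    {
        // Entries in this dictionary are written as top-level JSON properties:
        [JsonExtensionData]
        public IDictionary<string, object> AdditionalApiParameters { get; init; }
            = new Dictionary<string, object>();
    }

    public static class Demo
    {
        public static void Main()
        {
            var options = new JsonSerializerOptions { PropertyNamingPolicy = JsonNamingPolicy.SnakeCaseLower };
            var request = new DemoRequest("some-model", true)
            {
                AdditionalApiParameters = new Dictionary<string, object> { ["temperature"] = 0.5 },
            };

            // Prints: {"model":"some-model","stream":true,"temperature":0.5}
            // Note: the naming policy applies to declared properties only;
            // extension-data keys are emitted verbatim.
            Console.WriteLine(JsonSerializer.Serialize(request, options));
        }
    }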

View File

@@ -26,6 +26,9 @@ public sealed class ProviderAnthropic() : BaseProvider("https://api.anthropic.co
         var requestedSecret = await RUST_SERVICE.GetAPIKey(this);
         if(!requestedSecret.Success)
             yield break;
 
+        // Parse the API parameters:
+        var apiParameters = this.ParseAdditionalApiParameters("system");
+
         // Prepare the Anthropic HTTP chat request:
         var chatRequest = JsonSerializer.Serialize(new ChatRequest
@@ -52,10 +55,11 @@ public sealed class ProviderAnthropic() : BaseProvider("https://api.anthropic.co
             }).ToList()],
 
             System = chatThread.PrepareSystemPrompt(settingsManager, chatThread),
-            MaxTokens = 4_096,
+            MaxTokens = apiParameters.TryGetValue("max_tokens", out var value) && value is int intValue ? intValue : 4_096,
 
             // Right now, we only support streaming completions:
             Stream = true,
+            AdditionalApiParameters = apiParameters
         }, JSON_SERIALIZER_OPTIONS);
 
         async Task<HttpRequestMessage> RequestBuilder()
@@ -113,49 +117,6 @@ public sealed class ProviderAnthropic() : BaseProvider("https://api.anthropic.co
     {
         return Task.FromResult(Enumerable.Empty<Model>());
     }
 
-    public override IReadOnlyCollection<Capability> GetModelCapabilities(Model model)
-    {
-        var modelName = model.Id.ToLowerInvariant().AsSpan();
-
-        // Claude 4.x models:
-        if(modelName.StartsWith("claude-opus-4") || modelName.StartsWith("claude-sonnet-4"))
-            return [
-                Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
-                Capability.TEXT_OUTPUT,
-                Capability.OPTIONAL_REASONING, Capability.FUNCTION_CALLING,
-                Capability.CHAT_COMPLETION_API,
-            ];
-
-        // Claude 3.7 is able to do reasoning:
-        if(modelName.StartsWith("claude-3-7"))
-            return [
-                Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
-                Capability.TEXT_OUTPUT,
-                Capability.OPTIONAL_REASONING, Capability.FUNCTION_CALLING,
-                Capability.CHAT_COMPLETION_API,
-            ];
-
-        // All other 3.x models are able to process text and images as input:
-        if(modelName.StartsWith("claude-3-"))
-            return [
-                Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
-                Capability.TEXT_OUTPUT,
-                Capability.FUNCTION_CALLING,
-                Capability.CHAT_COMPLETION_API,
-            ];
-
-        // Any other model is able to process text only:
-        return [
-            Capability.TEXT_INPUT,
-            Capability.TEXT_OUTPUT,
-            Capability.FUNCTION_CALLING,
-            Capability.CHAT_COMPLETION_API,
-        ];
-    }
-
     #endregion

View File

@@ -40,7 +40,8 @@ public abstract class BaseProvider : IProvider, ISecretId
     protected static readonly JsonSerializerOptions JSON_SERIALIZER_OPTIONS = new()
     {
         PropertyNamingPolicy = JsonNamingPolicy.SnakeCaseLower,
-        Converters = { new AnnotationConverter() }
+        Converters = { new AnnotationConverter() },
+        AllowTrailingCommas = false
     };
 
     /// <summary>
@@ -63,7 +64,10 @@ public abstract class BaseProvider : IProvider, ISecretId
     /// <inheritdoc />
     public abstract string InstanceName { get; set; }
 
+    /// <inheritdoc />
+    public string AdditionalJsonApiParameters { get; init; } = string.Empty;
+
     /// <inheritdoc />
     public abstract IAsyncEnumerable<ContentStreamChunk> StreamChatCompletion(Model chatModel, ChatThread chatThread, SettingsManager settingsManager, CancellationToken token = default);
@@ -78,9 +82,6 @@ public abstract class BaseProvider : IProvider, ISecretId
     /// <inheritdoc />
     public abstract Task<IEnumerable<Model>> GetEmbeddingModels(string? apiKeyProvisional = null, CancellationToken token = default);
 
-    /// <inheritdoc />
-    public abstract IReadOnlyCollection<Capability> GetModelCapabilities(Model model);
-
     #endregion
@@ -129,8 +130,7 @@ public abstract class BaseProvider : IProvider, ISecretId
                 if (nextResponse.StatusCode is HttpStatusCode.Forbidden)
                 {
                     await MessageBus.INSTANCE.SendError(new(Icons.Material.Filled.Block, string.Format(TB("Tried to communicate with the LLM provider '{0}'. You might not be able to use this provider from your location. The provider message is: '{1}'"), this.InstanceName, nextResponse.ReasonPhrase)));
-                    this.logger.LogError($"Failed request with status code {nextResponse.StatusCode} (message = '{nextResponse.ReasonPhrase}').");
-                    this.logger.LogDebug($"Error body: {errorBody}");
+                    this.logger.LogError("Failed request with status code {ResponseStatusCode} (message = '{ResponseReasonPhrase}', error body = '{ErrorBody}').", nextResponse.StatusCode, nextResponse.ReasonPhrase, errorBody);
                     errorMessage = nextResponse.ReasonPhrase;
                     break;
                 }
@@ -138,8 +138,7 @@ public abstract class BaseProvider : IProvider, ISecretId
                 if(nextResponse.StatusCode is HttpStatusCode.BadRequest)
                 {
                     await MessageBus.INSTANCE.SendError(new(Icons.Material.Filled.CloudOff, string.Format(TB("Tried to communicate with the LLM provider '{0}'. The required message format might be changed. The provider message is: '{1}'"), this.InstanceName, nextResponse.ReasonPhrase)));
-                    this.logger.LogError($"Failed request with status code {nextResponse.StatusCode} (message = '{nextResponse.ReasonPhrase}').");
-                    this.logger.LogDebug($"Error body: {errorBody}");
+                    this.logger.LogError("Failed request with status code {ResponseStatusCode} (message = '{ResponseReasonPhrase}', error body = '{ErrorBody}').", nextResponse.StatusCode, nextResponse.ReasonPhrase, errorBody);
                     errorMessage = nextResponse.ReasonPhrase;
                     break;
                 }
@@ -147,8 +146,7 @@ public abstract class BaseProvider : IProvider, ISecretId
                 if(nextResponse.StatusCode is HttpStatusCode.NotFound)
                 {
                     await MessageBus.INSTANCE.SendError(new(Icons.Material.Filled.CloudOff, string.Format(TB("Tried to communicate with the LLM provider '{0}'. Something was not found. The provider message is: '{1}'"), this.InstanceName, nextResponse.ReasonPhrase)));
-                    this.logger.LogError($"Failed request with status code {nextResponse.StatusCode} (message = '{nextResponse.ReasonPhrase}').");
-                    this.logger.LogDebug($"Error body: {errorBody}");
+                    this.logger.LogError("Failed request with status code {ResponseStatusCode} (message = '{ResponseReasonPhrase}', error body = '{ErrorBody}').", nextResponse.StatusCode, nextResponse.ReasonPhrase, errorBody);
                     errorMessage = nextResponse.ReasonPhrase;
                     break;
                 }
@@ -156,8 +154,7 @@ public abstract class BaseProvider : IProvider, ISecretId
                 if(nextResponse.StatusCode is HttpStatusCode.Unauthorized)
                 {
                     await MessageBus.INSTANCE.SendError(new(Icons.Material.Filled.Key, string.Format(TB("Tried to communicate with the LLM provider '{0}'. The API key might be invalid. The provider message is: '{1}'"), this.InstanceName, nextResponse.ReasonPhrase)));
-                    this.logger.LogError($"Failed request with status code {nextResponse.StatusCode} (message = '{nextResponse.ReasonPhrase}').");
-                    this.logger.LogDebug($"Error body: {errorBody}");
+                    this.logger.LogError("Failed request with status code {ResponseStatusCode} (message = '{ResponseReasonPhrase}', error body = '{ErrorBody}').", nextResponse.StatusCode, nextResponse.ReasonPhrase, errorBody);
                     errorMessage = nextResponse.ReasonPhrase;
                     break;
                 }
@@ -165,8 +162,7 @@ public abstract class BaseProvider : IProvider, ISecretId
                 if(nextResponse.StatusCode is HttpStatusCode.InternalServerError)
                 {
                     await MessageBus.INSTANCE.SendError(new(Icons.Material.Filled.CloudOff, string.Format(TB("Tried to communicate with the LLM provider '{0}'. The server might be down or having issues. The provider message is: '{1}'"), this.InstanceName, nextResponse.ReasonPhrase)));
-                    this.logger.LogError($"Failed request with status code {nextResponse.StatusCode} (message = '{nextResponse.ReasonPhrase}').");
-                    this.logger.LogDebug($"Error body: {errorBody}");
+                    this.logger.LogError("Failed request with status code {ResponseStatusCode} (message = '{ResponseReasonPhrase}', error body = '{ErrorBody}').", nextResponse.StatusCode, nextResponse.ReasonPhrase, errorBody);
                     errorMessage = nextResponse.ReasonPhrase;
                     break;
                 }
@@ -174,8 +170,7 @@ public abstract class BaseProvider : IProvider, ISecretId
                 if(nextResponse.StatusCode is HttpStatusCode.ServiceUnavailable)
                 {
                     await MessageBus.INSTANCE.SendError(new(Icons.Material.Filled.CloudOff, string.Format(TB("Tried to communicate with the LLM provider '{0}'. The provider is overloaded. The message is: '{1}'"), this.InstanceName, nextResponse.ReasonPhrase)));
-                    this.logger.LogError($"Failed request with status code {nextResponse.StatusCode} (message = '{nextResponse.ReasonPhrase}').");
-                    this.logger.LogDebug($"Error body: {errorBody}");
+                    this.logger.LogError("Failed request with status code {ResponseStatusCode} (message = '{ResponseReasonPhrase}', error body = '{ErrorBody}').", nextResponse.StatusCode, nextResponse.ReasonPhrase, errorBody);
                     errorMessage = nextResponse.ReasonPhrase;
                     break;
                 }
@@ -185,13 +180,13 @@ public abstract class BaseProvider : IProvider, ISecretId
                 if(timeSeconds > 90)
                     timeSeconds = 90;
 
-                this.logger.LogDebug($"Failed request with status code {nextResponse.StatusCode} (message = '{errorMessage}'). Retrying in {timeSeconds:0.00} seconds.");
+                this.logger.LogDebug("Failed request with status code {ResponseStatusCode} (message = '{ErrorMessage}'). Retrying in {TimeSeconds:0.00} seconds.", nextResponse.StatusCode, errorMessage, timeSeconds);
                 await Task.Delay(TimeSpan.FromSeconds(timeSeconds), token);
             }
 
             if(retry >= MAX_RETRIES || !string.IsNullOrWhiteSpace(errorMessage))
             {
-                await MessageBus.INSTANCE.SendError(new(Icons.Material.Filled.CloudOff, string.Format(TB("Tried to communicate with the LLM provider '{0}'. Even after {1} retries, there were some problems with the request. The provider message is: '{2}'"), this.InstanceName, MAX_RETRIES, errorMessage)));
+                await MessageBus.INSTANCE.SendError(new DataErrorMessage(Icons.Material.Filled.CloudOff, string.Format(TB("Tried to communicate with the LLM provider '{0}'. Even after {1} retries, there were some problems with the request. The provider message is: '{2}'."), this.InstanceName, MAX_RETRIES, errorMessage)));
                 return new HttpRateLimitedStreamResult(false, true, errorMessage ?? $"Failed after {MAX_RETRIES} retries; no provider message available", response);
             }
@@ -522,4 +517,66 @@ public abstract class BaseProvider : IProvider, ISecretId
         streamReader.Dispose();
     }
 
+    /// <summary>
+    /// Parse and convert API parameters from a provided JSON string into a dictionary,
+    /// optionally merging additional parameters and removing specific keys.
+    /// </summary>
+    /// <param name="keysToRemove">Optional list of keys to remove from the final dictionary
+    /// (case-insensitive). The parameters stream, model, and messages are removed by default.</param>
+    protected IDictionary<string, object> ParseAdditionalApiParameters(params List<string> keysToRemove)
+    {
+        if(string.IsNullOrWhiteSpace(this.AdditionalJsonApiParameters))
+            return new Dictionary<string, object>();
+
+        try
+        {
+            // Wrap the user-provided parameters in curly brackets to form a valid JSON object:
+            var json = $"{{{this.AdditionalJsonApiParameters}}}";
+            var jsonDoc = JsonSerializer.Deserialize<JsonElement>(json, JSON_SERIALIZER_OPTIONS);
+            var dict = ConvertToDictionary(jsonDoc);
+
+            // Some keys are always removed because we set them:
+            keysToRemove.Add("stream");
+            keysToRemove.Add("model");
+            keysToRemove.Add("messages");
+
+            // Remove the specified keys (case-insensitive):
+            var removeSet = new HashSet<string>(keysToRemove, StringComparer.OrdinalIgnoreCase);
+            foreach (var key in removeSet)
+                dict.Remove(key);
+
+            return dict;
+        }
+        catch (JsonException ex)
+        {
+            this.logger.LogError("Failed to parse additional API parameters: {ExceptionMessage}", ex.Message);
+            return new Dictionary<string, object>();
+        }
+    }
+
+    private static IDictionary<string, object> ConvertToDictionary(JsonElement element)
+    {
+        return element.EnumerateObject()
+            .ToDictionary<JsonProperty, string, object>(
+                p => p.Name,
+                p => ConvertJsonValue(p.Value) ?? string.Empty
+            );
+    }
+
+    private static object? ConvertJsonValue(JsonElement element) => element.ValueKind switch
+    {
+        JsonValueKind.String => element.GetString(),
+        JsonValueKind.Number => element.TryGetInt32(out var i) ? i :
+                                element.TryGetInt64(out var l) ? l :
+                                element.TryGetDouble(out var d) ? d :
+                                element.GetDecimal(),
+        JsonValueKind.True or JsonValueKind.False => element.GetBoolean(),
+        JsonValueKind.Null => string.Empty,
+        JsonValueKind.Object => ConvertToDictionary(element),
+        JsonValueKind.Array => element.EnumerateArray().Select(ConvertJsonValue).ToList(),
+        _ => string.Empty,
+    };
 }
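
A standalone sketch of what this parsing step does with a typical user value; it mirrors the wrap-in-braces logic above, but the helper below is illustrative and not part of the class:

    using System;
    using System.Collections.Generic;
    using System.Text.Json;

    public static class ParseDemo
    {
        // Mirrors the parsing above: wrap the fragment in braces, deserialize,
        // and drop the reserved keys that the app always sets itself.
        public static Dictionary<string, object?> Parse(string fragment)
        {
            var reserved = new HashSet<string>(StringComparer.OrdinalIgnoreCase) { "stream", "model", "messages" };
            var element = JsonSerializer.Deserialize<JsonElement>($"{{{fragment}}}");

            var result = new Dictionary<string, object?>();
            foreach (var property in element.EnumerateObject())
                if (!reserved.Contains(property.Name))
                    result[property.Name] = property.Value.ToString();

            return result;
        }

        public static void Main()
        {
            // "stream" is silently dropped because the app always sets it:
            var parameters = Parse("\"temperature\": 0.5, \"stream\": false");

            // Prints: temperature = 0.5
            foreach (var (key, value) in parameters)
                Console.WriteLine($"{key} = {value}");
        }
    }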

View File

@@ -36,6 +36,9 @@ public sealed class ProviderDeepSeek() : BaseProvider("https://api.deepseek.com/
             Content = chatThread.PrepareSystemPrompt(settingsManager, chatThread),
         };
 
+        // Parse the API parameters:
+        var apiParameters = this.ParseAdditionalApiParameters();
+
         // Prepare the DeepSeek HTTP chat request:
         var deepSeekChatRequest = JsonSerializer.Serialize(new ChatCompletionAPIRequest
         {
@@ -63,6 +66,7 @@ public sealed class ProviderDeepSeek() : BaseProvider("https://api.deepseek.com/
                 }
             }).ToList()],
             Stream = true,
+            AdditionalApiParameters = apiParameters
         }, JSON_SERIALIZER_OPTIONS);
 
         async Task<HttpRequestMessage> RequestBuilder()
@@ -108,28 +112,6 @@ public sealed class ProviderDeepSeek() : BaseProvider("https://api.deepseek.com/
         return Task.FromResult(Enumerable.Empty<Model>());
     }
 
-    public override IReadOnlyCollection<Capability> GetModelCapabilities(Model model)
-    {
-        var modelName = model.Id.ToLowerInvariant().AsSpan();
-
-        if(modelName.IndexOf("reasoner") is not -1)
-            return
-            [
-                Capability.TEXT_INPUT,
-                Capability.TEXT_OUTPUT,
-                Capability.ALWAYS_REASONING,
-                Capability.CHAT_COMPLETION_API,
-            ];
-
-        return
-        [
-            Capability.TEXT_INPUT,
-            Capability.TEXT_OUTPUT,
-            Capability.CHAT_COMPLETION_API,
-        ];
-    }
-
     #endregion

View File

@@ -1,3 +1,5 @@
+using System.Text.Json.Serialization;
+
 namespace AIStudio.Provider.Fireworks;
 
 /// <summary>
@@ -10,4 +12,9 @@ public readonly record struct ChatRequest(
     string Model,
     IList<Message> Messages,
     bool Stream
-);
+)
+{
+    // Attention: The "required" modifier is not supported for [JsonExtensionData].
+    [JsonExtensionData]
+    public IDictionary<string, object> AdditionalApiParameters { get; init; } = new Dictionary<string, object>();
+}

View File

@@ -36,6 +36,9 @@ public class ProviderFireworks() : BaseProvider("https://api.fireworks.ai/infere
             Content = chatThread.PrepareSystemPrompt(settingsManager, chatThread),
         };
 
+        // Parse the API parameters:
+        var apiParameters = this.ParseAdditionalApiParameters();
+
         // Prepare the Fireworks HTTP chat request:
         var fireworksChatRequest = JsonSerializer.Serialize(new ChatRequest
         {
@@ -65,6 +68,7 @@ public class ProviderFireworks() : BaseProvider("https://api.fireworks.ai/infere
             // Right now, we only support streaming completions:
             Stream = true,
+            AdditionalApiParameters = apiParameters
         }, JSON_SERIALIZER_OPTIONS);
 
         async Task<HttpRequestMessage> RequestBuilder()
@@ -110,7 +114,5 @@ public class ProviderFireworks() : BaseProvider("https://api.fireworks.ai/infere
         return Task.FromResult(Enumerable.Empty<Model>());
     }
 
-    public override IReadOnlyCollection<Capability> GetModelCapabilities(Model model) => CapabilitiesOpenSource.GetCapabilities(model);
-
     #endregion
 }

View File

@@ -36,6 +36,9 @@ public sealed class ProviderGWDG() : BaseProvider("https://chat-ai.academiccloud
             Content = chatThread.PrepareSystemPrompt(settingsManager, chatThread),
         };
 
+        // Parse the API parameters:
+        var apiParameters = this.ParseAdditionalApiParameters();
+
         // Prepare the GWDG HTTP chat request:
         var gwdgChatRequest = JsonSerializer.Serialize(new ChatCompletionAPIRequest
         {
@@ -63,6 +66,7 @@ public sealed class ProviderGWDG() : BaseProvider("https://chat-ai.academiccloud
                 }
             }).ToList()],
             Stream = true,
+            AdditionalApiParameters = apiParameters
         }, JSON_SERIALIZER_OPTIONS);
 
         async Task<HttpRequestMessage> RequestBuilder()
@@ -110,8 +114,6 @@ public sealed class ProviderGWDG() : BaseProvider("https://chat-ai.academiccloud
         return models.Where(model => model.Id.StartsWith("e5-", StringComparison.InvariantCultureIgnoreCase));
     }
 
-    public override IReadOnlyCollection<Capability> GetModelCapabilities(Model model) => CapabilitiesOpenSource.GetCapabilities(model);
-
     #endregion
 
     private async Task<IEnumerable<Model>> LoadModels(CancellationToken token, string? apiKeyProvisional = null)

View File

@@ -1,3 +1,4 @@
+using System.Text.Json.Serialization;
 using AIStudio.Provider.OpenAI;
 
 namespace AIStudio.Provider.Google;
@@ -12,4 +13,9 @@ public readonly record struct ChatRequest(
     string Model,
     IList<Message> Messages,
     bool Stream
-);
+)
+{
+    // Attention: The "required" modifier is not supported for [JsonExtensionData].
+    [JsonExtensionData]
+    public IDictionary<string, object> AdditionalApiParameters { get; init; } = new Dictionary<string, object>();
+}

View File

@@ -36,6 +36,9 @@ public class ProviderGoogle() : BaseProvider("https://generativelanguage.googlea
             Content = chatThread.PrepareSystemPrompt(settingsManager, chatThread),
         };
 
+        // Parse the API parameters:
+        var apiParameters = this.ParseAdditionalApiParameters();
+
         // Prepare the Google HTTP chat request:
         var geminiChatRequest = JsonSerializer.Serialize(new ChatRequest
         {
@@ -65,6 +68,7 @@ public class ProviderGoogle() : BaseProvider("https://generativelanguage.googlea
             // Right now, we only support streaming completions:
             Stream = true,
+            AdditionalApiParameters = apiParameters
         }, JSON_SERIALIZER_OPTIONS);
 
         async Task<HttpRequestMessage> RequestBuilder()
@@ -122,94 +126,6 @@ public class ProviderGoogle() : BaseProvider("https://generativelanguage.googlea
             .Select(n => new Provider.Model(n.Name.Replace("models/", string.Empty), n.DisplayName));
     }
 
-    public override IReadOnlyCollection<Capability> GetModelCapabilities(Provider.Model model)
-    {
-        var modelName = model.Id.ToLowerInvariant().AsSpan();
-
-        if (modelName.IndexOf("gemini-") is not -1)
-        {
-            // Reasoning models:
-            if (modelName.IndexOf("gemini-2.5") is not -1)
-                return
-                [
-                    Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, Capability.AUDIO_INPUT,
-                    Capability.SPEECH_INPUT, Capability.VIDEO_INPUT,
-                    Capability.TEXT_OUTPUT,
-                    Capability.ALWAYS_REASONING, Capability.FUNCTION_CALLING,
-                    Capability.CHAT_COMPLETION_API,
-                ];
-
-            // Image generation:
-            if(modelName.IndexOf("-2.0-flash-preview-image-") is not -1)
-                return
-                [
-                    Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, Capability.AUDIO_INPUT,
-                    Capability.SPEECH_INPUT, Capability.VIDEO_INPUT,
-                    Capability.TEXT_OUTPUT, Capability.IMAGE_OUTPUT,
-                    Capability.CHAT_COMPLETION_API,
-                ];
-
-            // Realtime model:
-            if(modelName.IndexOf("-2.0-flash-live-") is not -1)
-                return
-                [
-                    Capability.TEXT_INPUT, Capability.AUDIO_INPUT, Capability.SPEECH_INPUT,
-                    Capability.VIDEO_INPUT,
-                    Capability.TEXT_OUTPUT, Capability.SPEECH_OUTPUT,
-                    Capability.FUNCTION_CALLING,
-                    Capability.CHAT_COMPLETION_API,
-                ];
-
-            // The 2.0 flash models cannot call functions:
-            if(modelName.IndexOf("-2.0-flash-") is not -1)
-                return
-                [
-                    Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, Capability.AUDIO_INPUT,
-                    Capability.SPEECH_INPUT, Capability.VIDEO_INPUT,
-                    Capability.TEXT_OUTPUT,
-                    Capability.CHAT_COMPLETION_API,
-                ];
-
-            // The old 1.0 pro vision model:
-            if(modelName.IndexOf("pro-vision") is not -1)
-                return
-                [
-                    Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
-                    Capability.TEXT_OUTPUT,
-                    Capability.CHAT_COMPLETION_API,
-                ];
-
-            // Default to all other Gemini models:
-            return
-            [
-                Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, Capability.AUDIO_INPUT,
-                Capability.SPEECH_INPUT, Capability.VIDEO_INPUT,
-                Capability.TEXT_OUTPUT,
-                Capability.FUNCTION_CALLING,
-                Capability.CHAT_COMPLETION_API,
-            ];
-        }
-
-        // Default for all other models:
-        return
-        [
-            Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
-            Capability.TEXT_OUTPUT,
-            Capability.FUNCTION_CALLING,
-            Capability.CHAT_COMPLETION_API,
-        ];
-    }
-
     #endregion

View File

@@ -1,3 +1,4 @@
+using System.Text.Json.Serialization;
 using AIStudio.Provider.OpenAI;
 
 namespace AIStudio.Provider.Groq;
@@ -14,4 +15,9 @@ public readonly record struct ChatRequest(
     IList<Message> Messages,
     bool Stream,
     int Seed
-);
+)
+{
+    // Attention: The "required" modifier is not supported for [JsonExtensionData].
+    [JsonExtensionData]
+    public IDictionary<string, object> AdditionalApiParameters { get; init; } = new Dictionary<string, object>();
+}

View File

@@ -36,6 +36,9 @@ public class ProviderGroq() : BaseProvider("https://api.groq.com/openai/v1/", LO
             Content = chatThread.PrepareSystemPrompt(settingsManager, chatThread),
         };
 
+        // Parse the API parameters:
+        var apiParameters = this.ParseAdditionalApiParameters();
+
         // Prepare the OpenAI HTTP chat request:
         var groqChatRequest = JsonSerializer.Serialize(new ChatRequest
         {
@@ -65,6 +68,7 @@ public class ProviderGroq() : BaseProvider("https://api.groq.com/openai/v1/", LO
             // Right now, we only support streaming completions:
             Stream = true,
+            AdditionalApiParameters = apiParameters
         }, JSON_SERIALIZER_OPTIONS);
 
         async Task<HttpRequestMessage> RequestBuilder()
@@ -110,8 +114,6 @@ public class ProviderGroq() : BaseProvider("https://api.groq.com/openai/v1/", LO
         return Task.FromResult(Enumerable.Empty<Model>());
     }
 
-    public override IReadOnlyCollection<Capability> GetModelCapabilities(Model model) => CapabilitiesOpenSource.GetCapabilities(model);
-
     #endregion
 
     private async Task<IEnumerable<Model>> LoadModels(CancellationToken token, string? apiKeyProvisional = null)

View File

@ -36,6 +36,9 @@ public sealed class ProviderHelmholtz() : BaseProvider("https://api.helmholtz-bl
Content = chatThread.PrepareSystemPrompt(settingsManager, chatThread),
};
// Parse the API parameters:
var apiParameters = this.ParseAdditionalApiParameters();
// Prepare the Helmholtz HTTP chat request:
var helmholtzChatRequest = JsonSerializer.Serialize(new ChatCompletionAPIRequest
{
@ -63,6 +66,7 @@ public sealed class ProviderHelmholtz() : BaseProvider("https://api.helmholtz-bl
}
}).ToList()],
Stream = true,
AdditionalApiParameters = apiParameters
}, JSON_SERIALIZER_OPTIONS);
async Task<HttpRequestMessage> RequestBuilder()
@ -114,8 +118,6 @@ public sealed class ProviderHelmholtz() : BaseProvider("https://api.helmholtz-bl
model.Id.Contains("gritlm", StringComparison.InvariantCultureIgnoreCase));
}
public override IReadOnlyCollection<Capability> GetModelCapabilities(Model model) => CapabilitiesOpenSource.GetCapabilities(model);
#endregion
private async Task<IEnumerable<Model>> LoadModels(CancellationToken token, string? apiKeyProvisional = null)

View File

@ -41,6 +41,9 @@ public sealed class ProviderHuggingFace : BaseProvider
Content = chatThread.PrepareSystemPrompt(settingsManager, chatThread),
};
// Parse the API parameters:
var apiParameters = this.ParseAdditionalApiParameters();
// Prepare the HuggingFace HTTP chat request:
var huggingfaceChatRequest = JsonSerializer.Serialize(new ChatCompletionAPIRequest
{
@ -68,6 +71,7 @@ public sealed class ProviderHuggingFace : BaseProvider
}
}).ToList()],
Stream = true,
AdditionalApiParameters = apiParameters
}, JSON_SERIALIZER_OPTIONS);
async Task<HttpRequestMessage> RequestBuilder()
@ -113,7 +117,5 @@ public sealed class ProviderHuggingFace : BaseProvider
return Task.FromResult(Enumerable.Empty<Model>());
}
public override IReadOnlyCollection<Capability> GetModelCapabilities(Model model) => CapabilitiesOpenSource.GetCapabilities(model);
#endregion
}

View File

@ -19,6 +19,11 @@ public interface IProvider
/// </summary>
public string InstanceName { get; }
/// <summary>
/// The additional API parameters.
/// </summary>
public string AdditionalJsonApiParameters { get; }
/// <summary>
/// Starts a chat completion stream.
/// </summary>
@ -64,10 +69,4 @@ public interface IProvider
/// <returns>The list of embedding models.</returns>
public Task<IEnumerable<Model>> GetEmbeddingModels(string? apiKeyProvisional = null, CancellationToken token = default);
/// <summary>
/// Get the capabilities of a model.
/// </summary>
/// <param name="model">The model to get the capabilities for.</param>
/// <returns>The capabilities of the model.</returns>
public IReadOnlyCollection<Capability> GetModelCapabilities(Model model);
}

View File

@ -144,7 +144,7 @@ public static class LLMProvidersExtensions
/// <returns>The provider instance.</returns>
public static IProvider CreateProvider(this AIStudio.Settings.Provider providerSettings)
{
return providerSettings.UsedLLMProvider.CreateProvider(providerSettings.InstanceName, providerSettings.Host, providerSettings.Hostname, providerSettings.Model, providerSettings.HFInferenceProvider, providerSettings.AdditionalJsonApiParameters);
}
/// <summary>
@ -157,29 +157,29 @@ public static class LLMProvidersExtensions
return embeddingProviderSettings.UsedLLMProvider.CreateProvider(embeddingProviderSettings.Name, embeddingProviderSettings.Host, embeddingProviderSettings.Hostname, embeddingProviderSettings.Model, HFInferenceProvider.NONE);
}
private static IProvider CreateProvider(this LLMProviders provider, string instanceName, Host host, string hostname, Model model, HFInferenceProvider inferenceProvider, string expertProviderApiParameter = "")
{
try
{
return provider switch
{
LLMProviders.OPEN_AI => new ProviderOpenAI { InstanceName = instanceName, AdditionalJsonApiParameters = expertProviderApiParameter },
LLMProviders.ANTHROPIC => new ProviderAnthropic { InstanceName = instanceName, AdditionalJsonApiParameters = expertProviderApiParameter },
LLMProviders.MISTRAL => new ProviderMistral { InstanceName = instanceName, AdditionalJsonApiParameters = expertProviderApiParameter },
LLMProviders.GOOGLE => new ProviderGoogle { InstanceName = instanceName, AdditionalJsonApiParameters = expertProviderApiParameter },
LLMProviders.X => new ProviderX { InstanceName = instanceName, AdditionalJsonApiParameters = expertProviderApiParameter },
LLMProviders.DEEP_SEEK => new ProviderDeepSeek { InstanceName = instanceName, AdditionalJsonApiParameters = expertProviderApiParameter },
LLMProviders.ALIBABA_CLOUD => new ProviderAlibabaCloud { InstanceName = instanceName, AdditionalJsonApiParameters = expertProviderApiParameter },
LLMProviders.PERPLEXITY => new ProviderPerplexity { InstanceName = instanceName, AdditionalJsonApiParameters = expertProviderApiParameter },
LLMProviders.GROQ => new ProviderGroq { InstanceName = instanceName, AdditionalJsonApiParameters = expertProviderApiParameter },
LLMProviders.FIREWORKS => new ProviderFireworks { InstanceName = instanceName, AdditionalJsonApiParameters = expertProviderApiParameter },
LLMProviders.HUGGINGFACE => new ProviderHuggingFace(inferenceProvider, model) { InstanceName = instanceName, AdditionalJsonApiParameters = expertProviderApiParameter },
LLMProviders.SELF_HOSTED => new ProviderSelfHosted(host, hostname) { InstanceName = instanceName, AdditionalJsonApiParameters = expertProviderApiParameter },
LLMProviders.HELMHOLTZ => new ProviderHelmholtz { InstanceName = instanceName, AdditionalJsonApiParameters = expertProviderApiParameter },
LLMProviders.GWDG => new ProviderGWDG { InstanceName = instanceName, AdditionalJsonApiParameters = expertProviderApiParameter },
_ => new NoProvider(),
};
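Illustrative usage of the factory above, assuming provider settings whose AdditionalJsonApiParameters field was filled in via the new expert settings dialog:

// Hypothetical helper that loads a configured AIStudio.Settings.Provider:
var providerSettings = GetConfiguredProvider();

// The public factory forwards the expert JSON string into the concrete provider,
// which later parses it via ParseAdditionalApiParameters() when building a request:
IProvider provider = providerSettings.CreateProvider();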

View File

@ -1,3 +1,5 @@
using System.Text.Json.Serialization;
namespace AIStudio.Provider.Mistral;
/// <summary>
@ -14,4 +16,9 @@ public readonly record struct ChatRequest(
bool Stream,
int RandomSeed,
bool SafePrompt = false
)
{
// Attention: The "required" modifier is not supported for [JsonExtensionData].
[JsonExtensionData]
public IDictionary<string, object> AdditionalApiParameters { get; init; } = new Dictionary<string, object>();
}

View File

@ -34,6 +34,9 @@ public sealed class ProviderMistral() : BaseProvider("https://api.mistral.ai/v1/
Content = chatThread.PrepareSystemPrompt(settingsManager, chatThread),
};
// Parse the API parameters:
var apiParameters = this.ParseAdditionalApiParameters();
// Prepare the Mistral HTTP chat request:
var mistralChatRequest = JsonSerializer.Serialize(new ChatRequest
{
@ -63,7 +66,8 @@ public sealed class ProviderMistral() : BaseProvider("https://api.mistral.ai/v1/
// Right now, we only support streaming completions:
Stream = true,
SafePrompt = apiParameters.TryGetValue("safe_prompt", out var value) && value is true,
AdditionalApiParameters = apiParameters
}, JSON_SERIALIZER_OPTIONS);
async Task<HttpRequestMessage> RequestBuilder()
@ -122,56 +126,6 @@ public sealed class ProviderMistral() : BaseProvider("https://api.mistral.ai/v1/
return Task.FromResult(Enumerable.Empty<Provider.Model>());
}
public override IReadOnlyCollection<Capability> GetModelCapabilities(Provider.Model model)
{
var modelName = model.Id.ToLowerInvariant().AsSpan();
// Pixtral models are able to process images:
if (modelName.IndexOf("pixtral") is not -1)
return
[
Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
Capability.TEXT_OUTPUT,
Capability.FUNCTION_CALLING,
Capability.CHAT_COMPLETION_API,
];
// Mistral medium:
if (modelName.IndexOf("mistral-medium-") is not -1)
return
[
Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
Capability.TEXT_OUTPUT,
Capability.FUNCTION_CALLING,
Capability.CHAT_COMPLETION_API,
];
// Mistral small:
if (modelName.IndexOf("mistral-small-") is not -1)
return
[
Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
Capability.TEXT_OUTPUT,
Capability.FUNCTION_CALLING,
Capability.CHAT_COMPLETION_API,
];
// Mistral saba:
if (modelName.IndexOf("mistral-saba-") is not -1)
return
[
Capability.TEXT_INPUT,
Capability.TEXT_OUTPUT,
Capability.CHAT_COMPLETION_API,
];
// Default:
return CapabilitiesOpenSource.GetCapabilities(model);
}
#endregion
private async Task<ModelsResponse> LoadModelList(string? apiKeyProvisional, CancellationToken token)

View File

@ -13,6 +13,9 @@ public class NoProvider : IProvider
public string InstanceName { get; set; } = "None";
/// <inheritdoc />
public string AdditionalJsonApiParameters { get; init; } = string.Empty;
public Task<IEnumerable<Model>> GetTextModels(string? apiKeyProvisional = null, CancellationToken token = default) => Task.FromResult<IEnumerable<Model>>([]);
public Task<IEnumerable<Model>> GetImageModels(string? apiKeyProvisional = null, CancellationToken token = default) => Task.FromResult<IEnumerable<Model>>([]);

View File

@ -1,3 +1,5 @@
using System.Text.Json.Serialization;
namespace AIStudio.Provider.OpenAI;
/// <summary>
@ -15,4 +17,8 @@ public record ChatCompletionAPIRequest(
public ChatCompletionAPIRequest() : this(string.Empty, [], true)
{
}
// Attention: The "required" modifier is not supported for [JsonExtensionData].
[JsonExtensionData]
public IDictionary<string, object> AdditionalApiParameters { get; init; } = new Dictionary<string, object>();
}

View File

@ -59,7 +59,7 @@ public sealed class ProviderOpenAI() : BaseProvider("https://api.openai.com/v1/"
};
// Read the model capabilities:
var modelCapabilities = ProviderExtensions.GetModelCapabilitiesOpenAI(chatModel);
// Check if we are using the Responses API or the Chat Completion API:
var usingResponsesAPI = modelCapabilities.Contains(Capability.RESPONSES_API);
@ -85,6 +85,10 @@ public sealed class ProviderOpenAI() : BaseProvider("https://api.openai.com/v1/"
_ => []
};
// Parse the API parameters:
var apiParameters = this.ParseAdditionalApiParameters("input", "store", "tools");
//
// Create the request: either for the Responses API or the Chat Completion API
//
@ -119,6 +123,7 @@ public sealed class ProviderOpenAI() : BaseProvider("https://api.openai.com/v1/"
// Right now, we only support streaming completions:
Stream = true,
AdditionalApiParameters = apiParameters
}, JSON_SERIALIZER_OPTIONS),
// Responses API request:
@ -157,6 +162,9 @@ public sealed class ProviderOpenAI() : BaseProvider("https://api.openai.com/v1/"
// Tools we want to use:
Tools = tools,
// Additional API parameters:
AdditionalApiParameters = apiParameters
}, JSON_SERIALIZER_OPTIONS),
};
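For illustration, with AdditionalJsonApiParameters set to {"temperature": 0.2} (an assumed value), both request shapes gain a top-level temperature field once serialized; sketched payloads:

// Chat Completions API (sketch): {"model":"...","messages":[...],"stream":true,"temperature":0.2}
// Responses API (sketch):        {"model":"...","input":[...],"stream":true,"tools":[...],"temperature":0.2}
// "input", "store", and "tools" were excluded from the parsed parameters above
// because the Responses API request sets them itself.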
@ -215,144 +223,6 @@ public sealed class ProviderOpenAI() : BaseProvider("https://api.openai.com/v1/"
return this.LoadModels(["text-embedding-"], token, apiKeyProvisional); return this.LoadModels(["text-embedding-"], token, apiKeyProvisional);
} }
public override IReadOnlyCollection<Capability> GetModelCapabilities(Model model)
{
var modelName = model.Id.ToLowerInvariant().AsSpan();
if (modelName is "gpt-4o-search-preview")
return
[
Capability.TEXT_INPUT,
Capability.TEXT_OUTPUT,
Capability.WEB_SEARCH,
Capability.CHAT_COMPLETION_API,
];
if (modelName is "gpt-4o-mini-search-preview")
return
[
Capability.TEXT_INPUT,
Capability.TEXT_OUTPUT,
Capability.WEB_SEARCH,
Capability.CHAT_COMPLETION_API,
];
if (modelName.StartsWith("o1-mini"))
return
[
Capability.TEXT_INPUT,
Capability.TEXT_OUTPUT,
Capability.ALWAYS_REASONING,
Capability.CHAT_COMPLETION_API,
];
if(modelName is "gpt-3.5-turbo")
return
[
Capability.TEXT_INPUT,
Capability.TEXT_OUTPUT,
Capability.RESPONSES_API,
];
if(modelName.StartsWith("gpt-3.5"))
return
[
Capability.TEXT_INPUT,
Capability.TEXT_OUTPUT,
Capability.CHAT_COMPLETION_API,
];
if (modelName.StartsWith("chatgpt-4o-"))
return
[
Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
Capability.TEXT_OUTPUT,
Capability.RESPONSES_API,
];
if (modelName.StartsWith("o3-mini"))
return
[
Capability.TEXT_INPUT,
Capability.TEXT_OUTPUT,
Capability.ALWAYS_REASONING, Capability.FUNCTION_CALLING,
Capability.RESPONSES_API,
];
if (modelName.StartsWith("o4-mini") || modelName.StartsWith("o3"))
return
[
Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
Capability.TEXT_OUTPUT,
Capability.ALWAYS_REASONING, Capability.FUNCTION_CALLING,
Capability.WEB_SEARCH,
Capability.RESPONSES_API,
];
if (modelName.StartsWith("o1"))
return
[
Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
Capability.TEXT_OUTPUT,
Capability.ALWAYS_REASONING, Capability.FUNCTION_CALLING,
Capability.RESPONSES_API,
];
if(modelName.StartsWith("gpt-4-turbo"))
return
[
Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
Capability.TEXT_OUTPUT,
Capability.FUNCTION_CALLING,
Capability.RESPONSES_API,
];
if(modelName is "gpt-4" || modelName.StartsWith("gpt-4-"))
return
[
Capability.TEXT_INPUT,
Capability.TEXT_OUTPUT,
Capability.RESPONSES_API,
];
if(modelName.StartsWith("gpt-5-nano"))
return
[
Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
Capability.TEXT_OUTPUT,
Capability.FUNCTION_CALLING, Capability.ALWAYS_REASONING,
Capability.RESPONSES_API,
];
if(modelName is "gpt-5" || modelName.StartsWith("gpt-5-"))
return
[
Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
Capability.TEXT_OUTPUT,
Capability.FUNCTION_CALLING, Capability.ALWAYS_REASONING,
Capability.WEB_SEARCH,
Capability.RESPONSES_API,
];
return
[
Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
Capability.TEXT_OUTPUT,
Capability.FUNCTION_CALLING,
Capability.RESPONSES_API,
];
}
#endregion
private async Task<IEnumerable<Model>> LoadModels(string[] prefixes, CancellationToken token, string? apiKeyProvisional = null)

View File

@ -1,3 +1,5 @@
using System.Text.Json.Serialization;
namespace AIStudio.Provider.OpenAI;
/// <summary>
@ -18,4 +20,8 @@ public record ResponsesAPIRequest(
public ResponsesAPIRequest() : this(string.Empty, [], true, false, [])
{
}
// Attention: The "required" modifier is not supported for [JsonExtensionData].
[JsonExtensionData]
public IDictionary<string, object> AdditionalApiParameters { get; init; } = new Dictionary<string, object>();
}

View File

@ -45,6 +45,9 @@ public sealed class ProviderPerplexity() : BaseProvider("https://api.perplexity.
Content = chatThread.PrepareSystemPrompt(settingsManager, chatThread),
};
// Parse the API parameters:
var apiParameters = this.ParseAdditionalApiParameters();
// Prepare the Perplexity HTTP chat request:
var perplexityChatRequest = JsonSerializer.Serialize(new ChatCompletionAPIRequest
{
@ -72,6 +75,7 @@ public sealed class ProviderPerplexity() : BaseProvider("https://api.perplexity.
}
}).ToList()],
Stream = true,
AdditionalApiParameters = apiParameters
}, JSON_SERIALIZER_OPTIONS);
async Task<HttpRequestMessage> RequestBuilder()
@ -117,38 +121,6 @@ public sealed class ProviderPerplexity() : BaseProvider("https://api.perplexity.
return Task.FromResult(Enumerable.Empty<Model>());
}
public override IReadOnlyCollection<Capability> GetModelCapabilities(Model model)
{
var modelName = model.Id.ToLowerInvariant().AsSpan();
if(modelName.IndexOf("reasoning") is not -1 ||
modelName.IndexOf("deep-research") is not -1)
return
[
Capability.TEXT_INPUT,
Capability.MULTIPLE_IMAGE_INPUT,
Capability.TEXT_OUTPUT,
Capability.IMAGE_OUTPUT,
Capability.ALWAYS_REASONING,
Capability.WEB_SEARCH,
Capability.CHAT_COMPLETION_API,
];
return
[
Capability.TEXT_INPUT,
Capability.MULTIPLE_IMAGE_INPUT,
Capability.TEXT_OUTPUT,
Capability.IMAGE_OUTPUT,
Capability.WEB_SEARCH,
Capability.CHAT_COMPLETION_API,
];
}
#endregion
private Task<IEnumerable<Model>> LoadModels() => Task.FromResult<IEnumerable<Model>>(KNOWN_MODELS);

View File

@ -1,3 +1,5 @@
using System.Text.Json.Serialization;
namespace AIStudio.Provider.SelfHosted;
/// <summary>
@ -10,4 +12,9 @@ public readonly record struct ChatRequest(
string Model,
IList<Message> Messages,
bool Stream
)
{
// Attention: The "required" modifier is not supported for [JsonExtensionData].
[JsonExtensionData]
public IDictionary<string, object> AdditionalApiParameters { get; init; } = new Dictionary<string, object>();
}

View File

@ -32,6 +32,9 @@ public sealed class ProviderSelfHosted(Host host, string hostname) : BaseProvide
Content = chatThread.PrepareSystemPrompt(settingsManager, chatThread),
};
// Parse the API parameters:
var apiParameters = this.ParseAdditionalApiParameters();
// Prepare the OpenAI HTTP chat request:
var providerChatRequest = JsonSerializer.Serialize(new ChatRequest
{
@ -60,7 +63,8 @@ public sealed class ProviderSelfHosted(Host host, string hostname) : BaseProvide
}).ToList()],
// Right now, we only support streaming completions:
Stream = true,
AdditionalApiParameters = apiParameters
}, JSON_SERIALIZER_OPTIONS);
async Task<HttpRequestMessage> RequestBuilder()
@ -142,8 +146,6 @@ public sealed class ProviderSelfHosted(Host host, string hostname) : BaseProvide
}
}
public override IReadOnlyCollection<Capability> GetModelCapabilities(Provider.Model model) => CapabilitiesOpenSource.GetCapabilities(model);
#endregion
private async Task<IEnumerable<Provider.Model>> LoadModels(string[] ignorePhrases, string[] filterPhrases, CancellationToken token, string? apiKeyProvisional = null)

View File

@ -36,6 +36,9 @@ public sealed class ProviderX() : BaseProvider("https://api.x.ai/v1/", LOGGER)
Content = chatThread.PrepareSystemPrompt(settingsManager, chatThread),
};
// Parse the API parameters:
var apiParameters = this.ParseAdditionalApiParameters();
// Prepare the xAI HTTP chat request:
var xChatRequest = JsonSerializer.Serialize(new ChatCompletionAPIRequest
{
@ -65,6 +68,7 @@ public sealed class ProviderX() : BaseProvider("https://api.x.ai/v1/", LOGGER)
// Right now, we only support streaming completions:
Stream = true,
AdditionalApiParameters = apiParameters
}, JSON_SERIALIZER_OPTIONS);
async Task<HttpRequestMessage> RequestBuilder()
@ -111,8 +115,6 @@ public sealed class ProviderX() : BaseProvider("https://api.x.ai/v1/", LOGGER)
return Task.FromResult<IEnumerable<Model>>([]);
}
public override IReadOnlyCollection<Capability> GetModelCapabilities(Model model) => CapabilitiesOpenSource.GetCapabilities(model);
#endregion
private async Task<IEnumerable<Model>> LoadModels(string[] prefixes, CancellationToken token, string? apiKeyProvisional = null)

View File

@ -31,7 +31,8 @@ public sealed record Provider(
Guid EnterpriseConfigurationPluginId = default,
string Hostname = "http://localhost:1234",
Host Host = Host.NONE,
HFInferenceProvider HFInferenceProvider = HFInferenceProvider.NONE,
string AdditionalJsonApiParameters = "") : ConfigurationBaseObject, ISecretId
{
private static readonly ILogger<Provider> LOGGER = Program.LOGGER_FACTORY.CreateLogger<Provider>();
@ -132,6 +133,12 @@ public sealed record Provider(
LOGGER.LogWarning($"The configured provider {idx} does not contain a valid model configuration.");
return false;
}
if (!table.TryGetValue("AdditionalJsonApiParameters", out var additionalJsonApiParametersValue) || !additionalJsonApiParametersValue.TryRead<string>(out var additionalJsonApiParameters))
{
LOGGER.LogWarning($"The configured provider {idx} does not contain valid additional JSON API parameters.");
return false;
}
provider = new Provider
{
@ -144,7 +151,8 @@ public sealed record Provider(
IsEnterpriseConfiguration = true,
EnterpriseConfigurationPluginId = configPluginId,
Hostname = hostname,
Host = host,
AdditionalJsonApiParameters = additionalJsonApiParameters,
};
return true;
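For enterprise configuration plugin authors, a hypothetical entry for the new, now-mandatory field (the parser above expects a string holding a JSON object):

// Hypothetical provider entry in a configuration plugin (key name from this commit):
//   ["AdditionalJsonApiParameters"] = "{ \"temperature\": 0.4 }"
// An empty string also passes validation and simply sends no extra parameters.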

View File

@ -0,0 +1,84 @@
using AIStudio.Provider;
namespace AIStudio.Settings;
public static partial class ProviderExtensions
{
public static List<Capability> GetModelCapabilitiesAlibaba(Model model)
{
var modelName = model.Id.ToLowerInvariant().AsSpan();
// Qwen models:
if (modelName.StartsWith("qwen"))
{
// Check for omni models:
if (modelName.IndexOf("omni") is not -1)
return
[
Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
Capability.AUDIO_INPUT, Capability.SPEECH_INPUT,
Capability.VIDEO_INPUT,
Capability.TEXT_OUTPUT, Capability.SPEECH_OUTPUT,
Capability.CHAT_COMPLETION_API,
];
// Check for Qwen 3:
if(modelName.StartsWith("qwen3"))
return
[
Capability.TEXT_INPUT,
Capability.TEXT_OUTPUT,
Capability.OPTIONAL_REASONING, Capability.FUNCTION_CALLING,
Capability.CHAT_COMPLETION_API,
];
if(modelName.IndexOf("-vl-") is not -1)
return
[
Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
Capability.TEXT_OUTPUT,
Capability.CHAT_COMPLETION_API,
];
}
// QwQ models:
if (modelName.StartsWith("qwq"))
{
return
[
Capability.TEXT_INPUT,
Capability.TEXT_OUTPUT,
Capability.ALWAYS_REASONING, Capability.FUNCTION_CALLING,
Capability.CHAT_COMPLETION_API,
];
}
// QVQ models:
if (modelName.StartsWith("qvq"))
{
return
[
Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
Capability.TEXT_OUTPUT,
Capability.ALWAYS_REASONING,
Capability.CHAT_COMPLETION_API,
];
}
// Default to text input and output:
return
[
Capability.TEXT_INPUT,
Capability.TEXT_OUTPUT,
Capability.FUNCTION_CALLING,
Capability.CHAT_COMPLETION_API,
];
}
}

View File

@ -0,0 +1,49 @@
using AIStudio.Provider;
namespace AIStudio.Settings;
public static partial class ProviderExtensions
{
public static List<Capability> GetModelCapabilitiesAnthropic(Model model)
{
var modelName = model.Id.ToLowerInvariant().AsSpan();
// Claude 4.x models:
if(modelName.StartsWith("claude-opus-4") || modelName.StartsWith("claude-sonnet-4"))
return [
Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
Capability.TEXT_OUTPUT,
Capability.OPTIONAL_REASONING, Capability.FUNCTION_CALLING,
Capability.CHAT_COMPLETION_API,
];
// Claude 3.7 is able to do reasoning:
if(modelName.StartsWith("claude-3-7"))
return [
Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
Capability.TEXT_OUTPUT,
Capability.OPTIONAL_REASONING, Capability.FUNCTION_CALLING,
Capability.CHAT_COMPLETION_API,
];
// All other 3.x models are able to process text and images as input:
if(modelName.StartsWith("claude-3-"))
return [
Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
Capability.TEXT_OUTPUT,
Capability.FUNCTION_CALLING,
Capability.CHAT_COMPLETION_API,
];
// Any other model is able to process text only:
return [
Capability.TEXT_INPUT,
Capability.TEXT_OUTPUT,
Capability.FUNCTION_CALLING,
Capability.CHAT_COMPLETION_API,
];
}
}

View File

@ -0,0 +1,28 @@
using AIStudio.Provider;
namespace AIStudio.Settings;
public static partial class ProviderExtensions
{
public static List<Capability> GetModelCapabilitiesDeepSeek(Model model)
{
var modelName = model.Id.ToLowerInvariant().AsSpan();
if(modelName.IndexOf("reasoner") is not -1)
return
[
Capability.TEXT_INPUT,
Capability.TEXT_OUTPUT,
Capability.ALWAYS_REASONING,
Capability.CHAT_COMPLETION_API,
];
return
[
Capability.TEXT_INPUT,
Capability.TEXT_OUTPUT,
Capability.CHAT_COMPLETION_API,
];
}
}

View File

@ -0,0 +1,95 @@
using AIStudio.Provider;
namespace AIStudio.Settings;
public static partial class ProviderExtensions
{
public static List<Capability> GetModelCapabilitiesGoogle(Model model)
{
var modelName = model.Id.ToLowerInvariant().AsSpan();
if (modelName.IndexOf("gemini-") is not -1)
{
// Reasoning models:
if (modelName.IndexOf("gemini-2.5") is not -1)
return
[
Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, Capability.AUDIO_INPUT,
Capability.SPEECH_INPUT, Capability.VIDEO_INPUT,
Capability.TEXT_OUTPUT,
Capability.ALWAYS_REASONING, Capability.FUNCTION_CALLING,
Capability.CHAT_COMPLETION_API,
];
// Image generation:
if(modelName.IndexOf("-2.0-flash-preview-image-") is not -1)
return
[
Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, Capability.AUDIO_INPUT,
Capability.SPEECH_INPUT, Capability.VIDEO_INPUT,
Capability.TEXT_OUTPUT, Capability.IMAGE_OUTPUT,
Capability.CHAT_COMPLETION_API,
];
// Realtime model:
if(modelName.IndexOf("-2.0-flash-live-") is not -1)
return
[
Capability.TEXT_INPUT, Capability.AUDIO_INPUT, Capability.SPEECH_INPUT,
Capability.VIDEO_INPUT,
Capability.TEXT_OUTPUT, Capability.SPEECH_OUTPUT,
Capability.FUNCTION_CALLING,
Capability.CHAT_COMPLETION_API,
];
// The 2.0 flash models cannot call functions:
if(modelName.IndexOf("-2.0-flash-") is not -1)
return
[
Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, Capability.AUDIO_INPUT,
Capability.SPEECH_INPUT, Capability.VIDEO_INPUT,
Capability.TEXT_OUTPUT,
Capability.CHAT_COMPLETION_API,
];
// The old 1.0 pro vision model:
if(modelName.IndexOf("pro-vision") is not -1)
return
[
Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
Capability.TEXT_OUTPUT,
Capability.CHAT_COMPLETION_API,
];
// Default to all other Gemini models:
return
[
Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, Capability.AUDIO_INPUT,
Capability.SPEECH_INPUT, Capability.VIDEO_INPUT,
Capability.TEXT_OUTPUT,
Capability.FUNCTION_CALLING,
Capability.CHAT_COMPLETION_API,
];
}
// Default for all other models:
return
[
Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
Capability.TEXT_OUTPUT,
Capability.FUNCTION_CALLING,
Capability.CHAT_COMPLETION_API,
];
}
}

View File

@ -0,0 +1,56 @@
using AIStudio.Provider;
namespace AIStudio.Settings;
public static partial class ProviderExtensions
{
public static List<Capability> GetModelCapabilitiesMistral(Model model)
{
var modelName = model.Id.ToLowerInvariant().AsSpan();
// Pixtral models are able to process images:
if (modelName.IndexOf("pixtral") is not -1)
return
[
Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
Capability.TEXT_OUTPUT,
Capability.FUNCTION_CALLING,
Capability.CHAT_COMPLETION_API,
];
// Mistral medium:
if (modelName.IndexOf("mistral-medium-") is not -1)
return
[
Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
Capability.TEXT_OUTPUT,
Capability.FUNCTION_CALLING,
Capability.CHAT_COMPLETION_API,
];
// Mistral small:
if (modelName.IndexOf("mistral-small-") is not -1)
return
[
Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
Capability.TEXT_OUTPUT,
Capability.FUNCTION_CALLING,
Capability.CHAT_COMPLETION_API,
];
// Mistral saba:
if (modelName.IndexOf("mistral-saba-") is not -1)
return
[
Capability.TEXT_INPUT,
Capability.TEXT_OUTPUT,
Capability.CHAT_COMPLETION_API,
];
// Default:
return GetModelCapabilitiesOpenSource(model);
}
}

View File

@ -0,0 +1,144 @@
using AIStudio.Provider;
namespace AIStudio.Settings;
public static partial class ProviderExtensions
{
public static List<Capability> GetModelCapabilitiesOpenAI(Model model)
{
var modelName = model.Id.ToLowerInvariant().AsSpan();
if (modelName is "gpt-4o-search-preview")
return
[
Capability.TEXT_INPUT,
Capability.TEXT_OUTPUT,
Capability.WEB_SEARCH,
Capability.CHAT_COMPLETION_API,
];
if (modelName is "gpt-4o-mini-search-preview")
return
[
Capability.TEXT_INPUT,
Capability.TEXT_OUTPUT,
Capability.WEB_SEARCH,
Capability.CHAT_COMPLETION_API,
];
if (modelName.StartsWith("o1-mini"))
return
[
Capability.TEXT_INPUT,
Capability.TEXT_OUTPUT,
Capability.ALWAYS_REASONING,
Capability.CHAT_COMPLETION_API,
];
if(modelName is "gpt-3.5-turbo")
return
[
Capability.TEXT_INPUT,
Capability.TEXT_OUTPUT,
Capability.RESPONSES_API,
];
if(modelName.StartsWith("gpt-3.5"))
return
[
Capability.TEXT_INPUT,
Capability.TEXT_OUTPUT,
Capability.CHAT_COMPLETION_API,
];
if (modelName.StartsWith("chatgpt-4o-"))
return
[
Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
Capability.TEXT_OUTPUT,
Capability.RESPONSES_API,
];
if (modelName.StartsWith("o3-mini"))
return
[
Capability.TEXT_INPUT,
Capability.TEXT_OUTPUT,
Capability.ALWAYS_REASONING, Capability.FUNCTION_CALLING,
Capability.RESPONSES_API,
];
if (modelName.StartsWith("o4-mini") || modelName.StartsWith("o3"))
return
[
Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
Capability.TEXT_OUTPUT,
Capability.ALWAYS_REASONING, Capability.FUNCTION_CALLING,
Capability.WEB_SEARCH,
Capability.RESPONSES_API,
];
if (modelName.StartsWith("o1"))
return
[
Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
Capability.TEXT_OUTPUT,
Capability.ALWAYS_REASONING, Capability.FUNCTION_CALLING,
Capability.RESPONSES_API,
];
if(modelName.StartsWith("gpt-4-turbo"))
return
[
Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
Capability.TEXT_OUTPUT,
Capability.FUNCTION_CALLING,
Capability.RESPONSES_API,
];
if(modelName is "gpt-4" || modelName.StartsWith("gpt-4-"))
return
[
Capability.TEXT_INPUT,
Capability.TEXT_OUTPUT,
Capability.RESPONSES_API,
];
if(modelName.StartsWith("gpt-5-nano"))
return
[
Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
Capability.TEXT_OUTPUT,
Capability.FUNCTION_CALLING, Capability.ALWAYS_REASONING,
Capability.RESPONSES_API,
];
if(modelName is "gpt-5" || modelName.StartsWith("gpt-5-"))
return
[
Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
Capability.TEXT_OUTPUT,
Capability.FUNCTION_CALLING, Capability.ALWAYS_REASONING,
Capability.WEB_SEARCH,
Capability.RESPONSES_API,
];
return
[
Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
Capability.TEXT_OUTPUT,
Capability.FUNCTION_CALLING,
Capability.RESPONSES_API,
];
}
}

View File

@ -1,8 +1,10 @@
using AIStudio.Provider;
namespace AIStudio.Settings;
public static partial class ProviderExtensions
{
public static List<Capability> GetModelCapabilitiesOpenSource(Model model)
{
var modelName = model.Id.ToLowerInvariant().AsSpan();

View File

@ -0,0 +1,38 @@
using AIStudio.Provider;
namespace AIStudio.Settings;
public static partial class ProviderExtensions
{
public static List<Capability> GetModelCapabilitiesPerplexity(Model model)
{
var modelName = model.Id.ToLowerInvariant().AsSpan();
if(modelName.IndexOf("reasoning") is not -1 ||
modelName.IndexOf("deep-research") is not -1)
return
[
Capability.TEXT_INPUT,
Capability.MULTIPLE_IMAGE_INPUT,
Capability.TEXT_OUTPUT,
Capability.IMAGE_OUTPUT,
Capability.ALWAYS_REASONING,
Capability.WEB_SEARCH,
Capability.CHAT_COMPLETION_API,
];
return
[
Capability.TEXT_INPUT,
Capability.MULTIPLE_IMAGE_INPUT,
Capability.TEXT_OUTPUT,
Capability.IMAGE_OUTPUT,
Capability.WEB_SEARCH,
Capability.CHAT_COMPLETION_API,
];
}
}

View File

@ -0,0 +1,29 @@
using AIStudio.Provider;
namespace AIStudio.Settings;
public static partial class ProviderExtensions
{
public static List<Capability> GetModelCapabilities(this Provider provider) => provider.UsedLLMProvider switch
{
LLMProviders.OPEN_AI => GetModelCapabilitiesOpenAI(provider.Model),
LLMProviders.MISTRAL => GetModelCapabilitiesMistral(provider.Model),
LLMProviders.ANTHROPIC => GetModelCapabilitiesAnthropic(provider.Model),
LLMProviders.GOOGLE => GetModelCapabilitiesGoogle(provider.Model),
LLMProviders.X => GetModelCapabilitiesOpenSource(provider.Model),
LLMProviders.DEEP_SEEK => GetModelCapabilitiesDeepSeek(provider.Model),
LLMProviders.ALIBABA_CLOUD => GetModelCapabilitiesAlibaba(provider.Model),
LLMProviders.PERPLEXITY => GetModelCapabilitiesPerplexity(provider.Model),
LLMProviders.GROQ => GetModelCapabilitiesOpenSource(provider.Model),
LLMProviders.FIREWORKS => GetModelCapabilitiesOpenSource(provider.Model),
LLMProviders.HUGGINGFACE => GetModelCapabilitiesOpenSource(provider.Model),
LLMProviders.HELMHOLTZ => GetModelCapabilitiesOpenSource(provider.Model),
LLMProviders.GWDG => GetModelCapabilitiesOpenSource(provider.Model),
LLMProviders.SELF_HOSTED => GetModelCapabilitiesOpenSource(provider.Model),
_ => []
};
}
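A minimal usage sketch for the new dispatch (variable names are illustrative):

// providerSettings is an AIStudio.Settings.Provider instance:
var capabilities = providerSettings.GetModelCapabilities();
if (capabilities.Contains(Capability.FUNCTION_CALLING))
{
    // This provider/model combination may be offered tools.
}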

View File

@ -1,3 +1,4 @@
# v0.9.53, build 228 (2025-10-xx xx:xx UTC)
- Added expert settings to the provider dialog to enable setting additional parameters. Also, additional parameters can be configured by configuration plugins for enterprise scenarios. Thanks to Peer (`peerschuett`) for this contribution.
- Added the ability to export AI responses from the chat into Microsoft Word files. Thank you, Sabrina (`Sabrina-devops`), for your first contribution.
- Removed the character limit for profiles.