Additional API parameters can now be added

This commit is contained in:
Peer Schütt 2025-10-10 17:12:59 +02:00
parent 9587a07556
commit cd17dbabf4
31 changed files with 293 additions and 515 deletions

View File

@ -72,6 +72,7 @@ public partial class SettingsPanelProviders : SettingsPanelBase
{ x => x.IsEditing, true },
{ x => x.DataHost, provider.Host },
{ x => x.HFInferenceProviderId, provider.HFInferenceProvider },
{ x => x.ExpertProviderApiParameters, provider.ExpertProviderApiParameters },
};
var dialogReference = await this.DialogService.ShowAsync<ProviderDialog>(T("Edit LLM Provider"), dialogParameters, DialogOptions.FULLSCREEN);

View File

@ -130,6 +130,16 @@
UserAttributes="@SPELLCHECK_ATTRIBUTES"
/>
<MudStack>
<MudButton OnClick="@this.ToggleProviderExpertSettings">@(this.showExpertProviderSettings ? @T("Hide Expert Settings") : @T("Show Expert Settings"))</MudButton>
<MudDivider />
<MudCollapse Expanded="@this.showExpertProviderSettings">
<MudJustifiedText>@T("Please be aware: This is for experts only. You are responsible for verifying the correctness of the additional parameters you provide to the API call.")</MudJustifiedText>
<MudJustifiedText>@T("By default, AI Studio uses the OpenAI-compatible chat/completions API, provided it is supported by the underlying service.")</MudJustifiedText>
<MudTextField T="string" Label=@T("Additional API parameters") Variant="Variant.Outlined" Lines="3" AutoGrow="true" HelperText=@T("Add the parameters in proper json formatting. E.g. \"temperature\": 0.5. Trailing commas have to be removed.") HelperTextOnFocus="true" @bind-Value="@ExpertProviderApiParameters" OnBlur="@OnInputChangeExpertSettings"></MudTextField>
</MudCollapse>
</MudStack>
</MudForm>
<Issues IssuesData="@this.dataIssues"/>
</DialogContent>

View File

@ -78,6 +78,9 @@ public partial class ProviderDialog : MSGComponentBase, ISecretId
[Parameter]
public bool IsEditing { get; init; }
[Parameter]
public string ExpertProviderApiParameters { get; set; } = string.Empty;
[Inject]
private RustService RustService { get; init; } = null!;
@ -94,6 +97,7 @@ public partial class ProviderDialog : MSGComponentBase, ISecretId
private string dataManuallyModel = string.Empty;
private string dataAPIKeyStorageIssue = string.Empty;
private string dataEditingPreviousInstanceName = string.Empty;
private bool showExpertProviderSettings = false;
// We get the form reference from Blazor code to validate it manually:
private MudForm form = null!;
@ -135,6 +139,7 @@ public partial class ProviderDialog : MSGComponentBase, ISecretId
Hostname = cleanedHostname.EndsWith('/') ? cleanedHostname[..^1] : cleanedHostname,
Host = this.DataHost,
HFInferenceProvider = this.HFInferenceProviderId,
ExpertProviderApiParameters = this.ExpertProviderApiParameters,
};
}
@ -150,6 +155,8 @@ public partial class ProviderDialog : MSGComponentBase, ISecretId
this.UsedInstanceNames = this.SettingsManager.ConfigurationData.Providers.Select(x => x.InstanceName.ToLowerInvariant()).ToList();
#pragma warning restore MWAIS0001
this.showExpertProviderSettings = !string.IsNullOrEmpty(this.ExpertProviderApiParameters);
// When editing, we need to load the data:
if(this.IsEditing)
{
@ -268,4 +275,14 @@ public partial class ProviderDialog : MSGComponentBase, ISecretId
LLMProviders.SELF_HOSTED => T("(Optional) API Key"),
_ => T("API Key"),
};
/// <summary>
/// Flips the visibility of the expert provider settings section.
/// </summary>
private void ToggleProviderExpertSettings() => this.showExpertProviderSettings ^= true;
/// <summary>
/// Called when the expert API parameter field loses focus. Strips trailing
/// whitespace and trailing commas so the text can later be wrapped in braces
/// and parsed as a JSON object.
/// </summary>
private void OnInputChangeExpertSettings()
{
    // The previous TrimEnd(',', ' ') missed tabs and newlines: the text field
    // is multi-line (AutoGrow), so input like "\"temperature\": 0.5,\n" kept
    // its trailing comma and later failed to parse (trailing commas are
    // rejected by the serializer options). Trim the full whitespace set too.
    this.ExpertProviderApiParameters = this.ExpertProviderApiParameters.TrimEnd(',', ' ', '\t', '\r', '\n');
}
}

View File

@ -21,6 +21,9 @@ public sealed class ProviderAlibabaCloud() : BaseProvider("https://dashscope-int
/// <inheritdoc />
public override string InstanceName { get; set; } = "AlibabaCloud";
/// <inheritdoc />
public override string ExpertProviderApiParameters { get; set; } = string.Empty;
/// <inheritdoc />
public override async IAsyncEnumerable<ContentStreamChunk> StreamChatCompletion(Model chatModel, ChatThread chatThread, SettingsManager settingsManager, [EnumeratorCancellation] CancellationToken token = default)
{
@ -36,6 +39,9 @@ public sealed class ProviderAlibabaCloud() : BaseProvider("https://dashscope-int
Content = chatThread.PrepareSystemPrompt(settingsManager, chatThread),
};
// Parse the API parameters:
var apiParameters = this.ParseApiParameters(this.ExpertProviderApiParameters);
// Prepare the AlibabaCloud HTTP chat request:
var alibabaCloudChatRequest = JsonSerializer.Serialize(new ChatCompletionAPIRequest
{
@ -63,6 +69,7 @@ public sealed class ProviderAlibabaCloud() : BaseProvider("https://dashscope-int
}
}).ToList()],
Stream = true,
AdditionalApiParameters = apiParameters
}, JSON_SERIALIZER_OPTIONS);
async Task<HttpRequestMessage> RequestBuilder()
@ -141,84 +148,6 @@ public sealed class ProviderAlibabaCloud() : BaseProvider("https://dashscope-int
return this.LoadModels(["text-embedding-"], token, apiKeyProvisional).ContinueWith(t => t.Result.Concat(additionalModels).OrderBy(x => x.Id).AsEnumerable(), token);
}
/// <inheritdoc />
/// <inheritdoc />
public override IReadOnlyCollection<Capability> GetModelCapabilities(Model model)
{
    var id = model.Id;

    // Qwen family:
    if (id.StartsWith("qwen", StringComparison.OrdinalIgnoreCase))
    {
        // Omni models accept text, images, audio, speech, and video, and can speak:
        if (id.Contains("omni", StringComparison.OrdinalIgnoreCase))
            return
            [
                Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
                Capability.AUDIO_INPUT, Capability.SPEECH_INPUT,
                Capability.VIDEO_INPUT,
                Capability.TEXT_OUTPUT, Capability.SPEECH_OUTPUT,
                Capability.CHAT_COMPLETION_API,
            ];

        // Qwen 3 models support optional reasoning and tool calling:
        if (id.StartsWith("qwen3", StringComparison.OrdinalIgnoreCase))
            return
            [
                Capability.TEXT_INPUT,
                Capability.TEXT_OUTPUT,
                Capability.OPTIONAL_REASONING, Capability.FUNCTION_CALLING,
                Capability.CHAT_COMPLETION_API,
            ];

        // Vision-language (VL) models accept images as well:
        if (id.Contains("-vl-", StringComparison.OrdinalIgnoreCase))
            return
            [
                Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
                Capability.TEXT_OUTPUT,
                Capability.CHAT_COMPLETION_API,
            ];
    }

    // QwQ models always reason and can call tools:
    if (id.StartsWith("qwq", StringComparison.OrdinalIgnoreCase))
        return
        [
            Capability.TEXT_INPUT,
            Capability.TEXT_OUTPUT,
            Capability.ALWAYS_REASONING, Capability.FUNCTION_CALLING,
            Capability.CHAT_COMPLETION_API,
        ];

    // QVQ models always reason and accept images:
    if (id.StartsWith("qvq", StringComparison.OrdinalIgnoreCase))
        return
        [
            Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
            Capability.TEXT_OUTPUT,
            Capability.ALWAYS_REASONING,
            Capability.CHAT_COMPLETION_API,
        ];

    // Default for all remaining models (including unmatched Qwen variants):
    return
    [
        Capability.TEXT_INPUT,
        Capability.TEXT_OUTPUT,
        Capability.FUNCTION_CALLING,
        Capability.CHAT_COMPLETION_API,
    ];
}
#endregion

View File

@ -1,3 +1,4 @@
using System.Text.Json.Serialization;
using AIStudio.Provider.OpenAI;
namespace AIStudio.Provider.Anthropic;
@ -16,4 +17,9 @@ public readonly record struct ChatRequest(
int MaxTokens,
bool Stream,
string System
);
)
{
[JsonExtensionData]
public Dictionary<string, object?> AdditionalApiParameters { get; init; }
}

View File

@ -19,6 +19,9 @@ public sealed class ProviderAnthropic() : BaseProvider("https://api.anthropic.co
public override string InstanceName { get; set; } = "Anthropic";
/// <inheritdoc />
public override string ExpertProviderApiParameters { get; set; } = string.Empty;
/// <inheritdoc />
public override async IAsyncEnumerable<ContentStreamChunk> StreamChatCompletion(Model chatModel, ChatThread chatThread, SettingsManager settingsManager, [EnumeratorCancellation] CancellationToken token = default)
{
@ -27,6 +30,9 @@ public sealed class ProviderAnthropic() : BaseProvider("https://api.anthropic.co
if(!requestedSecret.Success)
yield break;
// Parse the API parameters:
var apiParameters = this.ParseApiParameters(this.ExpertProviderApiParameters, ["system"]);
// Prepare the Anthropic HTTP chat request:
var chatRequest = JsonSerializer.Serialize(new ChatRequest
{
@ -52,10 +58,11 @@ public sealed class ProviderAnthropic() : BaseProvider("https://api.anthropic.co
}).ToList()],
System = chatThread.PrepareSystemPrompt(settingsManager, chatThread),
MaxTokens = 4_096,
MaxTokens = apiParameters["max_tokens"] as int? ?? 4_096,
// Right now, we only support streaming completions:
Stream = true,
AdditionalApiParameters = apiParameters
}, JSON_SERIALIZER_OPTIONS);
async Task<HttpRequestMessage> RequestBuilder()
@ -114,49 +121,6 @@ public sealed class ProviderAnthropic() : BaseProvider("https://api.anthropic.co
return Task.FromResult(Enumerable.Empty<Model>());
}
/// <inheritdoc />
public override IReadOnlyCollection<Capability> GetModelCapabilities(Model model)
{
    var id = model.Id;

    // Claude 4.x and Claude 3.7 share the same capability set: vision,
    // optional reasoning, and tool calling. The branches are merged here
    // because their returned lists were identical.
    if (id.StartsWith("claude-opus-4", StringComparison.OrdinalIgnoreCase)
        || id.StartsWith("claude-sonnet-4", StringComparison.OrdinalIgnoreCase)
        || id.StartsWith("claude-3-7", StringComparison.OrdinalIgnoreCase))
        return
        [
            Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
            Capability.TEXT_OUTPUT,
            Capability.OPTIONAL_REASONING, Capability.FUNCTION_CALLING,
            Capability.CHAT_COMPLETION_API,
        ];

    // All other 3.x models process text and images as input but do not reason:
    if (id.StartsWith("claude-3-", StringComparison.OrdinalIgnoreCase))
        return
        [
            Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
            Capability.TEXT_OUTPUT,
            Capability.FUNCTION_CALLING,
            Capability.CHAT_COMPLETION_API,
        ];

    // Any other model: text only.
    return
    [
        Capability.TEXT_INPUT,
        Capability.TEXT_OUTPUT,
        Capability.FUNCTION_CALLING,
        Capability.CHAT_COMPLETION_API,
    ];
}
#endregion
private async Task<IEnumerable<Model>> LoadModels(CancellationToken token, string? apiKeyProvisional = null)

View File

@ -40,7 +40,8 @@ public abstract class BaseProvider : IProvider, ISecretId
protected static readonly JsonSerializerOptions JSON_SERIALIZER_OPTIONS = new()
{
PropertyNamingPolicy = JsonNamingPolicy.SnakeCaseLower,
Converters = { new AnnotationConverter() }
Converters = { new AnnotationConverter() },
AllowTrailingCommas = false
};
/// <summary>
@ -64,6 +65,9 @@ public abstract class BaseProvider : IProvider, ISecretId
/// <inheritdoc />
public abstract string InstanceName { get; set; }
/// <inheritdoc />
public abstract string ExpertProviderApiParameters { get; set; }
/// <inheritdoc />
public abstract IAsyncEnumerable<ContentStreamChunk> StreamChatCompletion(Model chatModel, ChatThread chatThread, SettingsManager settingsManager, CancellationToken token = default);
@ -79,9 +83,6 @@ public abstract class BaseProvider : IProvider, ISecretId
/// <inheritdoc />
public abstract Task<IEnumerable<Model>> GetEmbeddingModels(string? apiKeyProvisional = null, CancellationToken token = default);
/// <inheritdoc />
public abstract IReadOnlyCollection<Capability> GetModelCapabilities(Model model);
#endregion
#region Implementation of ISecretId
@ -106,6 +107,7 @@ public abstract class BaseProvider : IProvider, ISecretId
var retry = 0;
var response = default(HttpResponseMessage);
var errorMessage = string.Empty;
var errorBody = string.Empty;
while (retry++ < MAX_RETRIES)
{
using var request = await requestBuilder();
@ -125,12 +127,12 @@ public abstract class BaseProvider : IProvider, ISecretId
break;
}
var errorBody = await nextResponse.Content.ReadAsStringAsync(token);
errorBody = await nextResponse.Content.ReadAsStringAsync(token);
if (nextResponse.StatusCode is HttpStatusCode.Forbidden)
{
await MessageBus.INSTANCE.SendError(new(Icons.Material.Filled.Block, string.Format(TB("Tried to communicate with the LLM provider '{0}'. You might not be able to use this provider from your location. The provider message is: '{1}'"), this.InstanceName, nextResponse.ReasonPhrase)));
this.logger.LogError($"Failed request with status code {nextResponse.StatusCode} (message = '{nextResponse.ReasonPhrase}').");
this.logger.LogDebug($"Error body: {errorBody}");
this.logger.LogError($"Error body: {errorBody}");
errorMessage = nextResponse.ReasonPhrase;
break;
}
@ -139,7 +141,7 @@ public abstract class BaseProvider : IProvider, ISecretId
{
await MessageBus.INSTANCE.SendError(new(Icons.Material.Filled.CloudOff, string.Format(TB("Tried to communicate with the LLM provider '{0}'. The required message format might be changed. The provider message is: '{1}'"), this.InstanceName, nextResponse.ReasonPhrase)));
this.logger.LogError($"Failed request with status code {nextResponse.StatusCode} (message = '{nextResponse.ReasonPhrase}').");
this.logger.LogDebug($"Error body: {errorBody}");
this.logger.LogError($"Error body: {errorBody}");
errorMessage = nextResponse.ReasonPhrase;
break;
}
@ -148,7 +150,7 @@ public abstract class BaseProvider : IProvider, ISecretId
{
await MessageBus.INSTANCE.SendError(new(Icons.Material.Filled.CloudOff, string.Format(TB("Tried to communicate with the LLM provider '{0}'. Something was not found. The provider message is: '{1}'"), this.InstanceName, nextResponse.ReasonPhrase)));
this.logger.LogError($"Failed request with status code {nextResponse.StatusCode} (message = '{nextResponse.ReasonPhrase}').");
this.logger.LogDebug($"Error body: {errorBody}");
this.logger.LogError($"Error body: {errorBody}");
errorMessage = nextResponse.ReasonPhrase;
break;
}
@ -157,7 +159,7 @@ public abstract class BaseProvider : IProvider, ISecretId
{
await MessageBus.INSTANCE.SendError(new(Icons.Material.Filled.Key, string.Format(TB("Tried to communicate with the LLM provider '{0}'. The API key might be invalid. The provider message is: '{1}'"), this.InstanceName, nextResponse.ReasonPhrase)));
this.logger.LogError($"Failed request with status code {nextResponse.StatusCode} (message = '{nextResponse.ReasonPhrase}').");
this.logger.LogDebug($"Error body: {errorBody}");
this.logger.LogError($"Error body: {errorBody}");
errorMessage = nextResponse.ReasonPhrase;
break;
}
@ -166,7 +168,7 @@ public abstract class BaseProvider : IProvider, ISecretId
{
await MessageBus.INSTANCE.SendError(new(Icons.Material.Filled.CloudOff, string.Format(TB("Tried to communicate with the LLM provider '{0}'. The server might be down or having issues. The provider message is: '{1}'"), this.InstanceName, nextResponse.ReasonPhrase)));
this.logger.LogError($"Failed request with status code {nextResponse.StatusCode} (message = '{nextResponse.ReasonPhrase}').");
this.logger.LogDebug($"Error body: {errorBody}");
this.logger.LogError($"Error body: {errorBody}");
errorMessage = nextResponse.ReasonPhrase;
break;
}
@ -175,7 +177,7 @@ public abstract class BaseProvider : IProvider, ISecretId
{
await MessageBus.INSTANCE.SendError(new(Icons.Material.Filled.CloudOff, string.Format(TB("Tried to communicate with the LLM provider '{0}'. The provider is overloaded. The message is: '{1}'"), this.InstanceName, nextResponse.ReasonPhrase)));
this.logger.LogError($"Failed request with status code {nextResponse.StatusCode} (message = '{nextResponse.ReasonPhrase}').");
this.logger.LogDebug($"Error body: {errorBody}");
this.logger.LogError($"Error body: {errorBody}");
errorMessage = nextResponse.ReasonPhrase;
break;
}
@ -191,7 +193,7 @@ public abstract class BaseProvider : IProvider, ISecretId
if(retry >= MAX_RETRIES || !string.IsNullOrWhiteSpace(errorMessage))
{
await MessageBus.INSTANCE.SendError(new(Icons.Material.Filled.CloudOff, string.Format(TB("Tried to communicate with the LLM provider '{0}'. Even after {1} retries, there were some problems with the request. The provider message is: '{2}'"), this.InstanceName, MAX_RETRIES, errorMessage)));
await MessageBus.INSTANCE.SendError(new(Icons.Material.Filled.CloudOff, string.Format(TB("Tried to communicate with the LLM provider '{0}'. Even after {1} retries, there were some problems with the request. The provider message is: '{2}'. The error body is: '{3}'"), this.InstanceName, MAX_RETRIES, errorMessage, errorBody)));
return new HttpRateLimitedStreamResult(false, true, errorMessage ?? $"Failed after {MAX_RETRIES} retries; no provider message available", response);
}
@ -522,4 +524,55 @@ public abstract class BaseProvider : IProvider, ISecretId
streamReader.Dispose();
}
/// <summary>
/// Parse and convert API parameters from a provided JSON string into a dictionary,
/// removing keys which AI Studio always sets itself.
/// </summary>
/// <param name="additionalUserProvidedParameters">A JSON string (without surrounding braces) containing the API parameters to be parsed. A single pair of surrounding braces is tolerated and stripped. May be empty.</param>
/// <param name="keysToRemove">Optional list of keys to remove from the final dictionary (case-insensitive). stream, model and messages are removed by default.</param>
/// <returns>The parsed parameters; empty when no parameters were provided.</returns>
protected Dictionary<string, object?> ParseApiParameters(
    string additionalUserProvidedParameters,
    IEnumerable<string>? keysToRemove = null)
{
    // Fast path for empty or whitespace-only input:
    var trimmed = additionalUserProvidedParameters?.Trim() ?? string.Empty;
    if (trimmed.Length == 0)
        return new();

    // Be lenient when the user already wrapped the parameters in braces;
    // otherwise $"{{{...}}}" below would produce invalid double-braced JSON:
    if (trimmed.StartsWith('{') && trimmed.EndsWith('}'))
        trimmed = trimmed[1..^1];

    var json = $"{{{trimmed}}}";
    var jsonDoc = JsonSerializer.Deserialize<JsonElement>(json, JSON_SERIALIZER_OPTIONS);
    var dict = this.ConvertToDictionary(jsonDoc);

    // Some keys are always removed because we always set them ourselves:
    var removeSet = new HashSet<string>(keysToRemove ?? [], StringComparer.OrdinalIgnoreCase)
    {
        "stream",
        "model",
        "messages",
    };

    // Materialize the matching keys first; removing while enumerating
    // dict.Keys directly would throw:
    foreach (var key in dict.Keys.Where(removeSet.Contains).ToList())
        dict.Remove(key);

    return dict;
}
/// <summary>
/// Converts a JSON object into a dictionary, recursively converting all property values.
/// </summary>
private Dictionary<string, object?> ConvertToDictionary(JsonElement element)
{
    // Add() mirrors ToDictionary's duplicate-key behavior (throws ArgumentException):
    var result = new Dictionary<string, object?>();
    foreach (var property in element.EnumerateObject())
        result.Add(property.Name, this.ConvertJsonElement(property.Value));

    return result;
}
/// <summary>
/// Converts a JsonElement into a plain .NET object: string, int/long/double,
/// bool, nested dictionary, list, or null.
/// </summary>
private object? ConvertJsonElement(JsonElement element) => element.ValueKind switch
{
    JsonValueKind.String => element.GetString(),

    // Prefer int, then long, then double. The previous int -> double fallback
    // silently changed the type of any integer outside the Int32 range and
    // loses precision past 2^53 (e.g. large ids or timestamps). The explicit
    // (object) casts keep each branch boxed as its own numeric type instead of
    // being widened to double by the conditional operator.
    JsonValueKind.Number => element.TryGetInt32(out var i)
        ? (object)i
        : element.TryGetInt64(out var l)
            ? (object)l
            : element.GetDouble(),

    JsonValueKind.True => true,
    JsonValueKind.False => false,
    JsonValueKind.Object => this.ConvertToDictionary(element),
    JsonValueKind.Array => element.EnumerateArray().Select(this.ConvertJsonElement).ToList(),
    JsonValueKind.Null => null,

    // Undefined or anything unexpected: fall back to the raw text.
    _ => element.ToString()
};
}

View File

@ -21,6 +21,9 @@ public sealed class ProviderDeepSeek() : BaseProvider("https://api.deepseek.com/
/// <inheritdoc />
public override string InstanceName { get; set; } = "DeepSeek";
/// <inheritdoc />
public override string ExpertProviderApiParameters { get; set; } = string.Empty;
/// <inheritdoc />
public override async IAsyncEnumerable<ContentStreamChunk> StreamChatCompletion(Model chatModel, ChatThread chatThread, SettingsManager settingsManager, [EnumeratorCancellation] CancellationToken token = default)
{
@ -36,6 +39,9 @@ public sealed class ProviderDeepSeek() : BaseProvider("https://api.deepseek.com/
Content = chatThread.PrepareSystemPrompt(settingsManager, chatThread),
};
// Parse the API parameters:
var apiParameters = this.ParseApiParameters(this.ExpertProviderApiParameters);
// Prepare the DeepSeek HTTP chat request:
var deepSeekChatRequest = JsonSerializer.Serialize(new ChatCompletionAPIRequest
{
@ -63,6 +69,7 @@ public sealed class ProviderDeepSeek() : BaseProvider("https://api.deepseek.com/
}
}).ToList()],
Stream = true,
AdditionalApiParameters = apiParameters
}, JSON_SERIALIZER_OPTIONS);
async Task<HttpRequestMessage> RequestBuilder()
@ -108,28 +115,6 @@ public sealed class ProviderDeepSeek() : BaseProvider("https://api.deepseek.com/
return Task.FromResult(Enumerable.Empty<Model>());
}
/// <inheritdoc />
public override IReadOnlyCollection<Capability> GetModelCapabilities(Model model)
{
    // DeepSeek's reasoning models carry "reasoner" in their id (e.g. deepseek-reasoner):
    var isReasoningModel = model.Id.Contains("reasoner", StringComparison.OrdinalIgnoreCase);
    if (isReasoningModel)
        return
        [
            Capability.TEXT_INPUT,
            Capability.TEXT_OUTPUT,
            Capability.ALWAYS_REASONING,
            Capability.CHAT_COMPLETION_API,
        ];

    // All other DeepSeek models: plain text in, text out.
    return
    [
        Capability.TEXT_INPUT,
        Capability.TEXT_OUTPUT,
        Capability.CHAT_COMPLETION_API,
    ];
}
#endregion

View File

@ -1,3 +1,5 @@
using System.Text.Json.Serialization;
namespace AIStudio.Provider.Fireworks;
/// <summary>
@ -10,4 +12,9 @@ public readonly record struct ChatRequest(
string Model,
IList<Message> Messages,
bool Stream
);
)
{
[JsonExtensionData]
public Dictionary<string, object?> AdditionalApiParameters { get; init; }
}

View File

@ -21,6 +21,9 @@ public class ProviderFireworks() : BaseProvider("https://api.fireworks.ai/infere
/// <inheritdoc />
public override string InstanceName { get; set; } = "Fireworks.ai";
/// <inheritdoc />
public override string ExpertProviderApiParameters { get; set; } = string.Empty;
/// <inheritdoc />
public override async IAsyncEnumerable<ContentStreamChunk> StreamChatCompletion(Model chatModel, ChatThread chatThread, SettingsManager settingsManager, [EnumeratorCancellation] CancellationToken token = default)
{
@ -36,6 +39,9 @@ public class ProviderFireworks() : BaseProvider("https://api.fireworks.ai/infere
Content = chatThread.PrepareSystemPrompt(settingsManager, chatThread),
};
// Parse the API parameters:
var apiParameters = this.ParseApiParameters(this.ExpertProviderApiParameters);
// Prepare the Fireworks HTTP chat request:
var fireworksChatRequest = JsonSerializer.Serialize(new ChatRequest
{
@ -65,6 +71,7 @@ public class ProviderFireworks() : BaseProvider("https://api.fireworks.ai/infere
// Right now, we only support streaming completions:
Stream = true,
AdditionalApiParameters = apiParameters
}, JSON_SERIALIZER_OPTIONS);
async Task<HttpRequestMessage> RequestBuilder()
@ -110,7 +117,5 @@ public class ProviderFireworks() : BaseProvider("https://api.fireworks.ai/infere
return Task.FromResult(Enumerable.Empty<Model>());
}
public override IReadOnlyCollection<Capability> GetModelCapabilities(Model model) => CapabilitiesOpenSource.GetCapabilities(model);
#endregion
}

View File

@ -21,6 +21,9 @@ public sealed class ProviderGWDG() : BaseProvider("https://chat-ai.academiccloud
/// <inheritdoc />
public override string InstanceName { get; set; } = "GWDG SAIA";
/// <inheritdoc />
public override string ExpertProviderApiParameters { get; set; } = string.Empty;
/// <inheritdoc />
public override async IAsyncEnumerable<ContentStreamChunk> StreamChatCompletion(Model chatModel, ChatThread chatThread, SettingsManager settingsManager, [EnumeratorCancellation] CancellationToken token = default)
{
@ -36,6 +39,9 @@ public sealed class ProviderGWDG() : BaseProvider("https://chat-ai.academiccloud
Content = chatThread.PrepareSystemPrompt(settingsManager, chatThread),
};
// Parse the API parameters:
var apiParameters = this.ParseApiParameters(this.ExpertProviderApiParameters);
// Prepare the GWDG HTTP chat request:
var gwdgChatRequest = JsonSerializer.Serialize(new ChatCompletionAPIRequest
{
@ -63,6 +69,7 @@ public sealed class ProviderGWDG() : BaseProvider("https://chat-ai.academiccloud
}
}).ToList()],
Stream = true,
AdditionalApiParameters = apiParameters
}, JSON_SERIALIZER_OPTIONS);
async Task<HttpRequestMessage> RequestBuilder()
@ -110,8 +117,6 @@ public sealed class ProviderGWDG() : BaseProvider("https://chat-ai.academiccloud
return models.Where(model => model.Id.StartsWith("e5-", StringComparison.InvariantCultureIgnoreCase));
}
public override IReadOnlyCollection<Capability> GetModelCapabilities(Model model) => CapabilitiesOpenSource.GetCapabilities(model);
#endregion
private async Task<IEnumerable<Model>> LoadModels(CancellationToken token, string? apiKeyProvisional = null)

View File

@ -1,3 +1,4 @@
using System.Text.Json.Serialization;
using AIStudio.Provider.OpenAI;
namespace AIStudio.Provider.Google;
@ -12,4 +13,9 @@ public readonly record struct ChatRequest(
string Model,
IList<Message> Messages,
bool Stream
);
)
{
[JsonExtensionData]
public Dictionary<string, object?> AdditionalApiParameters { get; init; }
}

View File

@ -21,6 +21,9 @@ public class ProviderGoogle() : BaseProvider("https://generativelanguage.googlea
/// <inheritdoc />
public override string InstanceName { get; set; } = "Google Gemini";
/// <inheritdoc />
public override string ExpertProviderApiParameters { get; set; } = string.Empty;
/// <inheritdoc />
public override async IAsyncEnumerable<ContentStreamChunk> StreamChatCompletion(Provider.Model chatModel, ChatThread chatThread, SettingsManager settingsManager, [EnumeratorCancellation] CancellationToken token = default)
{
@ -36,6 +39,9 @@ public class ProviderGoogle() : BaseProvider("https://generativelanguage.googlea
Content = chatThread.PrepareSystemPrompt(settingsManager, chatThread),
};
// Parse the API parameters:
var apiParameters = this.ParseApiParameters(this.ExpertProviderApiParameters);
// Prepare the Google HTTP chat request:
var geminiChatRequest = JsonSerializer.Serialize(new ChatRequest
{
@ -65,6 +71,7 @@ public class ProviderGoogle() : BaseProvider("https://generativelanguage.googlea
// Right now, we only support streaming completions:
Stream = true,
AdditionalApiParameters = apiParameters
}, JSON_SERIALIZER_OPTIONS);
async Task<HttpRequestMessage> RequestBuilder()
@ -122,94 +129,6 @@ public class ProviderGoogle() : BaseProvider("https://generativelanguage.googlea
.Select(n => new Provider.Model(n.Name.Replace("models/", string.Empty), n.DisplayName));
}
/// <inheritdoc />
public override IReadOnlyCollection<Capability> GetModelCapabilities(Provider.Model model)
{
    var id = model.Id;

    if (id.Contains("gemini-", StringComparison.OrdinalIgnoreCase))
    {
        // Gemini 2.5 models always reason and accept all modalities:
        if (id.Contains("gemini-2.5", StringComparison.OrdinalIgnoreCase))
            return
            [
                Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, Capability.AUDIO_INPUT,
                Capability.SPEECH_INPUT, Capability.VIDEO_INPUT,
                Capability.TEXT_OUTPUT,
                Capability.ALWAYS_REASONING, Capability.FUNCTION_CALLING,
                Capability.CHAT_COMPLETION_API,
            ];

        // Image-generation preview models:
        if (id.Contains("-2.0-flash-preview-image-", StringComparison.OrdinalIgnoreCase))
            return
            [
                Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, Capability.AUDIO_INPUT,
                Capability.SPEECH_INPUT, Capability.VIDEO_INPUT,
                Capability.TEXT_OUTPUT, Capability.IMAGE_OUTPUT,
                Capability.CHAT_COMPLETION_API,
            ];

        // Realtime (live) models can speak but take no image input:
        if (id.Contains("-2.0-flash-live-", StringComparison.OrdinalIgnoreCase))
            return
            [
                Capability.TEXT_INPUT, Capability.AUDIO_INPUT, Capability.SPEECH_INPUT,
                Capability.VIDEO_INPUT,
                Capability.TEXT_OUTPUT, Capability.SPEECH_OUTPUT,
                Capability.FUNCTION_CALLING,
                Capability.CHAT_COMPLETION_API,
            ];

        // The remaining 2.0 flash models cannot call functions:
        if (id.Contains("-2.0-flash-", StringComparison.OrdinalIgnoreCase))
            return
            [
                Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, Capability.AUDIO_INPUT,
                Capability.SPEECH_INPUT, Capability.VIDEO_INPUT,
                Capability.TEXT_OUTPUT,
                Capability.CHAT_COMPLETION_API,
            ];

        // The old 1.0 pro vision model: text and images only:
        if (id.Contains("pro-vision", StringComparison.OrdinalIgnoreCase))
            return
            [
                Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
                Capability.TEXT_OUTPUT,
                Capability.CHAT_COMPLETION_API,
            ];

        // Default for all other Gemini models:
        return
        [
            Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, Capability.AUDIO_INPUT,
            Capability.SPEECH_INPUT, Capability.VIDEO_INPUT,
            Capability.TEXT_OUTPUT,
            Capability.FUNCTION_CALLING,
            Capability.CHAT_COMPLETION_API,
        ];
    }

    // Default for all non-Gemini models:
    return
    [
        Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
        Capability.TEXT_OUTPUT,
        Capability.FUNCTION_CALLING,
        Capability.CHAT_COMPLETION_API,
    ];
}
#endregion

View File

@ -1,3 +1,4 @@
using System.Text.Json.Serialization;
using AIStudio.Provider.OpenAI;
namespace AIStudio.Provider.Groq;
@ -14,4 +15,9 @@ public readonly record struct ChatRequest(
IList<Message> Messages,
bool Stream,
int Seed
);
)
{
[JsonExtensionData]
public Dictionary<string, object?> AdditionalApiParameters { get; init; }
}

View File

@ -21,6 +21,9 @@ public class ProviderGroq() : BaseProvider("https://api.groq.com/openai/v1/", LO
/// <inheritdoc />
public override string InstanceName { get; set; } = "Groq";
/// <inheritdoc />
public override string ExpertProviderApiParameters { get; set; } = string.Empty;
/// <inheritdoc />
public override async IAsyncEnumerable<ContentStreamChunk> StreamChatCompletion(Model chatModel, ChatThread chatThread, SettingsManager settingsManager, [EnumeratorCancellation] CancellationToken token = default)
{
@ -36,6 +39,9 @@ public class ProviderGroq() : BaseProvider("https://api.groq.com/openai/v1/", LO
Content = chatThread.PrepareSystemPrompt(settingsManager, chatThread),
};
// Parse the API parameters:
var apiParameters = this.ParseApiParameters(this.ExpertProviderApiParameters);
// Prepare the OpenAI HTTP chat request:
var groqChatRequest = JsonSerializer.Serialize(new ChatRequest
{
@ -65,6 +71,7 @@ public class ProviderGroq() : BaseProvider("https://api.groq.com/openai/v1/", LO
// Right now, we only support streaming completions:
Stream = true,
AdditionalApiParameters = apiParameters
}, JSON_SERIALIZER_OPTIONS);
async Task<HttpRequestMessage> RequestBuilder()
@ -110,8 +117,6 @@ public class ProviderGroq() : BaseProvider("https://api.groq.com/openai/v1/", LO
return Task.FromResult(Enumerable.Empty<Model>());
}
public override IReadOnlyCollection<Capability> GetModelCapabilities(Model model) => CapabilitiesOpenSource.GetCapabilities(model);
#endregion
private async Task<IEnumerable<Model>> LoadModels(CancellationToken token, string? apiKeyProvisional = null)

View File

@ -21,6 +21,9 @@ public sealed class ProviderHelmholtz() : BaseProvider("https://api.helmholtz-bl
/// <inheritdoc />
public override string InstanceName { get; set; } = "Helmholtz Blablador";
/// <inheritdoc />
public override string ExpertProviderApiParameters { get; set; } = string.Empty;
/// <inheritdoc />
public override async IAsyncEnumerable<ContentStreamChunk> StreamChatCompletion(Model chatModel, ChatThread chatThread, SettingsManager settingsManager, [EnumeratorCancellation] CancellationToken token = default)
{
@ -36,6 +39,9 @@ public sealed class ProviderHelmholtz() : BaseProvider("https://api.helmholtz-bl
Content = chatThread.PrepareSystemPrompt(settingsManager, chatThread),
};
// Parse the API parameters:
var apiParameters = this.ParseApiParameters(this.ExpertProviderApiParameters);
// Prepare the Helmholtz HTTP chat request:
var helmholtzChatRequest = JsonSerializer.Serialize(new ChatCompletionAPIRequest
{
@ -63,6 +69,7 @@ public sealed class ProviderHelmholtz() : BaseProvider("https://api.helmholtz-bl
}
}).ToList()],
Stream = true,
AdditionalApiParameters = apiParameters
}, JSON_SERIALIZER_OPTIONS);
async Task<HttpRequestMessage> RequestBuilder()
@ -114,8 +121,6 @@ public sealed class ProviderHelmholtz() : BaseProvider("https://api.helmholtz-bl
model.Id.Contains("gritlm", StringComparison.InvariantCultureIgnoreCase));
}
public override IReadOnlyCollection<Capability> GetModelCapabilities(Model model) => CapabilitiesOpenSource.GetCapabilities(model);
#endregion
private async Task<IEnumerable<Model>> LoadModels(CancellationToken token, string? apiKeyProvisional = null)

View File

@ -26,6 +26,9 @@ public sealed class ProviderHuggingFace : BaseProvider
/// <inheritdoc />
public override string InstanceName { get; set; } = "HuggingFace";
/// <inheritdoc />
public override string ExpertProviderApiParameters { get; set; } = string.Empty;
/// <inheritdoc />
public override async IAsyncEnumerable<ContentStreamChunk> StreamChatCompletion(Model chatModel, ChatThread chatThread, SettingsManager settingsManager, [EnumeratorCancellation] CancellationToken token = default)
{
@ -41,6 +44,9 @@ public sealed class ProviderHuggingFace : BaseProvider
Content = chatThread.PrepareSystemPrompt(settingsManager, chatThread),
};
// Parse the API parameters:
var apiParameters = this.ParseApiParameters(this.ExpertProviderApiParameters);
// Prepare the HuggingFace HTTP chat request:
var huggingfaceChatRequest = JsonSerializer.Serialize(new ChatCompletionAPIRequest
{
@ -68,6 +74,7 @@ public sealed class ProviderHuggingFace : BaseProvider
}
}).ToList()],
Stream = true,
AdditionalApiParameters = apiParameters
}, JSON_SERIALIZER_OPTIONS);
async Task<HttpRequestMessage> RequestBuilder()
@ -113,7 +120,5 @@ public sealed class ProviderHuggingFace : BaseProvider
return Task.FromResult(Enumerable.Empty<Model>());
}
public override IReadOnlyCollection<Capability> GetModelCapabilities(Model model) => CapabilitiesOpenSource.GetCapabilities(model);
#endregion
}

View File

@ -19,6 +19,8 @@ public interface IProvider
/// </summary>
public string InstanceName { get; }
public string ExpertProviderApiParameters { get; }
/// <summary>
/// Starts a chat completion stream.
/// </summary>
@ -64,10 +66,4 @@ public interface IProvider
/// <returns>The list of embedding models.</returns>
public Task<IEnumerable<Model>> GetEmbeddingModels(string? apiKeyProvisional = null, CancellationToken token = default);
/// <summary>
/// Get the capabilities of a model.
/// </summary>
/// <param name="model">The model to get the capabilities for.</param>
/// <returns>The capabilities of the model.</returns>
public IReadOnlyCollection<Capability> GetModelCapabilities(Model model);
}

View File

@ -144,7 +144,7 @@ public static class LLMProvidersExtensions
/// <returns>The provider instance.</returns>
public static IProvider CreateProvider(this AIStudio.Settings.Provider providerSettings)
{
return providerSettings.UsedLLMProvider.CreateProvider(providerSettings.InstanceName, providerSettings.Host, providerSettings.Hostname, providerSettings.Model, providerSettings.HFInferenceProvider);
return providerSettings.UsedLLMProvider.CreateProvider(providerSettings.InstanceName, providerSettings.Host, providerSettings.Hostname, providerSettings.Model, providerSettings.HFInferenceProvider, providerSettings.ExpertProviderApiParameters);
}
/// <summary>
@ -157,29 +157,29 @@ public static class LLMProvidersExtensions
return embeddingProviderSettings.UsedLLMProvider.CreateProvider(embeddingProviderSettings.Name, embeddingProviderSettings.Host, embeddingProviderSettings.Hostname, embeddingProviderSettings.Model, HFInferenceProvider.NONE);
}
private static IProvider CreateProvider(this LLMProviders provider, string instanceName, Host host, string hostname, Model model, HFInferenceProvider inferenceProvider)
private static IProvider CreateProvider(this LLMProviders provider, string instanceName, Host host, string hostname, Model model, HFInferenceProvider inferenceProvider, string expertProviderApiParameter = "")
{
try
{
return provider switch
{
LLMProviders.OPEN_AI => new ProviderOpenAI { InstanceName = instanceName },
LLMProviders.ANTHROPIC => new ProviderAnthropic { InstanceName = instanceName },
LLMProviders.MISTRAL => new ProviderMistral { InstanceName = instanceName },
LLMProviders.GOOGLE => new ProviderGoogle { InstanceName = instanceName },
LLMProviders.X => new ProviderX { InstanceName = instanceName },
LLMProviders.DEEP_SEEK => new ProviderDeepSeek { InstanceName = instanceName },
LLMProviders.ALIBABA_CLOUD => new ProviderAlibabaCloud { InstanceName = instanceName },
LLMProviders.PERPLEXITY => new ProviderPerplexity { InstanceName = instanceName },
LLMProviders.OPEN_AI => new ProviderOpenAI { InstanceName = instanceName, ExpertProviderApiParameters = expertProviderApiParameter },
LLMProviders.ANTHROPIC => new ProviderAnthropic { InstanceName = instanceName, ExpertProviderApiParameters = expertProviderApiParameter },
LLMProviders.MISTRAL => new ProviderMistral { InstanceName = instanceName, ExpertProviderApiParameters = expertProviderApiParameter },
LLMProviders.GOOGLE => new ProviderGoogle { InstanceName = instanceName, ExpertProviderApiParameters = expertProviderApiParameter },
LLMProviders.X => new ProviderX { InstanceName = instanceName, ExpertProviderApiParameters = expertProviderApiParameter },
LLMProviders.DEEP_SEEK => new ProviderDeepSeek { InstanceName = instanceName, ExpertProviderApiParameters = expertProviderApiParameter },
LLMProviders.ALIBABA_CLOUD => new ProviderAlibabaCloud { InstanceName = instanceName, ExpertProviderApiParameters = expertProviderApiParameter },
LLMProviders.PERPLEXITY => new ProviderPerplexity { InstanceName = instanceName, ExpertProviderApiParameters = expertProviderApiParameter },
LLMProviders.GROQ => new ProviderGroq { InstanceName = instanceName },
LLMProviders.FIREWORKS => new ProviderFireworks { InstanceName = instanceName },
LLMProviders.HUGGINGFACE => new ProviderHuggingFace(inferenceProvider, model) { InstanceName = instanceName },
LLMProviders.GROQ => new ProviderGroq { InstanceName = instanceName, ExpertProviderApiParameters = expertProviderApiParameter },
LLMProviders.FIREWORKS => new ProviderFireworks { InstanceName = instanceName, ExpertProviderApiParameters = expertProviderApiParameter },
LLMProviders.HUGGINGFACE => new ProviderHuggingFace(inferenceProvider, model) { InstanceName = instanceName, ExpertProviderApiParameters = expertProviderApiParameter },
LLMProviders.SELF_HOSTED => new ProviderSelfHosted(host, hostname) { InstanceName = instanceName },
LLMProviders.SELF_HOSTED => new ProviderSelfHosted(host, hostname) { InstanceName = instanceName, ExpertProviderApiParameters = expertProviderApiParameter },
LLMProviders.HELMHOLTZ => new ProviderHelmholtz { InstanceName = instanceName },
LLMProviders.GWDG => new ProviderGWDG { InstanceName = instanceName },
LLMProviders.HELMHOLTZ => new ProviderHelmholtz { InstanceName = instanceName, ExpertProviderApiParameters = expertProviderApiParameter },
LLMProviders.GWDG => new ProviderGWDG { InstanceName = instanceName, ExpertProviderApiParameters = expertProviderApiParameter },
_ => new NoProvider(),
};

View File

@ -1,3 +1,5 @@
using System.Text.Json.Serialization;
namespace AIStudio.Provider.Mistral;
/// <summary>
@ -14,4 +16,9 @@ public readonly record struct ChatRequest(
bool Stream,
int RandomSeed,
bool SafePrompt = false
);
)
{
[JsonExtensionData]
public Dictionary<string, object?> AdditionalApiParameters { get; init; }
}

View File

@ -19,6 +19,9 @@ public sealed class ProviderMistral() : BaseProvider("https://api.mistral.ai/v1/
public override string InstanceName { get; set; } = "Mistral";
/// <inheritdoc />
public override string ExpertProviderApiParameters { get; set; } = string.Empty;
/// <inheritdoc />
public override async IAsyncEnumerable<ContentStreamChunk> StreamChatCompletion(Provider.Model chatModel, ChatThread chatThread, SettingsManager settingsManager, [EnumeratorCancellation] CancellationToken token = default)
{
@ -34,6 +37,9 @@ public sealed class ProviderMistral() : BaseProvider("https://api.mistral.ai/v1/
Content = chatThread.PrepareSystemPrompt(settingsManager, chatThread),
};
// Parse the API parameters:
var apiParameters = this.ParseApiParameters(this.ExpertProviderApiParameters);
// Prepare the Mistral HTTP chat request:
var mistralChatRequest = JsonSerializer.Serialize(new ChatRequest
{
@ -63,7 +69,8 @@ public sealed class ProviderMistral() : BaseProvider("https://api.mistral.ai/v1/
// Right now, we only support streaming completions:
Stream = true,
SafePrompt = false,
SafePrompt = apiParameters["safe_prompt"] as bool? ?? false,
AdditionalApiParameters = apiParameters
}, JSON_SERIALIZER_OPTIONS);
async Task<HttpRequestMessage> RequestBuilder()
@ -122,56 +129,6 @@ public sealed class ProviderMistral() : BaseProvider("https://api.mistral.ai/v1/
return Task.FromResult(Enumerable.Empty<Provider.Model>());
}
public override IReadOnlyCollection<Capability> GetModelCapabilities(Provider.Model model)
{
var modelName = model.Id.ToLowerInvariant().AsSpan();
// Pixtral models are able to do process images:
if (modelName.IndexOf("pixtral") is not -1)
return
[
Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
Capability.TEXT_OUTPUT,
Capability.FUNCTION_CALLING,
Capability.CHAT_COMPLETION_API,
];
// Mistral medium:
if (modelName.IndexOf("mistral-medium-") is not -1)
return
[
Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
Capability.TEXT_OUTPUT,
Capability.FUNCTION_CALLING,
Capability.CHAT_COMPLETION_API,
];
// Mistral small:
if (modelName.IndexOf("mistral-small-") is not -1)
return
[
Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
Capability.TEXT_OUTPUT,
Capability.FUNCTION_CALLING,
Capability.CHAT_COMPLETION_API,
];
// Mistral saba:
if (modelName.IndexOf("mistral-saba-") is not -1)
return
[
Capability.TEXT_INPUT,
Capability.TEXT_OUTPUT,
Capability.CHAT_COMPLETION_API,
];
// Default:
return CapabilitiesOpenSource.GetCapabilities(model);
}
#endregion
private async Task<ModelsResponse> LoadModelList(string? apiKeyProvisional, CancellationToken token)

View File

@ -13,6 +13,9 @@ public class NoProvider : IProvider
public string InstanceName { get; set; } = "None";
/// <inheritdoc />
public string ExpertProviderApiParameters { get; set; } = string.Empty;
public Task<IEnumerable<Model>> GetTextModels(string? apiKeyProvisional = null, CancellationToken token = default) => Task.FromResult<IEnumerable<Model>>([]);
public Task<IEnumerable<Model>> GetImageModels(string? apiKeyProvisional = null, CancellationToken token = default) => Task.FromResult<IEnumerable<Model>>([]);

View File

@ -1,3 +1,5 @@
using System.Text.Json.Serialization;
namespace AIStudio.Provider.OpenAI;
/// <summary>
@ -15,4 +17,7 @@ public record ChatCompletionAPIRequest(
public ChatCompletionAPIRequest() : this(string.Empty, [], true)
{
}
[JsonExtensionData]
public Dictionary<string, object?>? AdditionalApiParameters { get; init; }
}

View File

@ -23,6 +23,10 @@ public sealed class ProviderOpenAI() : BaseProvider("https://api.openai.com/v1/"
/// <inheritdoc />
public override string InstanceName { get; set; } = "OpenAI";
/// <inheritdoc />
public override string ExpertProviderApiParameters { get; set; } = string.Empty;
/// <inheritdoc />
public override async IAsyncEnumerable<ContentStreamChunk> StreamChatCompletion(Model chatModel, ChatThread chatThread, SettingsManager settingsManager, [EnumeratorCancellation] CancellationToken token = default)
{
@ -59,7 +63,7 @@ public sealed class ProviderOpenAI() : BaseProvider("https://api.openai.com/v1/"
};
// Read the model capabilities:
var modelCapabilities = this.GetModelCapabilities(chatModel);
var modelCapabilities = Settings.ProviderExtensions.GetModelCapabilitiesOpenAI(chatModel);
// Check if we are using the Responses API or the Chat Completion API:
var usingResponsesAPI = modelCapabilities.Contains(Capability.RESPONSES_API);
@ -85,6 +89,10 @@ public sealed class ProviderOpenAI() : BaseProvider("https://api.openai.com/v1/"
_ => []
};
// Parse the API parameters:
var apiParameters = this.ParseApiParameters(this.ExpertProviderApiParameters, ["input", "store", "tools"]);
//
// Create the request: either for the Responses API or the Chat Completion API
//
@ -119,6 +127,7 @@ public sealed class ProviderOpenAI() : BaseProvider("https://api.openai.com/v1/"
// Right now, we only support streaming completions:
Stream = true,
AdditionalApiParameters = apiParameters
}, JSON_SERIALIZER_OPTIONS),
// Responses API request:
@ -157,6 +166,9 @@ public sealed class ProviderOpenAI() : BaseProvider("https://api.openai.com/v1/"
// Tools we want to use:
Tools = tools,
// Additional api parameters:
AdditionalApiParameters = apiParameters
}, JSON_SERIALIZER_OPTIONS),
};
@ -215,144 +227,6 @@ public sealed class ProviderOpenAI() : BaseProvider("https://api.openai.com/v1/"
return this.LoadModels(["text-embedding-"], token, apiKeyProvisional);
}
public override IReadOnlyCollection<Capability> GetModelCapabilities(Model model)
{
var modelName = model.Id.ToLowerInvariant().AsSpan();
if (modelName is "gpt-4o-search-preview")
return
[
Capability.TEXT_INPUT,
Capability.TEXT_OUTPUT,
Capability.WEB_SEARCH,
Capability.CHAT_COMPLETION_API,
];
if (modelName is "gpt-4o-mini-search-preview")
return
[
Capability.TEXT_INPUT,
Capability.TEXT_OUTPUT,
Capability.WEB_SEARCH,
Capability.CHAT_COMPLETION_API,
];
if (modelName.StartsWith("o1-mini"))
return
[
Capability.TEXT_INPUT,
Capability.TEXT_OUTPUT,
Capability.ALWAYS_REASONING,
Capability.CHAT_COMPLETION_API,
];
if(modelName is "gpt-3.5-turbo")
return
[
Capability.TEXT_INPUT,
Capability.TEXT_OUTPUT,
Capability.RESPONSES_API,
];
if(modelName.StartsWith("gpt-3.5"))
return
[
Capability.TEXT_INPUT,
Capability.TEXT_OUTPUT,
Capability.CHAT_COMPLETION_API,
];
if (modelName.StartsWith("chatgpt-4o-"))
return
[
Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
Capability.TEXT_OUTPUT,
Capability.RESPONSES_API,
];
if (modelName.StartsWith("o3-mini"))
return
[
Capability.TEXT_INPUT,
Capability.TEXT_OUTPUT,
Capability.ALWAYS_REASONING, Capability.FUNCTION_CALLING,
Capability.RESPONSES_API,
];
if (modelName.StartsWith("o4-mini") || modelName.StartsWith("o3"))
return
[
Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
Capability.TEXT_OUTPUT,
Capability.ALWAYS_REASONING, Capability.FUNCTION_CALLING,
Capability.WEB_SEARCH,
Capability.RESPONSES_API,
];
if (modelName.StartsWith("o1"))
return
[
Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
Capability.TEXT_OUTPUT,
Capability.ALWAYS_REASONING, Capability.FUNCTION_CALLING,
Capability.RESPONSES_API,
];
if(modelName.StartsWith("gpt-4-turbo"))
return
[
Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
Capability.TEXT_OUTPUT,
Capability.FUNCTION_CALLING,
Capability.RESPONSES_API,
];
if(modelName is "gpt-4" || modelName.StartsWith("gpt-4-"))
return
[
Capability.TEXT_INPUT,
Capability.TEXT_OUTPUT,
Capability.RESPONSES_API,
];
if(modelName.StartsWith("gpt-5-nano"))
return
[
Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
Capability.TEXT_OUTPUT,
Capability.FUNCTION_CALLING, Capability.ALWAYS_REASONING,
Capability.RESPONSES_API,
];
if(modelName is "gpt-5" || modelName.StartsWith("gpt-5-"))
return
[
Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
Capability.TEXT_OUTPUT,
Capability.FUNCTION_CALLING, Capability.ALWAYS_REASONING,
Capability.WEB_SEARCH,
Capability.RESPONSES_API,
];
return
[
Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT,
Capability.TEXT_OUTPUT,
Capability.FUNCTION_CALLING,
Capability.RESPONSES_API,
];
}
#endregion
private async Task<IEnumerable<Model>> LoadModels(string[] prefixes, CancellationToken token, string? apiKeyProvisional = null)

View File

@ -1,3 +1,5 @@
using System.Text.Json.Serialization;
namespace AIStudio.Provider.OpenAI;
/// <summary>
@ -18,4 +20,7 @@ public record ResponsesAPIRequest(
public ResponsesAPIRequest() : this(string.Empty, [], true, false, [])
{
}
[JsonExtensionData]
public Dictionary<string, object?>? AdditionalApiParameters { get; init; }
}

View File

@ -30,6 +30,10 @@ public sealed class ProviderPerplexity() : BaseProvider("https://api.perplexity.
/// <inheritdoc />
public override string InstanceName { get; set; } = "Perplexity";
/// <inheritdoc />
public override string ExpertProviderApiParameters { get; set; } = string.Empty;
/// <inheritdoc />
public override async IAsyncEnumerable<ContentStreamChunk> StreamChatCompletion(Model chatModel, ChatThread chatThread, SettingsManager settingsManager, [EnumeratorCancellation] CancellationToken token = default)
{
@ -45,6 +49,9 @@ public sealed class ProviderPerplexity() : BaseProvider("https://api.perplexity.
Content = chatThread.PrepareSystemPrompt(settingsManager, chatThread),
};
// Parse the API parameters:
var apiParameters = this.ParseApiParameters(this.ExpertProviderApiParameters);
// Prepare the Perplexity HTTP chat request:
var perplexityChatRequest = JsonSerializer.Serialize(new ChatCompletionAPIRequest
{
@ -72,6 +79,7 @@ public sealed class ProviderPerplexity() : BaseProvider("https://api.perplexity.
}
}).ToList()],
Stream = true,
AdditionalApiParameters = apiParameters
}, JSON_SERIALIZER_OPTIONS);
async Task<HttpRequestMessage> RequestBuilder()
@ -117,38 +125,6 @@ public sealed class ProviderPerplexity() : BaseProvider("https://api.perplexity.
return Task.FromResult(Enumerable.Empty<Model>());
}
public override IReadOnlyCollection<Capability> GetModelCapabilities(Model model)
{
var modelName = model.Id.ToLowerInvariant().AsSpan();
if(modelName.IndexOf("reasoning") is not -1 ||
modelName.IndexOf("deep-research") is not -1)
return
[
Capability.TEXT_INPUT,
Capability.MULTIPLE_IMAGE_INPUT,
Capability.TEXT_OUTPUT,
Capability.IMAGE_OUTPUT,
Capability.ALWAYS_REASONING,
Capability.WEB_SEARCH,
Capability.CHAT_COMPLETION_API,
];
return
[
Capability.TEXT_INPUT,
Capability.MULTIPLE_IMAGE_INPUT,
Capability.TEXT_OUTPUT,
Capability.IMAGE_OUTPUT,
Capability.WEB_SEARCH,
Capability.CHAT_COMPLETION_API,
];
}
#endregion
private Task<IEnumerable<Model>> LoadModels() => Task.FromResult<IEnumerable<Model>>(KNOWN_MODELS);

View File

@ -1,3 +1,5 @@
using System.Text.Json.Serialization;
namespace AIStudio.Provider.SelfHosted;
/// <summary>
@ -10,4 +12,9 @@ public readonly record struct ChatRequest(
string Model,
IList<Message> Messages,
bool Stream
);
)
{
[JsonExtensionData]
public Dictionary<string, object?> AdditionalApiParameters { get; init; }
}

View File

@ -19,6 +19,9 @@ public sealed class ProviderSelfHosted(Host host, string hostname) : BaseProvide
public override string InstanceName { get; set; } = "Self-hosted";
/// <inheritdoc />
public override string ExpertProviderApiParameters { get; set; } = string.Empty;
/// <inheritdoc />
public override async IAsyncEnumerable<ContentStreamChunk> StreamChatCompletion(Provider.Model chatModel, ChatThread chatThread, SettingsManager settingsManager, [EnumeratorCancellation] CancellationToken token = default)
{
@ -32,6 +35,9 @@ public sealed class ProviderSelfHosted(Host host, string hostname) : BaseProvide
Content = chatThread.PrepareSystemPrompt(settingsManager, chatThread),
};
// Parse the API parameters:
var apiParameters = this.ParseApiParameters(this.ExpertProviderApiParameters);
// Prepare the OpenAI HTTP chat request:
var providerChatRequest = JsonSerializer.Serialize(new ChatRequest
{
@ -60,7 +66,8 @@ public sealed class ProviderSelfHosted(Host host, string hostname) : BaseProvide
}).ToList()],
// Right now, we only support streaming completions:
Stream = true
Stream = true,
AdditionalApiParameters = apiParameters
}, JSON_SERIALIZER_OPTIONS);
async Task<HttpRequestMessage> RequestBuilder()
@ -142,8 +149,6 @@ public sealed class ProviderSelfHosted(Host host, string hostname) : BaseProvide
}
}
public override IReadOnlyCollection<Capability> GetModelCapabilities(Provider.Model model) => CapabilitiesOpenSource.GetCapabilities(model);
#endregion
private async Task<IEnumerable<Provider.Model>> LoadModels(string[] ignorePhrases, string[] filterPhrases, CancellationToken token, string? apiKeyProvisional = null)

View File

@ -21,6 +21,9 @@ public sealed class ProviderX() : BaseProvider("https://api.x.ai/v1/", LOGGER)
/// <inheritdoc />
public override string InstanceName { get; set; } = "xAI";
/// <inheritdoc />
public override string ExpertProviderApiParameters { get; set; } = string.Empty;
/// <inheritdoc />
public override async IAsyncEnumerable<ContentStreamChunk> StreamChatCompletion(Model chatModel, ChatThread chatThread, SettingsManager settingsManager, [EnumeratorCancellation] CancellationToken token = default)
{
@ -36,6 +39,9 @@ public sealed class ProviderX() : BaseProvider("https://api.x.ai/v1/", LOGGER)
Content = chatThread.PrepareSystemPrompt(settingsManager, chatThread),
};
// Parse the API parameters:
var apiParameters = this.ParseApiParameters(this.ExpertProviderApiParameters);
// Prepare the xAI HTTP chat request:
var xChatRequest = JsonSerializer.Serialize(new ChatCompletionAPIRequest
{
@ -65,6 +71,7 @@ public sealed class ProviderX() : BaseProvider("https://api.x.ai/v1/", LOGGER)
// Right now, we only support streaming completions:
Stream = true,
AdditionalApiParameters = apiParameters
}, JSON_SERIALIZER_OPTIONS);
async Task<HttpRequestMessage> RequestBuilder()
@ -111,8 +118,6 @@ public sealed class ProviderX() : BaseProvider("https://api.x.ai/v1/", LOGGER)
return Task.FromResult<IEnumerable<Model>>([]);
}
public override IReadOnlyCollection<Capability> GetModelCapabilities(Model model) => CapabilitiesOpenSource.GetCapabilities(model);
#endregion
private async Task<IEnumerable<Model>> LoadModels(string[] prefixes, CancellationToken token, string? apiKeyProvisional = null)

View File

@ -31,7 +31,8 @@ public sealed record Provider(
Guid EnterpriseConfigurationPluginId = default,
string Hostname = "http://localhost:1234",
Host Host = Host.NONE,
HFInferenceProvider HFInferenceProvider = HFInferenceProvider.NONE) : ConfigurationBaseObject, ISecretId
HFInferenceProvider HFInferenceProvider = HFInferenceProvider.NONE,
string ExpertProviderApiParameters = "") : ConfigurationBaseObject, ISecretId
{
private static readonly ILogger<Provider> LOGGER = Program.LOGGER_FACTORY.CreateLogger<Provider>();
@ -133,6 +134,12 @@ public sealed record Provider(
return false;
}
if (!table.TryGetValue("ExpertProviderApiParameters", out var expertProviderApiParametersValue) || !expertProviderApiParametersValue.TryRead<string>(out var expertProviderApiParameters))
{
LOGGER.LogWarning($"The configured provider {idx} does not contain valid additional api parameters.");
return false;
}
provider = new Provider
{
Num = 0,
@ -144,7 +151,8 @@ public sealed record Provider(
IsEnterpriseConfiguration = true,
EnterpriseConfigurationPluginId = configPluginId,
Hostname = hostname,
Host = host
Host = host,
ExpertProviderApiParameters = expertProviderApiParameters,
};
return true;

View File

@ -1,8 +1,10 @@
namespace AIStudio.Provider;
using AIStudio.Provider;
public static class CapabilitiesOpenSource
namespace AIStudio.Settings;
public static partial class ProviderExtensions
{
public static IReadOnlyCollection<Capability> GetCapabilities(Model model)
public static List<Capability> GetModelCapabilitiesOpenSource(Model model)
{
var modelName = model.Id.ToLowerInvariant().AsSpan();