From a39c6c330f96dbd510d7c07c1c8726cb7e7d8493 Mon Sep 17 00:00:00 2001 From: Thorsten Sommer Date: Mon, 15 Dec 2025 19:31:51 +0100 Subject: [PATCH] Added the OpenRouter provider (#595) --- README.md | 5 +- app/MindWork AI Studio/Pages/Home.razor.cs | 2 +- .../Provider/LLMProviders.cs | 3 +- .../Provider/LLMProvidersExtensions.cs | 29 +- .../Provider/OpenRouter/OpenRouterModel.cs | 8 + .../OpenRouter/OpenRouterModelsResponse.cs | 7 + .../Provider/OpenRouter/ProviderOpenRouter.cs | 202 ++++++++++++++ .../Settings/ProviderExtensions.OpenRouter.cs | 249 ++++++++++++++++++ .../Settings/ProviderExtensions.cs | 3 +- .../wwwroot/changelog/v0.9.55.md | 1 + 10 files changed, 495 insertions(+), 14 deletions(-) create mode 100644 app/MindWork AI Studio/Provider/OpenRouter/OpenRouterModel.cs create mode 100644 app/MindWork AI Studio/Provider/OpenRouter/OpenRouterModelsResponse.cs create mode 100644 app/MindWork AI Studio/Provider/OpenRouter/ProviderOpenRouter.cs create mode 100644 app/MindWork AI Studio/Settings/ProviderExtensions.OpenRouter.cs diff --git a/README.md b/README.md index d526d2b3..8a4d5d87 100644 --- a/README.md +++ b/README.md @@ -32,7 +32,7 @@ Since November 2024: Work on RAG (integration of your data and files) has begun. 
- [x] ~~App: Implement dialog for checking & handling [pandoc](https://pandoc.org/) installation ([PR #393](https://github.com/MindWorkAI/AI-Studio/pull/393), [PR #487](https://github.com/MindWorkAI/AI-Studio/pull/487))~~ - [ ] App: Implement external embedding providers - [ ] App: Implement the process to vectorize one local file using embeddings -- [ ] Runtime: Integration of the vector database [LanceDB](https://github.com/lancedb/lancedb) +- [ ] Runtime: Integration of the vector database [Qdrant](https://github.com/qdrant/qdrant) - [ ] App: Implement the continuous process of vectorizing data - [x] ~~App: Define a common retrieval context interface for the integration of RAG processes in chats (PR [#281](https://github.com/MindWorkAI/AI-Studio/pull/281), [#284](https://github.com/MindWorkAI/AI-Studio/pull/284), [#286](https://github.com/MindWorkAI/AI-Studio/pull/286), [#287](https://github.com/MindWorkAI/AI-Studio/pull/287))~~ - [x] ~~App: Define a common augmentation interface for the integration of RAG processes in chats (PR [#288](https://github.com/MindWorkAI/AI-Studio/pull/288), [#289](https://github.com/MindWorkAI/AI-Studio/pull/289))~~ @@ -78,7 +78,7 @@ Since March 2025: We have started developing the plugin system. There will be la Features we have recently released - +- v0.9.55: Added support for newer models like Mistral 3 & GPT 5.2, OpenRouter as LLM and embedding provider, and the possibility to use file attachments in chats. - v0.9.51: Added support for [Perplexity](https://www.perplexity.ai/); citations added so that LLMs can provide source references (e.g., some OpenAI models, Perplexity); added support for OpenAI's Responses API so that all text LLMs from OpenAI now work in MindWork AI Studio, including Deep Research models; web searches are now possible (some OpenAI models, Perplexity). - v0.9.50: Added support for self-hosted LLMs using [vLLM](https://blog.vllm.ai/2023/06/20/vllm.html). 
- v0.9.46: Released our plugin system, a German language plugin, early support for enterprise environments, and configuration plugins. Additionally, we added the Pandoc integration for future data processing and file generation. @@ -114,6 +114,7 @@ MindWork AI Studio is a free desktop app for macOS, Windows, and Linux. It provi - [xAI](https://x.ai/) (Grok) - [DeepSeek](https://www.deepseek.com/en) - [Alibaba Cloud](https://www.alibabacloud.com) (Qwen) + - [OpenRouter](https://openrouter.ai/) - [Hugging Face](https://huggingface.co/) using their [inference providers](https://huggingface.co/docs/inference-providers/index) such as Cerebras, Nebius, Sambanova, Novita, Hyperbolic, Together AI, Fireworks, Hugging Face - Self-hosted models using [llama.cpp](https://github.com/ggerganov/llama.cpp), [ollama](https://github.com/ollama/ollama), [LM Studio](https://lmstudio.ai/), and [vLLM](https://github.com/vllm-project/vllm) - [Groq](https://groq.com/) diff --git a/app/MindWork AI Studio/Pages/Home.razor.cs b/app/MindWork AI Studio/Pages/Home.razor.cs index bdd46e06..7facd6e3 100644 --- a/app/MindWork AI Studio/Pages/Home.razor.cs +++ b/app/MindWork AI Studio/Pages/Home.razor.cs @@ -31,7 +31,7 @@ public partial class Home : MSGComponentBase { this.itemsAdvantages = [ new(this.T("Free of charge"), this.T("The app is free to use, both for personal and commercial purposes.")), - new(this.T("Independence"), this.T("You are not tied to any single provider. Instead, you might choose the provider that best suits your needs. Right now, we support OpenAI (GPT5, o1, etc.), Perplexity, Mistral, Anthropic (Claude), Google Gemini, xAI (Grok), DeepSeek, Alibaba Cloud (Qwen), Hugging Face, and self-hosted models using vLLM, llama.cpp, ollama, LM Studio, Groq, or Fireworks. For scientists and employees of research institutions, we also support Helmholtz and GWDG AI services. 
These are available through federated logins like eduGAIN to all 18 Helmholtz Centers, the Max Planck Society, most German, and many international universities.")), + new(this.T("Independence"), this.T("You are not tied to any single provider. Instead, you might choose the provider that best suits your needs. Right now, we support OpenAI (GPT5, o1, etc.), Perplexity, Mistral, Anthropic (Claude), Google Gemini, xAI (Grok), DeepSeek, Alibaba Cloud (Qwen), OpenRouter, Hugging Face, and self-hosted models using vLLM, llama.cpp, ollama, LM Studio, Groq, or Fireworks. For scientists and employees of research institutions, we also support Helmholtz and GWDG AI services. These are available through federated logins like eduGAIN to all 18 Helmholtz Centers, the Max Planck Society, most German, and many international universities.")), new(this.T("Assistants"), this.T("You just want to quickly translate a text? AI Studio has so-called assistants for such and other tasks. No prompting is necessary when working with these assistants.")), new(this.T("Unrestricted usage"), this.T("Unlike services like ChatGPT, which impose limits after intensive use, MindWork AI Studio offers unlimited usage through the providers API.")), new(this.T("Cost-effective"), this.T("You only pay for what you use, which can be cheaper than monthly subscription services like ChatGPT Plus, especially if used infrequently. But beware, here be dragons: For extremely intensive usage, the API costs can be significantly higher. Unfortunately, providers currently do not offer a way to display current costs in the app. Therefore, check your account with the respective provider to see how your costs are developing. 
When available, use prepaid and set a cost limit.")), diff --git a/app/MindWork AI Studio/Provider/LLMProviders.cs b/app/MindWork AI Studio/Provider/LLMProviders.cs index f23cd876..6a560036 100644 --- a/app/MindWork AI Studio/Provider/LLMProviders.cs +++ b/app/MindWork AI Studio/Provider/LLMProviders.cs @@ -15,7 +15,8 @@ public enum LLMProviders DEEP_SEEK = 11, ALIBABA_CLOUD = 12, PERPLEXITY = 14, - + OPEN_ROUTER = 15, + FIREWORKS = 5, GROQ = 6, HUGGINGFACE = 13, diff --git a/app/MindWork AI Studio/Provider/LLMProvidersExtensions.cs b/app/MindWork AI Studio/Provider/LLMProvidersExtensions.cs index 52fe30a1..91dfba8e 100644 --- a/app/MindWork AI Studio/Provider/LLMProvidersExtensions.cs +++ b/app/MindWork AI Studio/Provider/LLMProvidersExtensions.cs @@ -9,6 +9,7 @@ using AIStudio.Provider.Helmholtz; using AIStudio.Provider.HuggingFace; using AIStudio.Provider.Mistral; using AIStudio.Provider.OpenAI; +using AIStudio.Provider.OpenRouter; using AIStudio.Provider.Perplexity; using AIStudio.Provider.SelfHosted; using AIStudio.Provider.X; @@ -42,7 +43,8 @@ public static class LLMProvidersExtensions LLMProviders.DEEP_SEEK => "DeepSeek", LLMProviders.ALIBABA_CLOUD => "Alibaba Cloud", LLMProviders.PERPLEXITY => "Perplexity", - + LLMProviders.OPEN_ROUTER => "OpenRouter", + LLMProviders.GROQ => "Groq", LLMProviders.FIREWORKS => "Fireworks.ai", LLMProviders.HUGGINGFACE => "Hugging Face", @@ -92,7 +94,9 @@ public static class LLMProvidersExtensions LLMProviders.ALIBABA_CLOUD => Confidence.CHINA_NO_TRAINING.WithRegion("Asia").WithSources("https://www.alibabacloud.com/help/en/model-studio/support/faq-about-alibaba-cloud-model-studio").WithLevel(settingsManager.GetConfiguredConfidenceLevel(llmProvider)), LLMProviders.PERPLEXITY => Confidence.USA_NO_TRAINING.WithRegion("America, U.S.").WithSources("https://www.perplexity.ai/hub/legal/perplexity-api-terms-of-service").WithLevel(settingsManager.GetConfiguredConfidenceLevel(llmProvider)), - + + LLMProviders.OPEN_ROUTER => 
Confidence.USA_HUB.WithRegion("America, U.S.").WithSources("https://openrouter.ai/privacy", "https://openrouter.ai/terms").WithLevel(settingsManager.GetConfiguredConfidenceLevel(llmProvider)), + LLMProviders.SELF_HOSTED => Confidence.SELF_HOSTED.WithLevel(settingsManager.GetConfiguredConfidenceLevel(llmProvider)), LLMProviders.HELMHOLTZ => Confidence.GDPR_NO_TRAINING.WithRegion("Europe, Germany").WithSources("https://helmholtz.cloud/services/?serviceID=d7d5c597-a2f6-4bd1-b71e-4d6499d98570").WithLevel(settingsManager.GetConfiguredConfidenceLevel(llmProvider)), @@ -128,7 +132,8 @@ public static class LLMProvidersExtensions LLMProviders.DEEP_SEEK => false, LLMProviders.HUGGINGFACE => false, LLMProviders.PERPLEXITY => false, - + LLMProviders.OPEN_ROUTER => true, + // // Self-hosted providers are treated as a special case anyway. // @@ -171,7 +176,8 @@ public static class LLMProvidersExtensions LLMProviders.DEEP_SEEK => new ProviderDeepSeek { InstanceName = instanceName, AdditionalJsonApiParameters = expertProviderApiParameter }, LLMProviders.ALIBABA_CLOUD => new ProviderAlibabaCloud { InstanceName = instanceName, AdditionalJsonApiParameters = expertProviderApiParameter }, LLMProviders.PERPLEXITY => new ProviderPerplexity { InstanceName = instanceName, AdditionalJsonApiParameters = expertProviderApiParameter }, - + LLMProviders.OPEN_ROUTER => new ProviderOpenRouter { InstanceName = instanceName, AdditionalJsonApiParameters = expertProviderApiParameter }, + LLMProviders.GROQ => new ProviderGroq { InstanceName = instanceName, AdditionalJsonApiParameters = expertProviderApiParameter }, LLMProviders.FIREWORKS => new ProviderFireworks { InstanceName = instanceName, AdditionalJsonApiParameters = expertProviderApiParameter }, LLMProviders.HUGGINGFACE => new ProviderHuggingFace(inferenceProvider, model) { InstanceName = instanceName, AdditionalJsonApiParameters = expertProviderApiParameter }, @@ -201,7 +207,8 @@ public static class LLMProvidersExtensions LLMProviders.DEEP_SEEK 
=> "https://platform.deepseek.com/sign_up", LLMProviders.ALIBABA_CLOUD => "https://account.alibabacloud.com/register/intl_register.htm", LLMProviders.PERPLEXITY => "https://www.perplexity.ai/account/api", - + LLMProviders.OPEN_ROUTER => "https://openrouter.ai/keys", + LLMProviders.GROQ => "https://console.groq.com/", LLMProviders.FIREWORKS => "https://fireworks.ai/login", LLMProviders.HUGGINGFACE => "https://huggingface.co/login", @@ -224,8 +231,9 @@ public static class LLMProvidersExtensions LLMProviders.DEEP_SEEK => "https://platform.deepseek.com/usage", LLMProviders.ALIBABA_CLOUD => "https://usercenter2-intl.aliyun.com/billing", LLMProviders.PERPLEXITY => "https://www.perplexity.ai/account/api/", + LLMProviders.OPEN_ROUTER => "https://openrouter.ai/activity", LLMProviders.HUGGINGFACE => "https://huggingface.co/settings/billing", - + _ => string.Empty, }; @@ -241,8 +249,9 @@ public static class LLMProvidersExtensions LLMProviders.DEEP_SEEK => true, LLMProviders.ALIBABA_CLOUD => true, LLMProviders.PERPLEXITY => true, + LLMProviders.OPEN_ROUTER => true, LLMProviders.HUGGINGFACE => true, - + _ => false, }; @@ -288,7 +297,8 @@ public static class LLMProvidersExtensions LLMProviders.DEEP_SEEK => true, LLMProviders.ALIBABA_CLOUD => true, LLMProviders.PERPLEXITY => true, - + LLMProviders.OPEN_ROUTER => true, + LLMProviders.GROQ => true, LLMProviders.FIREWORKS => true, LLMProviders.HELMHOLTZ => true, @@ -310,7 +320,8 @@ public static class LLMProvidersExtensions LLMProviders.DEEP_SEEK => true, LLMProviders.ALIBABA_CLOUD => true, LLMProviders.PERPLEXITY => true, - + LLMProviders.OPEN_ROUTER => true, + LLMProviders.GROQ => true, LLMProviders.FIREWORKS => true, LLMProviders.HELMHOLTZ => true, diff --git a/app/MindWork AI Studio/Provider/OpenRouter/OpenRouterModel.cs b/app/MindWork AI Studio/Provider/OpenRouter/OpenRouterModel.cs new file mode 100644 index 00000000..7cd47a59 --- /dev/null +++ b/app/MindWork AI Studio/Provider/OpenRouter/OpenRouterModel.cs @@ -0,0 +1,8 @@ 
+namespace AIStudio.Provider.OpenRouter; + +/// <summary> +/// A data model for an OpenRouter model from the API. +/// </summary> +/// <param name="Id">The model's ID.</param> +/// <param name="Name">The model's human-readable display name.</param> +public readonly record struct OpenRouterModel(string Id, string? Name); diff --git a/app/MindWork AI Studio/Provider/OpenRouter/OpenRouterModelsResponse.cs b/app/MindWork AI Studio/Provider/OpenRouter/OpenRouterModelsResponse.cs new file mode 100644 index 00000000..0680c4e6 --- /dev/null +++ b/app/MindWork AI Studio/Provider/OpenRouter/OpenRouterModelsResponse.cs @@ -0,0 +1,7 @@ +namespace AIStudio.Provider.OpenRouter; + +/// <summary> +/// A data model for the response from the OpenRouter models endpoint. +/// </summary> +/// <param name="Data">The list of models.</param> +public readonly record struct OpenRouterModelsResponse(IList<OpenRouterModel> Data); diff --git a/app/MindWork AI Studio/Provider/OpenRouter/ProviderOpenRouter.cs b/app/MindWork AI Studio/Provider/OpenRouter/ProviderOpenRouter.cs new file mode 100644 index 00000000..2b5e5780 --- /dev/null +++ b/app/MindWork AI Studio/Provider/OpenRouter/ProviderOpenRouter.cs @@ -0,0 +1,202 @@ +using System.Net.Http.Headers; +using System.Runtime.CompilerServices; +using System.Text; +using System.Text.Json; + +using AIStudio.Chat; +using AIStudio.Provider.OpenAI; +using AIStudio.Settings; + +namespace AIStudio.Provider.OpenRouter; + +public sealed class ProviderOpenRouter() : BaseProvider("https://openrouter.ai/api/v1/", LOGGER) +{ + private const string PROJECT_WEBSITE = "https://github.com/MindWorkAI/AI-Studio"; + private const string PROJECT_NAME = "MindWork AI Studio"; + + private static readonly ILogger LOGGER = Program.LOGGER_FACTORY.CreateLogger<ProviderOpenRouter>(); + + #region Implementation of IProvider + + /// <inheritdoc/> + public override string Id => LLMProviders.OPEN_ROUTER.ToName(); + + /// <inheritdoc/> + public override string InstanceName { get; set; } = "OpenRouter"; + + /// <inheritdoc/> + public override async IAsyncEnumerable<string> StreamChatCompletion(Model chatModel, ChatThread chatThread, SettingsManager settingsManager, 
[EnumeratorCancellation] CancellationToken token = default) + { + // Get the API key: + var requestedSecret = await RUST_SERVICE.GetAPIKey(this); + if(!requestedSecret.Success) + yield break; + + // Prepare the system prompt: + var systemPrompt = new Message + { + Role = "system", + Content = chatThread.PrepareSystemPrompt(settingsManager, chatThread), + }; + + // Parse the API parameters: + var apiParameters = this.ParseAdditionalApiParameters(); + + // Build the list of messages: + var messages = await chatThread.Blocks.BuildMessages(async n => new Message + { + Role = n.Role switch + { + ChatRole.USER => "user", + ChatRole.AI => "assistant", + ChatRole.AGENT => "assistant", + ChatRole.SYSTEM => "system", + + _ => "user", + }, + + Content = n.Content switch + { + ContentText text => await text.PrepareContentForAI(), + _ => string.Empty, + } + }); + + // Prepare the OpenRouter HTTP chat request: + var openRouterChatRequest = JsonSerializer.Serialize(new ChatCompletionAPIRequest + { + Model = chatModel.Id, + + // Build the messages: + // - First of all the system prompt + // - Then non-empty user and AI messages + Messages = [systemPrompt, ..messages], + + // Right now, we only support streaming completions: + Stream = true, + AdditionalApiParameters = apiParameters + }, JSON_SERIALIZER_OPTIONS); + + async Task<HttpRequestMessage> RequestBuilder() + { + // Build the HTTP post request: + var request = new HttpRequestMessage(HttpMethod.Post, "chat/completions"); + + // Set the authorization header: + request.Headers.Authorization = new AuthenticationHeaderValue("Bearer", await requestedSecret.Secret.Decrypt(ENCRYPTION)); + + // Set custom headers for project identification: + request.Headers.Add("HTTP-Referer", PROJECT_WEBSITE); + request.Headers.Add("X-Title", PROJECT_NAME); + + // Set the content: + request.Content = new StringContent(openRouterChatRequest, Encoding.UTF8, "application/json"); + return request; + } + + await foreach (var content in 
this.StreamChatCompletionInternal<ResponseStreamLine>("OpenRouter", RequestBuilder, token)) + yield return content; + } + + #pragma warning disable CS1998 // Async method lacks 'await' operators and will run synchronously + /// <inheritdoc/> + public override async IAsyncEnumerable<ImageURL> StreamImageCompletion(Model imageModel, string promptPositive, string promptNegative = FilterOperator.String.Empty, ImageURL referenceImageURL = default, [EnumeratorCancellation] CancellationToken token = default) + { + yield break; + } + #pragma warning restore CS1998 // Async method lacks 'await' operators and will run synchronously + + /// <inheritdoc/> + public override Task<IEnumerable<Model>> GetTextModels(string? apiKeyProvisional = null, CancellationToken token = default) + { + return this.LoadModels(token, apiKeyProvisional); + } + + /// <inheritdoc/> + public override Task<IEnumerable<Model>> GetImageModels(string? apiKeyProvisional = null, CancellationToken token = default) + { + return Task.FromResult(Enumerable.Empty<Model>()); + } + + /// <inheritdoc/> + public override Task<IEnumerable<Model>> GetEmbeddingModels(string? apiKeyProvisional = null, CancellationToken token = default) + { + return this.LoadEmbeddingModels(token, apiKeyProvisional); + } + + #endregion + + private async Task<IEnumerable<Model>> LoadModels(CancellationToken token, string? 
apiKeyProvisional = null) + { + var secretKey = apiKeyProvisional switch + { + not null => apiKeyProvisional, + _ => await RUST_SERVICE.GetAPIKey(this) switch + { + { Success: true } result => await result.Secret.Decrypt(ENCRYPTION), + _ => null, + } + }; + + if (secretKey is null) + return []; + + using var request = new HttpRequestMessage(HttpMethod.Get, "models"); + request.Headers.Authorization = new AuthenticationHeaderValue("Bearer", secretKey); + + // Set custom headers for project identification: + request.Headers.Add("HTTP-Referer", PROJECT_WEBSITE); + request.Headers.Add("X-Title", PROJECT_NAME); + + using var response = await this.httpClient.SendAsync(request, token); + if(!response.IsSuccessStatusCode) + return []; + + var modelResponse = await response.Content.ReadFromJsonAsync<OpenRouterModelsResponse>(token); + + // Filter out non-text models (image, audio, embedding models) and convert to Model + return modelResponse.Data + .Where(n => + !n.Id.Contains("whisper", StringComparison.OrdinalIgnoreCase) && + !n.Id.Contains("dall-e", StringComparison.OrdinalIgnoreCase) && + !n.Id.Contains("tts", StringComparison.OrdinalIgnoreCase) && + !n.Id.Contains("embedding", StringComparison.OrdinalIgnoreCase) && + !n.Id.Contains("moderation", StringComparison.OrdinalIgnoreCase) && + !n.Id.Contains("stable-diffusion", StringComparison.OrdinalIgnoreCase) && + !n.Id.Contains("flux", StringComparison.OrdinalIgnoreCase) && + !n.Id.Contains("midjourney", StringComparison.OrdinalIgnoreCase)) + .Select(n => new Model(n.Id, n.Name)); + } + + private async Task<IEnumerable<Model>> LoadEmbeddingModels(CancellationToken token, string? 
apiKeyProvisional = null) + { + var secretKey = apiKeyProvisional switch + { + not null => apiKeyProvisional, + _ => await RUST_SERVICE.GetAPIKey(this) switch + { + { Success: true } result => await result.Secret.Decrypt(ENCRYPTION), + _ => null, + } + }; + + if (secretKey is null) + return []; + + using var request = new HttpRequestMessage(HttpMethod.Get, "embeddings/models"); + request.Headers.Authorization = new AuthenticationHeaderValue("Bearer", secretKey); + + // Set custom headers for project identification: + request.Headers.Add("HTTP-Referer", PROJECT_WEBSITE); + request.Headers.Add("X-Title", PROJECT_NAME); + + using var response = await this.httpClient.SendAsync(request, token); + if(!response.IsSuccessStatusCode) + return []; + + var modelResponse = await response.Content.ReadFromJsonAsync<OpenRouterModelsResponse>(token); + + // Convert all embedding models to Model + return modelResponse.Data.Select(n => new Model(n.Id, n.Name)); + } +} diff --git a/app/MindWork AI Studio/Settings/ProviderExtensions.OpenRouter.cs b/app/MindWork AI Studio/Settings/ProviderExtensions.OpenRouter.cs new file mode 100644 index 00000000..c1479819 --- /dev/null +++ b/app/MindWork AI Studio/Settings/ProviderExtensions.OpenRouter.cs @@ -0,0 +1,249 @@ +using AIStudio.Provider; + +namespace AIStudio.Settings; + +public static partial class ProviderExtensions +{ + public static List<Capability> GetModelCapabilitiesOpenRouter(Model model) + { + var modelName = model.Id.ToLowerInvariant().AsSpan(); + + + // + // OpenRouter model IDs follow the pattern: "provider/model-name" + // Examples: + // - openai/gpt-4o + // - anthropic/claude-3-5-sonnet + // - google/gemini-pro-1.5 + // - meta-llama/llama-3.1-405b-instruct + // + // We need to detect capabilities based on both provider and model name. 
+ // + + // + // OpenAI models via OpenRouter: + // + if (modelName.IndexOf("openai/") is not -1) + { + // Reasoning models (o1, o3, o4 series) + if (modelName.IndexOf("/o1") is not -1 || + modelName.IndexOf("/o3") is not -1 || + modelName.IndexOf("/o4") is not -1) + return [ + Capability.TEXT_INPUT, + Capability.TEXT_OUTPUT, + Capability.ALWAYS_REASONING, + Capability.CHAT_COMPLETION_API, + ]; + + // GPT-4o and GPT-5 series with multimodal + if (modelName.IndexOf("/gpt-4o") is not -1 || + modelName.IndexOf("/gpt-5") is not -1 || + modelName.IndexOf("/chatgpt-4o") is not -1) + return [ + Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, + Capability.TEXT_OUTPUT, + Capability.FUNCTION_CALLING, + Capability.CHAT_COMPLETION_API, + ]; + + // Standard GPT-4 + if (modelName.IndexOf("/gpt-4") is not -1) + return [ + Capability.TEXT_INPUT, + Capability.TEXT_OUTPUT, + Capability.FUNCTION_CALLING, + Capability.CHAT_COMPLETION_API, + ]; + + // GPT-3.5 + if (modelName.IndexOf("/gpt-3.5") is not -1 || + modelName.IndexOf("/gpt-3") is not -1) + return [ + Capability.TEXT_INPUT, + Capability.TEXT_OUTPUT, + Capability.CHAT_COMPLETION_API, + ]; + } + + // + // Anthropic models via OpenRouter: + // + if (modelName.IndexOf("anthropic/") is not -1) + { + // Claude 3.5 and newer with vision + if (modelName.IndexOf("/claude-3.5") is not -1 || + modelName.IndexOf("/claude-3-5") is not -1) + return [ + Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, + Capability.TEXT_OUTPUT, + Capability.FUNCTION_CALLING, + Capability.CHAT_COMPLETION_API, + ]; + + // Claude 3 Opus/Sonnet with vision + if (modelName.IndexOf("/claude-3-opus") is not -1 || + modelName.IndexOf("/claude-3-sonnet") is not -1) + return [ + Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, + Capability.TEXT_OUTPUT, + Capability.FUNCTION_CALLING, + Capability.CHAT_COMPLETION_API, + ]; + + // Other Claude 3 models + if (modelName.IndexOf("/claude-3") is not -1) + return [ + Capability.TEXT_INPUT, + 
Capability.TEXT_OUTPUT, + Capability.CHAT_COMPLETION_API, + ]; + } + + // + // Google models via OpenRouter: + // + if (modelName.IndexOf("google/") is not -1) + { + // Gemini models with multimodal + if (modelName.IndexOf("/gemini") is not -1) + return [ + Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, + Capability.TEXT_OUTPUT, + Capability.FUNCTION_CALLING, + Capability.CHAT_COMPLETION_API, + ]; + } + + // + // xAI Grok models via OpenRouter: + // + if (modelName.IndexOf("x-ai/") is not -1 || modelName.IndexOf("/grok") is not -1) + { + if (modelName.IndexOf("-vision") is not -1) + return [ + Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, + Capability.TEXT_OUTPUT, + Capability.CHAT_COMPLETION_API, + ]; + + return [ + Capability.TEXT_INPUT, + Capability.TEXT_OUTPUT, + Capability.FUNCTION_CALLING, + Capability.CHAT_COMPLETION_API, + ]; + } + + // + // DeepSeek models via OpenRouter: + // + if (modelName.IndexOf("/deepseek") is not -1) + { + if (modelName.IndexOf("-r1") is not -1 || modelName.IndexOf(" r1") is not -1) + return [ + Capability.TEXT_INPUT, + Capability.TEXT_OUTPUT, + Capability.ALWAYS_REASONING, + Capability.CHAT_COMPLETION_API, + ]; + + return [ + Capability.TEXT_INPUT, + Capability.TEXT_OUTPUT, + Capability.CHAT_COMPLETION_API, + ]; + } + + // + // Mistral models via OpenRouter: + // + if (modelName.IndexOf("/mistral") is not -1 || modelName.IndexOf("/pixtral") is not -1) + { + if (modelName.IndexOf("/pixtral") is not -1) + return [ + Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, + Capability.TEXT_OUTPUT, + Capability.FUNCTION_CALLING, + Capability.CHAT_COMPLETION_API, + ]; + + return [ + Capability.TEXT_INPUT, + Capability.TEXT_OUTPUT, + Capability.FUNCTION_CALLING, + Capability.CHAT_COMPLETION_API, + ]; + } + + // + // Meta Llama models via OpenRouter: + // + if (modelName.IndexOf("/llama") is not -1) + { + // Llama 4 with vision + if (modelName.IndexOf("/llama-4") is not -1 || + modelName.IndexOf("/llama4") is not -1) + 
return [ + Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, + Capability.TEXT_OUTPUT, + Capability.FUNCTION_CALLING, + Capability.CHAT_COMPLETION_API, + ]; + + // Vision models + if (modelName.IndexOf("-vision") is not -1) + return [ + Capability.TEXT_INPUT, Capability.MULTIPLE_IMAGE_INPUT, + Capability.TEXT_OUTPUT, + Capability.CHAT_COMPLETION_API, + ]; + + // Llama 3.1+ with function calling + if (modelName.IndexOf("/llama-3.") is not -1 || + modelName.IndexOf("/llama3.") is not -1) + return [ + Capability.TEXT_INPUT, + Capability.TEXT_OUTPUT, + Capability.FUNCTION_CALLING, + Capability.CHAT_COMPLETION_API, + ]; + + // Default Llama + return [ + Capability.TEXT_INPUT, + Capability.TEXT_OUTPUT, + Capability.CHAT_COMPLETION_API, + ]; + } + + // + // Qwen models via OpenRouter: + // + if (modelName.IndexOf("/qwen") is not -1 || modelName.IndexOf("/qwq") is not -1) + { + if (modelName.IndexOf("/qwq") is not -1) + return [ + Capability.TEXT_INPUT, + Capability.TEXT_OUTPUT, + Capability.ALWAYS_REASONING, + Capability.CHAT_COMPLETION_API, + ]; + + return [ + Capability.TEXT_INPUT, + Capability.TEXT_OUTPUT, + Capability.CHAT_COMPLETION_API, + ]; + } + + // + // Default for unknown models: + // Assume basic text input/output with chat completion + // + return [ + Capability.TEXT_INPUT, + Capability.TEXT_OUTPUT, + Capability.CHAT_COMPLETION_API, + ]; + } +} diff --git a/app/MindWork AI Studio/Settings/ProviderExtensions.cs b/app/MindWork AI Studio/Settings/ProviderExtensions.cs index 002295c3..984386d1 100644 --- a/app/MindWork AI Studio/Settings/ProviderExtensions.cs +++ b/app/MindWork AI Studio/Settings/ProviderExtensions.cs @@ -14,7 +14,8 @@ public static partial class ProviderExtensions LLMProviders.DEEP_SEEK => GetModelCapabilitiesDeepSeek(provider.Model), LLMProviders.ALIBABA_CLOUD => GetModelCapabilitiesAlibaba(provider.Model), LLMProviders.PERPLEXITY => GetModelCapabilitiesPerplexity(provider.Model), - + LLMProviders.OPEN_ROUTER => 
GetModelCapabilitiesOpenRouter(provider.Model), + LLMProviders.GROQ => GetModelCapabilitiesOpenSource(provider.Model), LLMProviders.FIREWORKS => GetModelCapabilitiesOpenSource(provider.Model), LLMProviders.HUGGINGFACE => GetModelCapabilitiesOpenSource(provider.Model), diff --git a/app/MindWork AI Studio/wwwroot/changelog/v0.9.55.md b/app/MindWork AI Studio/wwwroot/changelog/v0.9.55.md index 483ec579..f898dece 100644 --- a/app/MindWork AI Studio/wwwroot/changelog/v0.9.55.md +++ b/app/MindWork AI Studio/wwwroot/changelog/v0.9.55.md @@ -1,6 +1,7 @@ # v0.9.55, build 230 (2025-12-xx xx:xx UTC) - Added support for newer Mistral models (Mistral 3, Voxtral, and Magistral). - Added support for the new OpenAI model GPT 5.2. +- Added support for OpenRouter as LLM and embedding provider. - Added a description field to local data sources (preview feature) so that the data selection agent has more information about which data each local source contains when selecting data sources. - Added the ability to use file attachments in chat. This is the initial implementation of this feature. We will continue to develop this feature and refine it further based on user feedback. Many thanks to Sabrina `Sabrina-devops` for this wonderful contribution. - Improved the document analysis assistant (in preview) by adding descriptions to the different sections.