Mirror of https://github.com/MindWorkAI/AI-Studio.git (synced 2025-02-05 10:49:07 +00:00)

Added xAI as a new provider (#254)

This commit: 2ef4d9d53f (parent: 33a2728644)

@@ -35,6 +35,7 @@ Other News:

Features we have recently released:

- v0.9.25: Added [xAI](https://x.ai/) as a new provider. xAI provides their Grok models for generating content.
- v0.9.23: Added support for OpenAI `o` models (`o1`, `o1-mini`, `o3`, etc.); also added an [ERI](https://github.com/MindWorkAI/ERI) server coding assistant as a preview feature behind the RAG feature flag. Your own ERI server can be used to gain access to, e.g., your enterprise data from within AI Studio.
- v0.9.22: Added options for preview features; added embedding provider configuration for RAG (preview) and writer mode (experimental preview).
- v0.9.18: Added the new Anthropic Haiku model; added Groq and Google Gemini as provider options.
@@ -42,7 +43,6 @@ Features we have recently released:
- v0.9.16: Added workspace display options & improved the layout of the app window.
- v0.9.15: Added the bias-of-the-day assistant. Tells you about a cognitive bias every day.
- v0.9.13: You can use `ollama` providers secured with API keys.
- v0.9.12: Added a job posting assistant to the business category and improved grammar & spelling check and rewrite assistants.

## What is AI Studio?

@@ -53,7 +53,7 @@ MindWork AI Studio is a desktop application available for macOS, Windows, and Li

**Key advantages:**
- **Free of charge**: The app is free to use, both for personal and commercial purposes.
- **Independence**: You are not tied to any single provider. Instead, you can choose the provider that best suits your needs. Right now, we support OpenAI (GPT4o etc.), Mistral, Anthropic (Claude), Google Gemini, and self-hosted models using [llama.cpp](https://github.com/ggerganov/llama.cpp), [ollama](https://github.com/ollama/ollama), [LM Studio](https://lmstudio.ai/), [Groq](https://groq.com/), or [Fireworks](https://fireworks.ai/).
- **Independence**: You are not tied to any single provider. Instead, you can choose the provider that best suits your needs. Right now, we support OpenAI (GPT4o etc.), Mistral, Anthropic (Claude), Google Gemini, xAI (Grok), and self-hosted models using [llama.cpp](https://github.com/ggerganov/llama.cpp), [ollama](https://github.com/ollama/ollama), [LM Studio](https://lmstudio.ai/), [Groq](https://groq.com/), or [Fireworks](https://fireworks.ai/).
- **Unrestricted usage**: Unlike services like ChatGPT, which impose limits after intensive use, MindWork AI Studio offers unlimited usage through the provider's API.
- **Cost-effective**: You only pay for what you use, which can be cheaper than monthly subscription services like ChatGPT Plus, especially if used infrequently. But beware, here be dragons: for extremely intensive usage, the API costs can be significantly higher. Unfortunately, providers currently do not offer a way to display current costs in the app. Therefore, check your account with the respective provider to see how your costs are developing. When available, use prepaid credit and set a cost limit.
- **Privacy**: The data entered into the app is not used for training by the providers, since we are using the provider's API.

@@ -33,7 +33,7 @@ public partial class Home : ComponentBase
    private static readonly TextItem[] ITEMS_ADVANTAGES =
    [
        new("Free of charge", "The app is free to use, both for personal and commercial purposes."),
        new("Independence", "You are not tied to any single provider. Instead, you might choose the provider that best suits your needs. Right now, we support OpenAI (GPT4o etc.), Mistral, Anthropic (Claude), Google Gemini, and self-hosted models using llama.cpp, ollama, LM Studio, Groq, or Fireworks."),
        new("Independence", "You are not tied to any single provider. Instead, you might choose the provider that best suits your needs. Right now, we support OpenAI (GPT4o etc.), Mistral, Anthropic (Claude), Google Gemini, xAI (Grok), and self-hosted models using llama.cpp, ollama, LM Studio, Groq, or Fireworks."),
        new("Unrestricted usage", "Unlike services like ChatGPT, which impose limits after intensive use, MindWork AI Studio offers unlimited usage through the provider's API."),
        new("Cost-effective", "You only pay for what you use, which can be cheaper than monthly subscription services like ChatGPT Plus, especially if used infrequently. But beware, here be dragons: For extremely intensive usage, the API costs can be significantly higher. Unfortunately, providers currently do not offer a way to display current costs in the app. Therefore, check your account with the respective provider to see how your costs are developing. When available, use prepaid and set a cost limit."),
        new("Privacy", "The data entered into the app is not used for training by the providers since we are using the provider's API."),

@@ -11,6 +11,7 @@ public enum LLMProviders
    ANTHROPIC = 2,
    MISTRAL = 3,
    GOOGLE = 7,
    X = 8,

    FIREWORKS = 5,
    GROQ = 6,

@@ -5,6 +5,7 @@ using AIStudio.Provider.Groq;
using AIStudio.Provider.Mistral;
using AIStudio.Provider.OpenAI;
using AIStudio.Provider.SelfHosted;
using AIStudio.Provider.X;
using AIStudio.Settings;

using Host = AIStudio.Provider.SelfHosted.Host;

@@ -26,6 +27,7 @@ public static class LLMProvidersExtensions
        LLMProviders.ANTHROPIC => "Anthropic",
        LLMProviders.MISTRAL => "Mistral",
        LLMProviders.GOOGLE => "Google",
        LLMProviders.X => "xAI",

        LLMProviders.GROQ => "Groq",
        LLMProviders.FIREWORKS => "Fireworks.ai",

@@ -62,6 +64,8 @@ public static class LLMProvidersExtensions

        LLMProviders.MISTRAL => Confidence.GDPR_NO_TRAINING.WithRegion("Europe, France").WithSources("https://mistral.ai/terms/#terms-of-service-la-plateforme").WithLevel(settingsManager.GetConfiguredConfidenceLevel(llmProvider)),

        LLMProviders.X => Confidence.USA_NO_TRAINING.WithRegion("America, U.S.").WithSources("https://x.ai/legal/terms-of-service-enterprise").WithLevel(settingsManager.GetConfiguredConfidenceLevel(llmProvider)),

        LLMProviders.SELF_HOSTED => Confidence.SELF_HOSTED.WithLevel(settingsManager.GetConfiguredConfidenceLevel(llmProvider)),

        _ => Confidence.UNKNOWN.WithLevel(settingsManager.GetConfiguredConfidenceLevel(llmProvider)),

@@ -87,6 +91,7 @@ public static class LLMProvidersExtensions
        LLMProviders.GROQ => false,
        LLMProviders.ANTHROPIC => false,
        LLMProviders.FIREWORKS => false,
        LLMProviders.X => false,

        //
        // Self-hosted providers are treated as a special case anyway.

@@ -128,6 +133,7 @@ public static class LLMProvidersExtensions
        LLMProviders.ANTHROPIC => new ProviderAnthropic(logger) { InstanceName = instanceName },
        LLMProviders.MISTRAL => new ProviderMistral(logger) { InstanceName = instanceName },
        LLMProviders.GOOGLE => new ProviderGoogle(logger) { InstanceName = instanceName },
        LLMProviders.X => new ProviderX(logger) { InstanceName = instanceName },

        LLMProviders.GROQ => new ProviderGroq(logger) { InstanceName = instanceName },
        LLMProviders.FIREWORKS => new ProviderFireworks(logger) { InstanceName = instanceName },

@@ -150,6 +156,7 @@ public static class LLMProvidersExtensions
        LLMProviders.MISTRAL => "https://console.mistral.ai/",
        LLMProviders.ANTHROPIC => "https://console.anthropic.com/dashboard",
        LLMProviders.GOOGLE => "https://console.cloud.google.com/",
        LLMProviders.X => "https://accounts.x.ai/sign-up",

        LLMProviders.GROQ => "https://console.groq.com/",
        LLMProviders.FIREWORKS => "https://fireworks.ai/login",

@@ -162,6 +169,7 @@ public static class LLMProvidersExtensions
        LLMProviders.OPEN_AI => "https://platform.openai.com/usage",
        LLMProviders.MISTRAL => "https://console.mistral.ai/usage/",
        LLMProviders.ANTHROPIC => "https://console.anthropic.com/settings/plans",
        LLMProviders.X => "https://console.x.ai/",
        LLMProviders.GROQ => "https://console.groq.com/settings/usage",
        LLMProviders.GOOGLE => "https://console.cloud.google.com/billing",
        LLMProviders.FIREWORKS => "https://fireworks.ai/account/billing",

@@ -174,6 +182,7 @@ public static class LLMProvidersExtensions
        LLMProviders.OPEN_AI => true,
        LLMProviders.MISTRAL => true,
        LLMProviders.ANTHROPIC => true,
        LLMProviders.X => true,
        LLMProviders.GROQ => true,
        LLMProviders.FIREWORKS => true,
        LLMProviders.GOOGLE => true,

@@ -217,6 +226,7 @@ public static class LLMProvidersExtensions
        LLMProviders.MISTRAL => true,
        LLMProviders.ANTHROPIC => true,
        LLMProviders.GOOGLE => true,
        LLMProviders.X => true,

        LLMProviders.GROQ => true,
        LLMProviders.FIREWORKS => true,

@@ -232,6 +242,7 @@ public static class LLMProvidersExtensions
        LLMProviders.MISTRAL => true,
        LLMProviders.ANTHROPIC => true,
        LLMProviders.GOOGLE => true,
        LLMProviders.X => true,

        LLMProviders.GROQ => true,
        LLMProviders.FIREWORKS => true,

app/MindWork AI Studio/Provider/X/ProviderX.cs (new file, 152 lines)
@@ -0,0 +1,152 @@
using System.Net.Http.Headers;
using System.Runtime.CompilerServices;
using System.Text;
using System.Text.Json;

using AIStudio.Chat;
using AIStudio.Provider.OpenAI;
using AIStudio.Settings;

namespace AIStudio.Provider.X;

public sealed class ProviderX(ILogger logger) : BaseProvider("https://api.x.ai/v1/", logger)
{
    #region Implementation of IProvider

    /// <inheritdoc />
    public override string Id => LLMProviders.X.ToName();

    /// <inheritdoc />
    public override string InstanceName { get; set; } = "xAI";

    /// <inheritdoc />
    public override async IAsyncEnumerable<string> StreamChatCompletion(Model chatModel, ChatThread chatThread, SettingsManager settingsManager, [EnumeratorCancellation] CancellationToken token = default)
    {
        // Get the API key:
        var requestedSecret = await RUST_SERVICE.GetAPIKey(this);
        if(!requestedSecret.Success)
            yield break;

        // Prepare the system prompt:
        var systemPrompt = new Message
        {
            Role = "system",
            Content = chatThread.PrepareSystemPrompt(settingsManager, chatThread, this.logger),
        };

        // Prepare the xAI HTTP chat request:
        var xChatRequest = JsonSerializer.Serialize(new ChatRequest
        {
            Model = chatModel.Id,

            // Build the messages:
            // - First of all the system prompt
            // - Then all non-empty user and AI messages
            Messages = [systemPrompt, ..chatThread.Blocks.Where(n => n.ContentType is ContentType.TEXT && !string.IsNullOrWhiteSpace((n.Content as ContentText)?.Text)).Select(n => new Message
            {
                Role = n.Role switch
                {
                    ChatRole.USER => "user",
                    ChatRole.AI => "assistant",
                    ChatRole.AGENT => "assistant",
                    ChatRole.SYSTEM => "system",

                    _ => "user",
                },

                Content = n.Content switch
                {
                    ContentText text => text.Text,
                    _ => string.Empty,
                }
            }).ToList()],

            Seed = chatThread.Seed,

            // Right now, we only support streaming completions:
            Stream = true,
        }, JSON_SERIALIZER_OPTIONS);
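
        // For illustration (an assumption, not part of this commit's code): with the
        // OpenAI-compatible ChatRequest above, the payload sent to `chat/completions`
        // is expected to look roughly like this; the exact property naming depends on
        // JSON_SERIALIZER_OPTIONS:
        //
        // {
        //   "model": "grok-2-latest",
        //   "messages": [
        //     { "role": "system", "content": "..." },
        //     { "role": "user", "content": "..." }
        //   ],
        //   "seed": 42,
        //   "stream": true
        // }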

        async Task<HttpRequestMessage> RequestBuilder()
        {
            // Build the HTTP post request:
            var request = new HttpRequestMessage(HttpMethod.Post, "chat/completions");

            // Set the authorization header:
            request.Headers.Authorization = new AuthenticationHeaderValue("Bearer", await requestedSecret.Secret.Decrypt(ENCRYPTION));

            // Set the content:
            request.Content = new StringContent(xChatRequest, Encoding.UTF8, "application/json");
            return request;
        }

        await foreach (var content in this.StreamChatCompletionInternal<ResponseStreamLine>("xAI", RequestBuilder, token))
            yield return content;
    }

    #pragma warning disable CS1998 // Async method lacks 'await' operators and will run synchronously
    /// <inheritdoc />
    public override async IAsyncEnumerable<ImageURL> StreamImageCompletion(Model imageModel, string promptPositive, string promptNegative = FilterOperator.String.Empty, ImageURL referenceImageURL = default, [EnumeratorCancellation] CancellationToken token = default)
    {
        yield break;
    }
    #pragma warning restore CS1998 // Async method lacks 'await' operators and will run synchronously

    /// <inheritdoc />
    public override Task<IEnumerable<Model>> GetTextModels(string? apiKeyProvisional = null, CancellationToken token = default)
    {
        return this.LoadModels(["grok-"], token, apiKeyProvisional);
    }

    /// <inheritdoc />
    public override Task<IEnumerable<Model>> GetImageModels(string? apiKeyProvisional = null, CancellationToken token = default)
    {
        return Task.FromResult<IEnumerable<Model>>([]);
    }

    /// <inheritdoc />
    public override Task<IEnumerable<Model>> GetEmbeddingModels(string? apiKeyProvisional = null, CancellationToken token = default)
    {
        return Task.FromResult<IEnumerable<Model>>([]);
    }

    #endregion

    private async Task<IEnumerable<Model>> LoadModels(string[] prefixes, CancellationToken token, string? apiKeyProvisional = null)
    {
        var secretKey = apiKeyProvisional switch
        {
            not null => apiKeyProvisional,
            _ => await RUST_SERVICE.GetAPIKey(this) switch
            {
                { Success: true } result => await result.Secret.Decrypt(ENCRYPTION),
                _ => null,
            }
        };

        if (secretKey is null)
            return [];

        var request = new HttpRequestMessage(HttpMethod.Get, "models");
        request.Headers.Authorization = new AuthenticationHeaderValue("Bearer", secretKey);

        var response = await this.httpClient.SendAsync(request, token);
        if(!response.IsSuccessStatusCode)
            return [];

        var modelResponse = await response.Content.ReadFromJsonAsync<ModelsResponse>(token);

        //
        // The API does not return the alias model names, so we have to add them manually:
        // Right now, the only alias to add is `grok-2-latest`.
        //
        return modelResponse.Data.Where(model => prefixes.Any(prefix => model.Id.StartsWith(prefix, StringComparison.InvariantCulture)))
            .Concat([
                new Model
                {
                    Id = "grok-2-latest",
                    DisplayName = "Grok 2.0 (latest)",
                }
            ]);
    }
}
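
For orientation, the following is a minimal, self-contained sketch of what `ProviderX` effectively does on the wire: it posts an OpenAI-compatible `chat/completions` request to `https://api.x.ai/v1/` with a Bearer token and reads the resulting server-sent-events stream line by line. The environment variable, model name, prompts, and the simplified SSE handling are illustrative assumptions, not code from this commit.

```csharp
using System;
using System.IO;
using System.Net.Http;
using System.Net.Http.Headers;
using System.Text;

// Illustrative only: a raw call against the same endpoint that ProviderX targets.
var apiKey = Environment.GetEnvironmentVariable("XAI_API_KEY")
             ?? throw new InvalidOperationException("Set XAI_API_KEY first.");

using var client = new HttpClient { BaseAddress = new Uri("https://api.x.ai/v1/") };
client.DefaultRequestHeaders.Authorization = new AuthenticationHeaderValue("Bearer", apiKey);

// The same OpenAI-compatible payload shape that ProviderX builds via ChatRequest:
const string payload = """
{
    "model": "grok-2-latest",
    "messages": [
        { "role": "system", "content": "You are a helpful assistant." },
        { "role": "user", "content": "Say hello." }
    ],
    "stream": true
}
""";

using var request = new HttpRequestMessage(HttpMethod.Post, "chat/completions");
request.Content = new StringContent(payload, Encoding.UTF8, "application/json");

// Read the SSE stream line by line; each "data:" line carries one JSON chunk,
// and "[DONE]" marks the end of the stream.
using var response = await client.SendAsync(request, HttpCompletionOption.ResponseHeadersRead);
response.EnsureSuccessStatusCode();
await using var stream = await response.Content.ReadAsStreamAsync();
using var reader = new StreamReader(stream);

while (await reader.ReadLineAsync() is { } line)
{
    if (!line.StartsWith("data: ", StringComparison.Ordinal))
        continue;

    var chunk = line["data: ".Length..];
    if (chunk is "[DONE]")
        break;

    Console.WriteLine(chunk); // ProviderX deserializes such chunks into ResponseStreamLine instead.
}
```

Within AI Studio itself, none of this has to be written by hand: the factory switch shown earlier returns a ready-to-use `ProviderX` instance for `LLMProviders.X`, and the streaming loop is handled by the shared `StreamChatCompletionInternal<ResponseStreamLine>` helper.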
@@ -1,3 +1,4 @@
# v0.9.25, build 200 (2025-01-xx xx:xx UTC)
- Added xAI as a new provider. xAI provides their Grok models for generating content.
- Improved the stop generation button behavior to ensure that the AI stops generating content immediately (which will save compute time, energy and financial resources).
- Restructured the streaming network code so that it is centralized instead of being implemented in each individual provider. This will allow for easier maintenance and updates in the future.
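
The shared streaming helper mentioned above surfaces in the diff as `BaseProvider.StreamChatCompletionInternal<ResponseStreamLine>`, which `ProviderX` calls instead of parsing the response itself. Its real implementation is not part of this diff; the block below is only a rough sketch of the general pattern such a centralized SSE helper tends to follow, and every name other than `StreamChatCompletionInternal` is an illustrative assumption.

```csharp
using System;
using System.Collections.Generic;
using System.IO;
using System.Net.Http;
using System.Runtime.CompilerServices;
using System.Text.Json;
using System.Threading;
using System.Threading.Tasks;

// Rough sketch only; the real BaseProvider in AI Studio may differ in detail.
public interface IResponseChunk
{
    // The piece of generated text carried by one streamed chunk.
    string? Delta { get; }
}

public abstract class StreamingProviderSketch(HttpClient httpClient)
{
    protected async IAsyncEnumerable<string> StreamChatCompletionInternal<T>(
        string providerName, // presumably used for logging in the real implementation
        Func<Task<HttpRequestMessage>> requestBuilder,
        [EnumeratorCancellation] CancellationToken token = default)
        where T : class, IResponseChunk
    {
        // The provider only supplies the request; all SSE plumbing lives here.
        using var request = await requestBuilder();
        using var response = await httpClient.SendAsync(request, HttpCompletionOption.ResponseHeadersRead, token);
        await using var stream = await response.Content.ReadAsStreamAsync(token);
        using var reader = new StreamReader(stream);

        while (!token.IsCancellationRequested && await reader.ReadLineAsync(token) is { } line)
        {
            if (!line.StartsWith("data: ", StringComparison.Ordinal))
                continue;

            var payload = line["data: ".Length..];
            if (payload is "[DONE]")
                yield break;

            // Each provider chooses its own chunk type T (e.g., ResponseStreamLine):
            var chunk = JsonSerializer.Deserialize<T>(payload);
            if (chunk?.Delta is { Length: > 0 } delta)
                yield return delta;
        }
    }
}
```

With this pattern, each provider, like `ProviderX` above, only has to build its request and pick a chunk type, which is exactly what the `RequestBuilder` local function and the `ResponseStreamLine` type parameter do in this commit.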