// NOTE(review): this span of the patch adds the new files ChatRequest.cs and
// Message.cs and opens ModelResponse.cs. The paste lost every angle-bracket
// span (generic type arguments and XML doc tags); they are reconstructed
// below from the surrounding usage.

// ----- new file: app/MindWork AI Studio/Provider/OpenAI/ChatRequest.cs -----

using System.ComponentModel.DataAnnotations;

namespace AIStudio.Provider.OpenAI;

/// <summary>
/// The OpenAI chat request model.
/// </summary>
/// <param name="Model">Which model to use for chat completion.</param>
/// <param name="Messages">The chat messages.</param>
/// <param name="Stream">Whether to stream the chat completion.</param>
/// <param name="Seed">The seed for the chat completion.</param>
/// <param name="FrequencyPenalty">The frequency penalty for the chat completion.</param>
public readonly record struct ChatRequest(
    string Model,
    IList<Message> Messages,
    bool Stream,
    int Seed,

    // OpenAI only accepts frequency penalties in [-2.0, +2.0]:
    [Range(-2.0f, 2.0f)]
    float FrequencyPenalty
);

// ----- new file: app/MindWork AI Studio/Provider/OpenAI/Message.cs -----
// (the original file repeats the `namespace AIStudio.Provider.OpenAI;` header)

/// <summary>
/// Chat message model.
/// </summary>
/// <param name="Content">The text content of the message.</param>
/// <param name="Role">The role of the message.</param>
public readonly record struct Message(string Content, string Role);

// ----- new file: app/MindWork AI Studio/Provider/OpenAI/ModelResponse.cs -----
// (continues in the next span of the patch)

/// <summary>
/// A data model for the response from the model endpoint.
/// </summary>
/// <param name="Data">The list of models returned by the endpoint.</param>
public readonly record struct ModelsResponse(IList<Model> Data);

// ----- new file: app/MindWork AI Studio/Provider/OpenAI/ProviderOpenAI.cs -----
// NOTE(review): the paste lost all angle-bracket spans (generic type arguments
// and XML doc tags); they are reconstructed below from the surrounding usage.

using System.Net.Http.Headers;
using System.Runtime.CompilerServices;
using System.Text;
using System.Text.Json;

using AIStudio.Chat;
using AIStudio.Settings;

using Microsoft.JSInterop;

using MudBlazor;

namespace AIStudio.Provider.OpenAI;

/// <summary>
/// The OpenAI provider.
/// </summary>
public sealed class ProviderOpenAI() : BaseProvider("https://api.openai.com/v1/"), IProvider
{
    // Snake_case naming matches the OpenAI wire format (e.g. frequency_penalty,
    // system_fingerprint) for both serialization and deserialization.
    private static readonly JsonSerializerOptions JSON_SERIALIZER_OPTIONS = new()
    {
        PropertyNamingPolicy = JsonNamingPolicy.SnakeCaseLower,
    };

    #region Implementation of IProvider

    /// <inheritdoc />
    public string Id => "OpenAI";

    /// <inheritdoc />
    public string InstanceName { get; set; } = "OpenAI";

    /// <inheritdoc />
    public async IAsyncEnumerable<string> StreamChatCompletion(IJSRuntime jsRuntime, SettingsManager settings, Model chatModel, ChatThread chatThread, [EnumeratorCancellation] CancellationToken token = default)
    {
        // Get the API key:
        var requestedSecret = await settings.GetAPIKey(jsRuntime, this);
        if(!requestedSecret.Success)
            yield break;

        // Prepare the system prompt:
        var systemPrompt = new Message
        {
            Role = "system",
            Content = chatThread.SystemPrompt,
        };

        // Prepare the OpenAI HTTP chat request:
        var openAIChatRequest = JsonSerializer.Serialize(new ChatRequest
        {
            Model = chatModel.Id,

            // Build the messages:
            // - First of all the system prompt
            // - Then all non-empty user and AI text messages
            Messages = [systemPrompt, ..chatThread.Blocks.Where(n => n.ContentType is ContentType.TEXT && !string.IsNullOrWhiteSpace((n.Content as ContentText)?.Text)).Select(n => new Message
            {
                Role = n.Role switch
                {
                    ChatRole.USER => "user",
                    ChatRole.AI => "assistant",
                    ChatRole.SYSTEM => "system",

                    // Fall back to "user" for any unknown role:
                    _ => "user",
                },

                Content = n.Content switch
                {
                    ContentText text => text.Text,
                    _ => string.Empty,
                }
            }).ToList()],

            Seed = chatThread.Seed,

            // Right now, we only support streaming completions:
            Stream = true,
            FrequencyPenalty = 0f,
        }, JSON_SERIALIZER_OPTIONS);

        // Build the HTTP post request:
        var request = new HttpRequestMessage(HttpMethod.Post, "chat/completions");

        // Set the authorization header:
        request.Headers.Authorization = new AuthenticationHeaderValue("Bearer", requestedSecret.Secret);

        // Set the content:
        request.Content = new StringContent(openAIChatRequest, Encoding.UTF8, "application/json");

        // Send the request with the ResponseHeadersRead option.
        // This allows us to read the stream as soon as the headers are received.
        // This is important because we want to stream the responses.
        var response = await this.httpClient.SendAsync(request, HttpCompletionOption.ResponseHeadersRead, token);

        // Open the response stream:
        var openAIStream = await response.Content.ReadAsStreamAsync(token);

        // Add a stream reader to read the stream, line by line:
        var streamReader = new StreamReader(openAIStream);

        // NOTE(review): response/streamReader are never disposed here; consider
        // `using` declarations so the connection is released on early exit.
        // Read the stream, line by line:
        while(!streamReader.EndOfStream)
        {
            // Check if the token is cancelled:
            if(token.IsCancellationRequested)
                yield break;

            // Read the next line:
            var line = await streamReader.ReadLineAsync(token);

            // Skip empty lines:
            if(string.IsNullOrWhiteSpace(line))
                continue;

            // Skip lines that do not start with "data: ". According
            // to the SSE specification, we only want to read the data lines:
            if(!line.StartsWith("data: ", StringComparison.InvariantCulture))
                continue;

            // Check if the line is the end of the stream:
            if (line.StartsWith("data: [DONE]", StringComparison.InvariantCulture))
                yield break;

            ResponseStreamLine openAIResponse;
            try
            {
                // We know that the line starts with "data: ". Hence, we can
                // skip the first 6 characters to get the JSON data after that.
                var jsonData = line[6..];

                // Deserialize the JSON data:
                openAIResponse = JsonSerializer.Deserialize<ResponseStreamLine>(jsonData, JSON_SERIALIZER_OPTIONS);
            }
            catch
            {
                // Skip invalid JSON data:
                continue;
            }

            // Skip empty responses:
            if(openAIResponse == default || openAIResponse.Choices.Count == 0)
                continue;

            // Yield the response:
            yield return openAIResponse.Choices[0].Delta.Content;
        }
    }

    #pragma warning disable CS1998 // Async method lacks 'await' operators and will run synchronously
    /// <inheritdoc />
    // NOTE(review): `FilterOperator.String.Empty` is MudBlazor's filter-operator
    // constant ("is empty"), not an empty string — presumably `string.Empty` was
    // intended as the default; confirm before changing it.
    public async IAsyncEnumerable<ImageURL> StreamImageCompletion(IJSRuntime jsRuntime, SettingsManager settings, Model imageModel, string promptPositive, string promptNegative = FilterOperator.String.Empty, ImageURL referenceImageURL = default, [EnumeratorCancellation] CancellationToken token = default)
    {
        // Image generation is not implemented yet:
        yield break;
    }
    #pragma warning restore CS1998 // Async method lacks 'await' operators and will run synchronously

    /// <inheritdoc />
    public async Task<IEnumerable<Model>> GetTextModels(IJSRuntime jsRuntime, SettingsManager settings, CancellationToken token = default)
    {
        return await this.LoadModels(jsRuntime, settings, "gpt-", token);
    }

    /// <inheritdoc />
    public async Task<IEnumerable<Model>> GetImageModels(IJSRuntime jsRuntime, SettingsManager settings, CancellationToken token = default)
    {
        return await this.LoadModels(jsRuntime, settings, "dall-e-", token);
    }

    #endregion

    /// <summary>
    /// Loads all models from the OpenAI models endpoint whose id starts with the given prefix.
    /// </summary>
    /// <param name="jsRuntime">The JS runtime, needed to read the API key.</param>
    /// <param name="settings">The settings manager holding the provider secrets.</param>
    /// <param name="prefix">Only models whose id starts with this prefix are returned.</param>
    /// <param name="token">The cancellation token.</param>
    /// <returns>The filtered model list; empty when the key is missing or the request fails.</returns>
    private async Task<IEnumerable<Model>> LoadModels(IJSRuntime jsRuntime, SettingsManager settings, string prefix, CancellationToken token)
    {
        var requestedSecret = await settings.GetAPIKey(jsRuntime, this);
        if(!requestedSecret.Success)
            return new List<Model>();

        var request = new HttpRequestMessage(HttpMethod.Get, "models");
        request.Headers.Authorization = new AuthenticationHeaderValue("Bearer", requestedSecret.Secret);

        var emptyList = new List<Model>();
        var response = await this.httpClient.SendAsync(request, token);
        if(!response.IsSuccessStatusCode)
            return emptyList;

        var modelResponse = await response.Content.ReadFromJsonAsync<ModelsResponse>(token);
        return modelResponse.Data.Where(n => n.Id.StartsWith(prefix, StringComparison.InvariantCulture)).ToList();
    }
}

// ----- new file: app/MindWork AI Studio/Provider/OpenAI/ResponseStreamLine.cs -----
// (the original file repeats the `namespace AIStudio.Provider.OpenAI;` header)

/// <summary>
/// Data model for a line in the response stream, for streaming completions.
/// </summary>
/// <param name="Id">The id of the response.</param>
/// <param name="Object">The object describing the response.</param>
/// <param name="Created">The timestamp of the response.</param>
/// <param name="Model">The model used for the response.</param>
/// <param name="SystemFingerprint">The system fingerprint; together with the seed, this allows you to reproduce the response.</param>
/// <param name="Choices">The choices made by the AI.</param>
public readonly record struct ResponseStreamLine(string Id, string Object, uint Created, string Model, string SystemFingerprint, IList<Choice> Choices);

/// <summary>
/// Data model for a choice made by the AI.
/// </summary>
/// <param name="Index">The index of the choice.</param>
/// <param name="Delta">The delta text of the choice.</param>
public readonly record struct Choice(int Index, Delta Delta);

/// <summary>
/// The delta text of a choice.
/// </summary>
/// <param name="Content">The content of the delta text.</param>
public readonly record struct Delta(string Content);