MindWorkAI/AI-Studio (mirror of https://github.com/MindWorkAI/AI-Studio.git)

Fixed OpenAI o models (#239)

Commit 12b3d6fc3d, parent 21927754b6
@@ -1,3 +1,5 @@
+using System.Net;
+
 using AIStudio.Chat;
 
 using RustService = AIStudio.Tools.RustService;

@@ -103,6 +105,13 @@ public abstract class BaseProvider : IProvider, ISecretId
                 break;
             }
             
+            if(nextResponse.StatusCode is HttpStatusCode.BadRequest)
+            {
+                this.logger.LogError($"Failed request with status code {nextResponse.StatusCode} (message = '{nextResponse.ReasonPhrase}').");
+                errorMessage = nextResponse.ReasonPhrase;
+                break;
+            }
+            
             errorMessage = nextResponse.ReasonPhrase;
             var timeSeconds = Math.Pow(RETRY_DELAY_SECONDS, retry + 1);
             if(timeSeconds > 90)

@@ -112,7 +121,7 @@ public abstract class BaseProvider : IProvider, ISecretId
             await Task.Delay(TimeSpan.FromSeconds(timeSeconds), token);
         }
         
-        if(retry >= MAX_RETRIES)
+        if(retry >= MAX_RETRIES || !string.IsNullOrWhiteSpace(errorMessage))
             return new HttpRateLimitedStreamResult(false, true, errorMessage ?? $"Failed after {MAX_RETRIES} retries; no provider message available", response);
         
         return new HttpRateLimitedStreamResult(true, false, string.Empty, response);

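Taken together, the two BaseProvider hunks change the retry loop so that a 400 Bad Request aborts immediately instead of being retried, and so that any captured provider message short-circuits the final result. Below is a minimal, self-contained sketch of that flow; all identifiers and the two constant values are illustrative assumptions, not AI Studio's actual names or settings.

// A minimal sketch of the retry flow shown in the hunks above. All identifiers and the
// two constants are illustrative assumptions; AI Studio's real implementation lives in
// BaseProvider and differs in detail.
using System;
using System.Net;
using System.Net.Http;
using System.Threading;
using System.Threading.Tasks;

public static class RetrySketch
{
    private const int MAX_RETRIES = 6;             // assumed value
    private const double RETRY_DELAY_SECONDS = 4;  // assumed value

    public static async Task<(bool Success, string ErrorMessage)> SendWithRetriesAsync(
        HttpClient client,
        Func<HttpRequestMessage> requestBuilder,   // builds a fresh HttpRequestMessage per attempt
        CancellationToken token)
    {
        string? errorMessage = null;

        for (var retry = 0; retry < MAX_RETRIES; retry++)
        {
            using var response = await client.SendAsync(requestBuilder(), token);
            if (response.IsSuccessStatusCode)
                return (true, string.Empty);

            // A 400 means the request itself is invalid (e.g., an unsupported parameter).
            // Retrying cannot change that, so we surface the provider's message right away:
            if (response.StatusCode is HttpStatusCode.BadRequest)
            {
                errorMessage = response.ReasonPhrase;
                break;
            }

            // Anything else is treated as transient (rate limit etc.): remember the last
            // message and back off exponentially, capped at 90 seconds:
            errorMessage = response.ReasonPhrase;
            var delaySeconds = Math.Min(Math.Pow(RETRY_DELAY_SECONDS, retry + 1), 90);
            await Task.Delay(TimeSpan.FromSeconds(delaySeconds), token);
        }

        return (false, errorMessage ?? $"Failed after {MAX_RETRIES} retries; no provider message available");
    }
}

The point of the BadRequest branch is that an invalid request fails identically on every attempt, so backing off would only delay the error message the user eventually sees.
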
@@ -33,10 +33,35 @@ public sealed class ProviderOpenAI(ILogger logger) : BaseProvider("https://api.o
         if(!requestedSecret.Success)
             yield break;
         
+        // Unfortunately, OpenAI changed the name of the system prompt based on the model.
+        // All models that start with "o" (the omni aka reasoning models) and all GPT4o models
+        // have the system prompt named "developer". All other models have the system prompt
+        // named "system". We need to check this to get the correct system prompt.
+        //
+        // To complicate it even more: The early versions of the reasoning models, which were
+        // released before the 17th of December 2024, have no system prompt at all. We need to
+        // check this as well.
+        
+        // Apply the basic rule first:
+        var systemPromptRole = chatModel.Id.StartsWith('o') || chatModel.Id.Contains("4o") ? "developer" : "system";
+        
+        // Check if the model is an early version of the reasoning models:
+        systemPromptRole = chatModel.Id switch
+        {
+            "o1-mini" => "user",
+            "o1-mini-2024-09-12" => "user",
+            "o1-preview" => "user",
+            "o1-preview-2024-09-12" => "user",
+            
+            _ => systemPromptRole,
+        };
+        
+        this.logger.LogInformation($"Using the system prompt role '{systemPromptRole}' for model '{chatModel.Id}'.");
+        
         // Prepare the system prompt:
         var systemPrompt = new Message
         {
-            Role = "system",
+            Role = systemPromptRole,
             Content = chatThread.SystemPrompt,
         };
         

@@ -55,7 +80,7 @@ public sealed class ProviderOpenAI(ILogger logger) : BaseProvider("https://api.o
                 ChatRole.USER => "user",
                 ChatRole.AI => "assistant",
                 ChatRole.AGENT => "assistant",
-                ChatRole.SYSTEM => "system",
+                ChatRole.SYSTEM => systemPromptRole,
                 
                 _ => "user",
             },

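The two ProviderOpenAI hunks above pick the role name once and reuse it both for the dedicated system prompt and for any ChatRole.SYSTEM messages in the thread. Below is a compact sketch of that selection rule as a standalone helper; GetSystemPromptRole is a hypothetical name, not part of AI Studio's API.

// A compact sketch of the role-selection rule from the diff, factored into a helper.
// GetSystemPromptRole is a hypothetical name, not part of AI Studio's API.
using System;

public static class OpenAIRoleSketch
{
    public static string GetSystemPromptRole(string modelId)
    {
        // Reasoning ("o") models and the GPT-4o family expect the role "developer";
        // every other model keeps the classic "system" role:
        var role = modelId.StartsWith('o') || modelId.Contains("4o") ? "developer" : "system";

        // The early o1 previews (released before 17 December 2024) accept no system
        // prompt at all, so their system prompt has to travel as a user message:
        return modelId switch
        {
            "o1-mini" or "o1-mini-2024-09-12" => "user",
            "o1-preview" or "o1-preview-2024-09-12" => "user",
            _ => role,
        };
    }

    public static void Main()
    {
        // Examples of the rule as implemented above:
        Console.WriteLine(GetSystemPromptRole("gpt-4o"));        // developer
        Console.WriteLine(GetSystemPromptRole("o1-preview"));    // user
        Console.WriteLine(GetSystemPromptRole("gpt-3.5-turbo")); // system
    }
}
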
@@ -71,7 +96,6 @@ public sealed class ProviderOpenAI(ILogger logger) : BaseProvider("https://api.o
             
             // Right now, we only support streaming completions:
             Stream = true,
-            FrequencyPenalty = 0f,
         }, JSON_SERIALIZER_OPTIONS);
         
         async Task<HttpRequestMessage> RequestBuilder()

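The last hunk drops FrequencyPenalty from the request body; the OpenAI reasoning models reject several classic sampling parameters, frequency_penalty among them, and such a rejection is exactly the kind of 400 Bad Request that the BaseProvider change above now reports immediately instead of retrying. The commit simply deletes the assignment; a related pattern, sketched below with a hypothetical ChatRequestSketch type (not AI Studio's actual request model), is to make such parameters nullable and let System.Text.Json omit them when they are null.

// A sketch of keeping an optional sampling parameter out of the JSON body entirely.
// ChatRequestSketch is a hypothetical type, not AI Studio's actual request model; the
// commit itself simply deletes the FrequencyPenalty assignment.
using System;
using System.Text.Json;
using System.Text.Json.Serialization;

public sealed record ChatRequestSketch
{
    [JsonPropertyName("model")]
    public required string Model { get; init; }

    [JsonPropertyName("stream")]
    public bool Stream { get; init; } = true;

    // Nullable on purpose: when left null, the serializer options below drop the field,
    // so a reasoning model never sees a parameter it would reject with 400 Bad Request.
    [JsonPropertyName("frequency_penalty")]
    public float? FrequencyPenalty { get; init; }
}

public static class PayloadSketch
{
    private static readonly JsonSerializerOptions OPTIONS = new()
    {
        DefaultIgnoreCondition = JsonIgnoreCondition.WhenWritingNull,
    };

    public static void Main()
    {
        var request = new ChatRequestSketch { Model = "o1" };
        Console.WriteLine(JsonSerializer.Serialize(request, OPTIONS));
        // Prints: {"model":"o1","stream":true}  (no "frequency_penalty" field)
    }
}
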
@@ -3,6 +3,7 @@
 - Improved provider requests by handling rate limits by retrying requests.
 - Improved the creation of the "the bias of the day" workspace; create that workspace only when the bias of the day feature is used.
 - Improved the save operation of settings by using a temporary file to avoid data loss in rare cases.
+- Fixed OpenAI `o` (aka omni, aka reasoning) models. The early preview versions (released before 17th December 2024) do not support system prompts at all; for these, the system prompt is now sent as a user prompt. The final versions of the OpenAI `o` models can use system prompts, but the role is named `developer` instead of `system`.
 - Fixed layout issues when selecting `other` items (e.g., programming languages).
 - Fixed a bug about the bias of the day workspace when the workspace component was hidden.
 - Upgraded dependencies to the latest versions.