diff --git a/app/MindWork AI Studio/Provider/Google/Model.cs b/app/MindWork AI Studio/Provider/Google/Model.cs
deleted file mode 100644
index f1a53282..00000000
--- a/app/MindWork AI Studio/Provider/Google/Model.cs
+++ /dev/null
@@ -1,3 +0,0 @@
-namespace AIStudio.Provider.Google;
-
-public readonly record struct Model(string Name, string DisplayName);
\ No newline at end of file
diff --git a/app/MindWork AI Studio/Provider/Google/ModelsResponse.cs b/app/MindWork AI Studio/Provider/Google/ModelsResponse.cs
deleted file mode 100644
index 01cb81f9..00000000
--- a/app/MindWork AI Studio/Provider/Google/ModelsResponse.cs
+++ /dev/null
@@ -1,7 +0,0 @@
-namespace AIStudio.Provider.Google;
-
-/// <summary>
-/// A data model for the response from the model endpoint.
-/// </summary>
-/// <param name="Models"></param>
-public readonly record struct ModelsResponse(IList<Model> Models);
\ No newline at end of file
diff --git a/app/MindWork AI Studio/Provider/Google/ProviderGoogle.cs b/app/MindWork AI Studio/Provider/Google/ProviderGoogle.cs
index 48dea49e..97157080 100644
--- a/app/MindWork AI Studio/Provider/Google/ProviderGoogle.cs
+++ b/app/MindWork AI Studio/Provider/Google/ProviderGoogle.cs
@@ -22,7 +22,7 @@ public class ProviderGoogle() : BaseProvider(LLMProviders.GOOGLE, "https://gener
public override string InstanceName { get; set; } = "Google Gemini";
/// <inheritdoc />
- public override async IAsyncEnumerable<string> StreamChatCompletion(Provider.Model chatModel, ChatThread chatThread, SettingsManager settingsManager, [EnumeratorCancellation] CancellationToken token = default)
+ public override async IAsyncEnumerable<string> StreamChatCompletion(Model chatModel, ChatThread chatThread, SettingsManager settingsManager, [EnumeratorCancellation] CancellationToken token = default)
{
// Get the API key:
var requestedSecret = await RUST_SERVICE.GetAPIKey(this, SecretStoreType.LLM_PROVIDER);
@@ -76,57 +76,50 @@ public class ProviderGoogle() : BaseProvider(LLMProviders.GOOGLE, "https://gener
#pragma warning disable CS1998 // Async method lacks 'await' operators and will run synchronously
/// <inheritdoc />
- public override async IAsyncEnumerable<ImageURL> StreamImageCompletion(Provider.Model imageModel, string promptPositive, string promptNegative = FilterOperator.String.Empty, ImageURL referenceImageURL = default, [EnumeratorCancellation] CancellationToken token = default)
+ public override async IAsyncEnumerable<ImageURL> StreamImageCompletion(Model imageModel, string promptPositive, string promptNegative = FilterOperator.String.Empty, ImageURL referenceImageURL = default, [EnumeratorCancellation] CancellationToken token = default)
{
yield break;
}
#pragma warning restore CS1998 // Async method lacks 'await' operators and will run synchronously
/// <inheritdoc />
- public override Task<string> TranscribeAudioAsync(Provider.Model transcriptionModel, string audioFilePath, SettingsManager settingsManager, CancellationToken token = default)
+ public override Task<string> TranscribeAudioAsync(Model transcriptionModel, string audioFilePath, SettingsManager settingsManager, CancellationToken token = default)
{
return Task.FromResult(string.Empty);
}
/// <inheritdoc />
- public override async Task<IEnumerable<Provider.Model>> GetTextModels(string? apiKeyProvisional = null, CancellationToken token = default)
+ public override async Task<IEnumerable<Model>> GetTextModels(string? apiKeyProvisional = null, CancellationToken token = default)
{
- var modelResponse = await this.LoadModels(SecretStoreType.LLM_PROVIDER, token, apiKeyProvisional);
- if(modelResponse == default)
- return [];
-
- return modelResponse.Models.Where(model =>
- model.Name.StartsWith("models/gemini-", StringComparison.OrdinalIgnoreCase) && !model.Name.Contains("embed"))
- .Select(n => new Provider.Model(n.Name.Replace("models/", string.Empty), n.DisplayName));
+ var models = await this.LoadModels(SecretStoreType.LLM_PROVIDER, token, apiKeyProvisional);
+ return models.Where(model =>
+ model.Id.StartsWith("gemini-", StringComparison.OrdinalIgnoreCase) &&
+ !this.IsEmbeddingModel(model.Id))
+ .Select(this.WithDisplayNameFallback);
}
/// <inheritdoc />
- public override Task<IEnumerable<Provider.Model>> GetImageModels(string? apiKeyProvisional = null, CancellationToken token = default)
+ public override Task<IEnumerable<Model>> GetImageModels(string? apiKeyProvisional = null, CancellationToken token = default)
{
- return Task.FromResult(Enumerable.Empty<Provider.Model>());
+ return Task.FromResult(Enumerable.Empty<Model>());
}
- public override async Task<IEnumerable<Provider.Model>> GetEmbeddingModels(string? apiKeyProvisional = null, CancellationToken token = default)
+ public override async Task<IEnumerable<Model>> GetEmbeddingModels(string? apiKeyProvisional = null, CancellationToken token = default)
{
- var modelResponse = await this.LoadModels(SecretStoreType.EMBEDDING_PROVIDER, token, apiKeyProvisional);
- if(modelResponse == default)
- return [];
-
- return modelResponse.Models.Where(model =>
- model.Name.StartsWith("models/text-embedding-", StringComparison.OrdinalIgnoreCase) ||
- model.Name.StartsWith("models/gemini-embed", StringComparison.OrdinalIgnoreCase))
- .Select(n => new Provider.Model(n.Name.Replace("models/", string.Empty), n.DisplayName));
+ var models = await this.LoadModels(SecretStoreType.EMBEDDING_PROVIDER, token, apiKeyProvisional);
+ return models.Where(model => this.IsEmbeddingModel(model.Id))
+ .Select(this.WithDisplayNameFallback);
}
/// <inheritdoc />
- public override Task<IEnumerable<Provider.Model>> GetTranscriptionModels(string? apiKeyProvisional = null, CancellationToken token = default)
+ public override Task<IEnumerable<Model>> GetTranscriptionModels(string? apiKeyProvisional = null, CancellationToken token = default)
{
- return Task.FromResult(Enumerable.Empty<Provider.Model>());
+ return Task.FromResult(Enumerable.Empty<Model>());
}
#endregion
- private async Task<ModelsResponse> LoadModels(SecretStoreType storeType, CancellationToken token, string? apiKeyProvisional = null)
+ private async Task<IEnumerable<Model>> LoadModels(SecretStoreType storeType, CancellationToken token, string? apiKeyProvisional = null)
{
var secretKey = apiKeyProvisional switch
{
@@ -138,16 +131,57 @@ public class ProviderGoogle() : BaseProvider(LLMProviders.GOOGLE, "https://gener
}
};
- if (secretKey is null)
- return default;
+ if (string.IsNullOrWhiteSpace(secretKey))
+ return [];
- using var request = new HttpRequestMessage(HttpMethod.Get, $"models?key={secretKey}");
- using var response = await this.httpClient.SendAsync(request, token);
+ using var request = new HttpRequestMessage(HttpMethod.Get, "models");
+ request.Headers.Authorization = new AuthenticationHeaderValue("Bearer", secretKey);
+ using var response = await this.httpClient.SendAsync(request, token);
if(!response.IsSuccessStatusCode)
- return default;
+ {
+ LOGGER.LogError("Failed to load models with status code {ResponseStatusCode} and body: '{ResponseBody}'.", response.StatusCode, await response.Content.ReadAsStringAsync(token));
+ return [];
+ }
- var modelResponse = await response.Content.ReadFromJsonAsync<ModelsResponse>(token);
- return modelResponse;
+ try
+ {
+ var modelResponse = await response.Content.ReadFromJsonAsync<ModelsResponse>(token);
+ if (modelResponse == default || modelResponse.Data.Count is 0)
+ {
+ LOGGER.LogError("Google model list response did not contain a valid data array.");
+ return [];
+ }
+
+ return modelResponse.Data
+ .Where(model => !string.IsNullOrWhiteSpace(model.Id))
+ .Select(model => new Model(this.NormalizeModelId(model.Id), model.DisplayName))
+ .ToArray();
+ }
+ catch (Exception e)
+ {
+ LOGGER.LogError("Failed to parse Google model list response: '{Message}'.", e.Message);
+ return [];
+ }
+ }
+
+ private bool IsEmbeddingModel(string modelId)
+ {
+ return modelId.Contains("embedding", StringComparison.OrdinalIgnoreCase) ||
+ modelId.Contains("embed", StringComparison.OrdinalIgnoreCase);
+ }
+
+ private Model WithDisplayNameFallback(Model model)
+ {
+ return string.IsNullOrWhiteSpace(model.DisplayName)
+ ? new Model(model.Id, model.Id)
+ : model;
+ }
+
+ private string NormalizeModelId(string modelId)
+ {
+ return modelId.StartsWith("models/", StringComparison.OrdinalIgnoreCase)
+ ? modelId["models/".Length..]
+ : modelId;
}
}
\ No newline at end of file
diff --git a/app/MindWork AI Studio/wwwroot/changelog/v26.2.2.md b/app/MindWork AI Studio/wwwroot/changelog/v26.2.2.md
index 9ca3477a..b059b034 100644
--- a/app/MindWork AI Studio/wwwroot/changelog/v26.2.2.md
+++ b/app/MindWork AI Studio/wwwroot/changelog/v26.2.2.md
@@ -15,5 +15,6 @@
- Fixed an issue where manually saving chats in workspace manual-storage mode could appear unreliable during response streaming. The save button is now disabled while streaming to prevent partial saves.
- Fixed an issue where in some places "No profile" was displayed instead of the localized text.
- Fixed a bug in the Responses API of our OpenAI provider implementation where streamed whitespace chunks were discarded. We thank Oliver Kunc `OliverKunc` for his first contribution in resolving this issue. We appreciate your help, Oliver.
+- Fixed the Google Gemini model listing. We switched to Google's OpenAI-compatible API to retrieve the model list after Google changed the previous API, which stopped working.
- Upgraded to .NET 9.0.13 & Rust 1.93.1.
- Upgraded dependencies.
\ No newline at end of file