Add timeout handling and error logging across all providers

hart_s3 2026-04-27 14:39:47 +02:00
parent 519abe4fc2
commit 9ec687e22e
4 changed files with 181 additions and 59 deletions


@@ -24,6 +24,8 @@ namespace AIStudio.Provider;
/// </summary>
public abstract class BaseProvider : IProvider, ISecretId
{
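// Upper bound for every provider HTTP call; this is the "1 hour" referenced
// by the timeout message in SendTimeoutError below.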
private static readonly TimeSpan HTTP_TIMEOUT = TimeSpan.FromHours(1);
private static string TB(string fallbackEN) => I18N.I.T(fallbackEN, typeof(BaseProvider).Namespace, nameof(BaseProvider));
/// <summary>
@@ -74,6 +76,7 @@ public abstract class BaseProvider : IProvider, ISecretId
// Set the base URL:
this.HttpClient.BaseAddress = new(url);
this.HttpClient.Timeout = HTTP_TIMEOUT;
}
#region Handling of IProvider, which all providers must implement
@@ -136,6 +139,28 @@ public abstract class BaseProvider : IProvider, ISecretId
protected static ModelLoadResult FailedModelLoadResult(ModelLoadFailureReason failureReason, string? technicalDetails = null) => ModelLoadResult.Failure(failureReason, technicalDetails);
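/// <summary>
/// Determines whether an exception (or one of its inner exceptions) represents an
/// HTTP timeout rather than a user-initiated cancellation. When HttpClient.Timeout
/// elapses, .NET throws a TaskCanceledException (an OperationCanceledException);
/// on .NET 5 and later its inner exception is a TimeoutException. A set
/// cancellation token means the user canceled, so that case is excluded first.
/// </summary>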
protected bool IsTimeoutException(Exception exception, CancellationToken token = default)
{
if (token.IsCancellationRequested)
return false;
if (exception is TimeoutException)
return true;
if (exception is OperationCanceledException)
return true;
return exception.InnerException is not null && this.IsTimeoutException(exception.InnerException, token);
}
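/// <summary>
/// Notifies the user via the message bus that a request hit the one-hour timeout.
/// </summary>
/// <param name="action">A human-readable description of the attempted action,
/// e.g., "loading the available models".</param>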
protected Task SendTimeoutError(string action) => MessageBus.INSTANCE.SendError(new(
Icons.Material.Filled.HourglassTop,
string.Format(
TB("The request to the LLM provider '{0}' (type={1}) timed out after 1 hour while {2}. Please try again or check whether the provider is still responding."),
this.InstanceName,
this.Provider,
action)));
protected async Task<string?> GetModelLoadingSecretKey(SecretStoreType storeType, string? apiKeyProvisional = null, bool isTryingSecret = false) => apiKeyProvisional switch
{
not null => apiKeyProvisional,
@@ -175,25 +200,34 @@ public abstract class BaseProvider : IProvider, ISecretId
else if (!string.IsNullOrWhiteSpace(secretKey))
request.Headers.Authorization = new AuthenticationHeaderValue("Bearer", secretKey);
using var response = await this.HttpClient.SendAsync(request, token);
var responseBody = await response.Content.ReadAsStringAsync(token);
if (!response.IsSuccessStatusCode)
{
var failureReason = failureReasonSelector?.Invoke(response, responseBody) ?? GetDefaultModelLoadFailureReason(response);
return FailedModelLoadResult(failureReason, $"Status={(int)response.StatusCode} {response.ReasonPhrase}; Body='{responseBody}'");
}
try
{
var parsedResponse = JsonSerializer.Deserialize<TResponse>(responseBody, jsonSerializerOptions ?? JSON_SERIALIZER_OPTIONS);
if (parsedResponse is null)
return FailedModelLoadResult(ModelLoadFailureReason.INVALID_RESPONSE, "Model list response could not be deserialized.");
using var response = await this.HttpClient.SendAsync(request, token);
var responseBody = await response.Content.ReadAsStringAsync(token);
if (!response.IsSuccessStatusCode)
{
var failureReason = failureReasonSelector?.Invoke(response, responseBody) ?? GetDefaultModelLoadFailureReason(response);
return FailedModelLoadResult(failureReason, $"Status={(int)response.StatusCode} {response.ReasonPhrase}; Body='{responseBody}'");
}
return SuccessfulModelLoadResult(modelFactory(parsedResponse));
try
{
var parsedResponse = JsonSerializer.Deserialize<TResponse>(responseBody, jsonSerializerOptions ?? JSON_SERIALIZER_OPTIONS);
if (parsedResponse is null)
return FailedModelLoadResult(ModelLoadFailureReason.INVALID_RESPONSE, "Model list response could not be deserialized.");
return SuccessfulModelLoadResult(modelFactory(parsedResponse));
}
catch (Exception e)
{
return FailedModelLoadResult(ModelLoadFailureReason.INVALID_RESPONSE, e.Message);
}
}
catch (Exception e)
catch (Exception e) when (this.IsTimeoutException(e, token))
{
return FailedModelLoadResult(ModelLoadFailureReason.INVALID_RESPONSE, e.Message);
await this.SendTimeoutError("loading the available models");
this.logger.LogError(e, "Timed out while loading models from provider '{ProviderInstanceName}' (provider={ProviderType}).", this.InstanceName, this.Provider);
return FailedModelLoadResult(ModelLoadFailureReason.PROVIDER_UNAVAILABLE, e.Message);
}
}
@@ -223,7 +257,18 @@ public abstract class BaseProvider : IProvider, ISecretId
// Please note: we do not dispose the response here. The caller is responsible
// for disposing the response object. This is important because the response
// object is used to read the stream.
var nextResponse = await this.HttpClient.SendAsync(request, HttpCompletionOption.ResponseHeadersRead, token);
HttpResponseMessage nextResponse;
try
{
nextResponse = await this.HttpClient.SendAsync(request, HttpCompletionOption.ResponseHeadersRead, token);
}
catch (Exception e) when (this.IsTimeoutException(e, token))
{
await this.SendTimeoutError("waiting for the chat response");
this.logger.LogError(e, "Timed out while sending a streaming request to provider '{ProviderInstanceName}' (provider={ProviderType}).", this.InstanceName, this.Provider);
return new HttpRateLimitedStreamResult(false, true, e.Message, response);
}
if (nextResponse.IsSuccessStatusCode)
{
response = nextResponse;
@@ -341,8 +386,20 @@ public abstract class BaseProvider : IProvider, ISecretId
}
catch(Exception e)
{
await MessageBus.INSTANCE.SendError(new(Icons.Material.Filled.Stream, string.Format(TB("Tried to communicate with the LLM provider '{0}'. There were some problems with the request. The provider message is: '{1}'"), this.InstanceName, e.Message)));
this.logger.LogError($"Failed to stream chat completion from {providerName} '{this.InstanceName}': {e.Message}");
if (token.IsCancellationRequested)
{
this.logger.LogWarning("The user canceled the chat completion request for {ProviderName} '{ProviderInstanceName}' before the response stream was opened.", providerName, this.InstanceName);
}
else if (this.IsTimeoutException(e, token))
{
await this.SendTimeoutError("opening the chat response stream");
this.logger.LogError(e, "Timed out while opening the chat completion stream from {ProviderName} '{ProviderInstanceName}'.", providerName, this.InstanceName);
}
else
{
await MessageBus.INSTANCE.SendError(new(Icons.Material.Filled.Stream, string.Format(TB("Tried to communicate with the LLM provider '{0}'. There were some problems with the request. The provider message is: '{1}'"), this.InstanceName, e.Message)));
this.logger.LogError($"Failed to stream chat completion from {providerName} '{this.InstanceName}': {e.Message}");
}
}
if (streamReader is null)
@@ -383,8 +440,21 @@ public abstract class BaseProvider : IProvider, ISecretId
}
catch (Exception e)
{
await MessageBus.INSTANCE.SendError(new(Icons.Material.Filled.Stream, string.Format(TB("Tried to stream the LLM provider '{0}' answer. Was not able to read the stream. The message is: '{1}'"), this.InstanceName, e.Message)));
this.logger.LogError($"Failed to read the stream from {providerName} '{this.InstanceName}': {e.Message}");
if (token.IsCancellationRequested)
{
this.logger.LogWarning("The user canceled the chat completion stream for {ProviderName} '{ProviderInstanceName}' while reading the next chunk.", providerName, this.InstanceName);
}
else if (this.IsTimeoutException(e, token))
{
await this.SendTimeoutError("reading the chat response stream");
this.logger.LogError(e, "Timed out while reading the chat stream from {ProviderName} '{ProviderInstanceName}'.", providerName, this.InstanceName);
}
else
{
await MessageBus.INSTANCE.SendError(new(Icons.Material.Filled.Stream, string.Format(TB("Tried to stream the LLM provider '{0}' answer. Was not able to read the stream. The message is: '{1}'"), this.InstanceName, e.Message)));
this.logger.LogError($"Failed to read the stream from {providerName} '{this.InstanceName}': {e.Message}");
}
break;
}
@@ -505,8 +575,20 @@ public abstract class BaseProvider : IProvider, ISecretId
}
catch(Exception e)
{
await MessageBus.INSTANCE.SendError(new(Icons.Material.Filled.Stream, string.Format(TB("Tried to communicate with the LLM provider '{0}'. There were some problems with the request. The provider message is: '{1}'"), this.InstanceName, e.Message)));
this.logger.LogError($"Failed to stream responses from {providerName} '{this.InstanceName}': {e.Message}");
if (token.IsCancellationRequested)
{
this.logger.LogWarning("The user canceled the responses request for {ProviderName} '{ProviderInstanceName}' before the response stream was opened.", providerName, this.InstanceName);
}
else if (this.IsTimeoutException(e, token))
{
await this.SendTimeoutError("opening the chat response stream");
this.logger.LogError(e, "Timed out while opening the responses stream from {ProviderName} '{ProviderInstanceName}'.", providerName, this.InstanceName);
}
else
{
await MessageBus.INSTANCE.SendError(new(Icons.Material.Filled.Stream, string.Format(TB("Tried to communicate with the LLM provider '{0}'. There were some problems with the request. The provider message is: '{1}'"), this.InstanceName, e.Message)));
this.logger.LogError($"Failed to stream responses from {providerName} '{this.InstanceName}': {e.Message}");
}
}
if (streamReader is null)
@@ -547,8 +629,21 @@ public abstract class BaseProvider : IProvider, ISecretId
}
catch (Exception e)
{
await MessageBus.INSTANCE.SendError(new(Icons.Material.Filled.Stream, string.Format(TB("Tried to stream the LLM provider '{0}' answer. Was not able to read the stream. The message is: '{1}'"), this.InstanceName, e.Message)));
this.logger.LogError($"Failed to read the stream from {providerName} '{this.InstanceName}': {e.Message}");
if (token.IsCancellationRequested)
{
this.logger.LogWarning("The user canceled the responses stream for {ProviderName} '{ProviderInstanceName}' while reading the next chunk.", providerName, this.InstanceName);
}
else if (this.IsTimeoutException(e, token))
{
await this.SendTimeoutError("reading the chat response stream");
this.logger.LogError(e, "Timed out while reading the responses stream from {ProviderName} '{ProviderInstanceName}'.", providerName, this.InstanceName);
}
else
{
await MessageBus.INSTANCE.SendError(new(Icons.Material.Filled.Stream, string.Format(TB("Tried to stream the LLM provider '{0}' answer. Was not able to read the stream. The message is: '{1}'"), this.InstanceName, e.Message)));
this.logger.LogError($"Failed to read the stream from {providerName} '{this.InstanceName}': {e.Message}");
}
break;
}
@@ -784,6 +879,9 @@ public abstract class BaseProvider : IProvider, ISecretId
}
catch (Exception e)
{
if (this.IsTimeoutException(e, token))
await this.SendTimeoutError("transcribing audio");
this.logger.LogError("Failed to perform transcription request: '{Message}'.", e.Message);
return string.Empty;
}
@@ -859,6 +957,9 @@ public abstract class BaseProvider : IProvider, ISecretId
}
catch (Exception e)
{
if (this.IsTimeoutException(e, token))
await this.SendTimeoutError("creating embeddings");
this.logger.LogError("Failed to perform embedding request: '{Message}'.", e.Message);
return [];
}


@@ -135,6 +135,9 @@ public class ProviderGoogle() : BaseProvider(LLMProviders.GOOGLE, "https://gener
}
catch (Exception e)
{
if (this.IsTimeoutException(e, token))
await this.SendTimeoutError("creating embeddings");
LOGGER.LogError("Failed to perform embedding request: '{Message}'.", e.Message);
return [];
}


@@ -125,31 +125,40 @@ public sealed class ProviderHelmholtz() : BaseProvider(LLMProviders.HELMHOLTZ, "
if (string.IsNullOrWhiteSpace(secretKey))
return FailedModelLoadResult(ModelLoadFailureReason.INVALID_OR_MISSING_API_KEY, "No API key available for model loading.");
using var request = new HttpRequestMessage(HttpMethod.Get, "models");
request.Headers.Authorization = new AuthenticationHeaderValue("Bearer", secretKey);
using var response = await this.HttpClient.SendAsync(request, token);
var body = await response.Content.ReadAsStringAsync(token);
if (!response.IsSuccessStatusCode)
return FailedModelLoadResult(GetDefaultModelLoadFailureReason(response), $"Status={(int)response.StatusCode} {response.ReasonPhrase}; Body='{body}'");
try
{
var modelResponse = JsonSerializer.Deserialize<ModelsResponse>(body, JSON_SERIALIZER_OPTIONS);
return SuccessfulModelLoadResult(modelResponse.Data);
using var request = new HttpRequestMessage(HttpMethod.Get, "models");
request.Headers.Authorization = new AuthenticationHeaderValue("Bearer", secretKey);
using var response = await this.HttpClient.SendAsync(request, token);
var body = await response.Content.ReadAsStringAsync(token);
if (!response.IsSuccessStatusCode)
return FailedModelLoadResult(GetDefaultModelLoadFailureReason(response), $"Status={(int)response.StatusCode} {response.ReasonPhrase}; Body='{body}'");
try
{
var modelResponse = JsonSerializer.Deserialize<ModelsResponse>(body, JSON_SERIALIZER_OPTIONS);
return SuccessfulModelLoadResult(modelResponse.Data);
}
catch (JsonException e)
{
if (body.Contains("API key", StringComparison.InvariantCultureIgnoreCase))
return FailedModelLoadResult(ModelLoadFailureReason.INVALID_OR_MISSING_API_KEY, body);
LOGGER.LogError(e, "Unexpected error while parsing models from Helmholtz API response. Status Code: {StatusCode}. Reason: {ReasonPhrase}. Response Body: '{ResponseBody}'", response.StatusCode, response.ReasonPhrase, body);
return FailedModelLoadResult(ModelLoadFailureReason.INVALID_RESPONSE, body);
}
catch (Exception e)
{
LOGGER.LogError(e, "Unexpected error while loading models from Helmholtz API. Status Code: {StatusCode}. Reason: {ReasonPhrase}", response.StatusCode, response.ReasonPhrase);
return FailedModelLoadResult(ModelLoadFailureReason.UNKNOWN, e.Message);
}
}
catch (JsonException e)
catch (Exception e) when (this.IsTimeoutException(e, token))
{
if (body.Contains("API key", StringComparison.InvariantCultureIgnoreCase))
return FailedModelLoadResult(ModelLoadFailureReason.INVALID_OR_MISSING_API_KEY, body);
LOGGER.LogError(e, "Unexpected error while parsing models from Helmholtz API response. Status Code: {StatusCode}. Reason: {ReasonPhrase}. Response Body: '{ResponseBody}'", response.StatusCode, response.ReasonPhrase, body);
return FailedModelLoadResult(ModelLoadFailureReason.INVALID_RESPONSE, body);
}
catch (Exception e)
{
LOGGER.LogError(e, "Unexpected error while loading models from Helmholtz API. Status Code: {StatusCode}. Reason: {ReasonPhrase}", response.StatusCode, response.ReasonPhrase);
return FailedModelLoadResult(ModelLoadFailureReason.UNKNOWN, e.Message);
await this.SendTimeoutError("loading the available models");
LOGGER.LogError(e, "Timed out while loading models from Helmholtz provider '{ProviderInstanceName}'.", this.InstanceName);
return FailedModelLoadResult(ModelLoadFailureReason.PROVIDER_UNAVAILABLE, e.Message);
}
}
}
}


@@ -172,19 +172,28 @@ public sealed class ProviderSelfHosted(Host host, string hostname) : BaseProvide
private async Task<ModelLoadResult> LoadModels(SecretStoreType storeType, string[] ignorePhrases, string[] filterPhrases, CancellationToken token, string? apiKeyProvisional = null)
{
var secretKey = await this.GetModelLoadingSecretKey(storeType, apiKeyProvisional, true);
using var lmStudioRequest = new HttpRequestMessage(HttpMethod.Get, "models");
if(secretKey is not null)
lmStudioRequest.Headers.Authorization = new AuthenticationHeaderValue("Bearer", secretKey);
using var lmStudioResponse = await this.HttpClient.SendAsync(lmStudioRequest, token);
if(!lmStudioResponse.IsSuccessStatusCode)
return FailedModelLoadResult(GetDefaultModelLoadFailureReason(lmStudioResponse), $"Status={(int)lmStudioResponse.StatusCode} {lmStudioResponse.ReasonPhrase}");
var lmStudioModelResponse = await lmStudioResponse.Content.ReadFromJsonAsync<ModelsResponse>(token);
return SuccessfulModelLoadResult(lmStudioModelResponse.Data.
Where(model => !ignorePhrases.Any(ignorePhrase => model.Id.Contains(ignorePhrase, StringComparison.InvariantCulture)) &&
filterPhrases.All( filter => model.Id.Contains(filter, StringComparison.InvariantCulture)))
.Select(n => new Provider.Model(n.Id, null)));
try
{
using var lmStudioRequest = new HttpRequestMessage(HttpMethod.Get, "models");
if(secretKey is not null)
lmStudioRequest.Headers.Authorization = new AuthenticationHeaderValue("Bearer", secretKey);
using var lmStudioResponse = await this.HttpClient.SendAsync(lmStudioRequest, token);
if(!lmStudioResponse.IsSuccessStatusCode)
return FailedModelLoadResult(GetDefaultModelLoadFailureReason(lmStudioResponse), $"Status={(int)lmStudioResponse.StatusCode} {lmStudioResponse.ReasonPhrase}");
var lmStudioModelResponse = await lmStudioResponse.Content.ReadFromJsonAsync<ModelsResponse>(token);
return SuccessfulModelLoadResult(lmStudioModelResponse.Data.
Where(model => !ignorePhrases.Any(ignorePhrase => model.Id.Contains(ignorePhrase, StringComparison.InvariantCulture)) &&
filterPhrases.All( filter => model.Id.Contains(filter, StringComparison.InvariantCulture)))
.Select(n => new Provider.Model(n.Id, null)));
}
catch (Exception e) when (this.IsTimeoutException(e, token))
{
await this.SendTimeoutError("loading the available models");
LOGGER.LogError(e, "Timed out while loading models from self-hosted provider '{ProviderInstanceName}'.", this.InstanceName);
return FailedModelLoadResult(ModelLoadFailureReason.PROVIDER_UNAVAILABLE, e.Message);
}
}
}
}