From fef0f7d2484df23708178599b3347e1ab392a93d Mon Sep 17 00:00:00 2001
From: Thorsten Sommer
Date: Sat, 24 Jan 2026 20:15:16 +0100
Subject: [PATCH] Improved error handling in enterprise environment service.

---
 .../Services/EnterpriseEnvironmentService.cs | 38 +++++++++++++++++--
 .../wwwroot/changelog/v26.1.2.md             |  5 ++-
 2 files changed, 37 insertions(+), 6 deletions(-)

diff --git a/app/MindWork AI Studio/Tools/Services/EnterpriseEnvironmentService.cs b/app/MindWork AI Studio/Tools/Services/EnterpriseEnvironmentService.cs
index 079224c6..a5916bf7 100644
--- a/app/MindWork AI Studio/Tools/Services/EnterpriseEnvironmentService.cs
+++ b/app/MindWork AI Studio/Tools/Services/EnterpriseEnvironmentService.cs
@@ -34,16 +34,46 @@ public sealed class EnterpriseEnvironmentService(ILogger<EnterpriseEnvironmentService> logger, RustService rustService)
         var isPlugin2RemoveInUse = PluginFactory.AvailablePlugins.Any(plugin => plugin.Id == enterpriseRemoveConfigId);
         if (enterpriseRemoveConfigId != Guid.Empty && isPlugin2RemoveInUse)
         {
             logger.LogWarning($"The enterprise environment configuration ID '{enterpriseRemoveConfigId}' must be removed.");
             PluginFactory.RemovePluginAsync(enterpriseRemoveConfigId);
         }
-
-        var enterpriseConfigServerUrl = await rustService.EnterpriseEnvConfigServerUrl();
-        var enterpriseConfigId = await rustService.EnterpriseEnvConfigId();
+
+        string? enterpriseConfigServerUrl;
+        try
+        {
+            enterpriseConfigServerUrl = await rustService.EnterpriseEnvConfigServerUrl();
+        }
+        catch (Exception e)
+        {
+            logger.LogError(e, "Failed to fetch the enterprise configuration server URL from the Rust service.");
+            return;
+        }
+
+        Guid enterpriseConfigId;
+        try
+        {
+            enterpriseConfigId = await rustService.EnterpriseEnvConfigId();
+        }
+        catch (Exception e)
+        {
+            logger.LogError(e, "Failed to fetch the enterprise configuration ID from the Rust service.");
+            return;
+        }
+
         var etag = await PluginFactory.DetermineConfigPluginETagAsync(enterpriseConfigId, enterpriseConfigServerUrl);
         var nextEnterpriseEnvironment = new EnterpriseEnvironment(enterpriseConfigServerUrl, enterpriseConfigId, etag);
         if (CURRENT_ENVIRONMENT != nextEnterpriseEnvironment)
diff --git a/app/MindWork AI Studio/wwwroot/changelog/v26.1.2.md b/app/MindWork AI Studio/wwwroot/changelog/v26.1.2.md
index 7b4d4528..2199c74e 100644
--- a/app/MindWork AI Studio/wwwroot/changelog/v26.1.2.md
+++ b/app/MindWork AI Studio/wwwroot/changelog/v26.1.2.md
@@ -3,12 +3,13 @@
 - Added the current date and time to the system prompt for better context in conversations. Thanks Peer `peerschuett` for the contribution.
 - Added the ability to control the voice recording with transcription (in preview) by using a system-wide shortcut. The shortcut can be configured in the application settings or by using a configuration plugin. Thus, a uniform shortcut can be defined for an entire organization.
 - Added error handling for the context window overflow, which can occur with huge file attachments in chats or the document analysis assistant.
-- Improved error handling for model loading in provider dialogs (LLMs, embeddings, transcriptions).
+- Improved the error handling for model loading in provider dialogs (LLMs, embeddings, transcriptions).
 - Improved the microphone handling (transcription preview) so that all sound effects and the voice recording are processed without interruption.
 - Improved the handling of self-hosted providers in the configuration dialogs (LLMs, embeddings, and transcriptions) when the host cannot provide a list of models.
 - Improved the document analysis assistant (in preview) by allowing users to send results to a new chat to ask follow-up questions. Thanks to Sabrina `Sabrina-devops` for this contribution.
 - Improved the developer experience by detecting incorrect CPU architecture metadata when checking and installing the Pandoc dependency.
-- Improved error messages for failed communication with AI servers.
+- Improved the error messages for failed communication with AI servers.
+- Improved the error handling for the enterprise environment service regarding the communication with our Rust layer.
 - Fixed a logging bug that prevented log events from being recorded in some cases.
 - Fixed a bug that allowed adding a provider (LLM, embedding, or transcription) without selecting a model.
 - Fixed a bug with local transcription providers by handling errors correctly when the local provider is unavailable.
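
For readers skimming this patch, here is a minimal, self-contained sketch of the guarded-fetch pattern the change introduces: each call into the Rust layer is wrapped in its own try/catch, the failure is logged with a call-specific message, and the refresh run is skipped instead of letting the exception take down the background service. All names in the sketch (`IRustConfigClient`, `EnterpriseConfigRefresher`, `GetConfigServerUrlAsync`, `GetConfigIdAsync`) are hypothetical stand-ins, not the actual AI Studio API.

```csharp
using System;
using System.Threading.Tasks;
using Microsoft.Extensions.Logging;

// Hypothetical stand-in for the Rust-layer IPC client; the real RustService
// in AI Studio exposes different members.
public interface IRustConfigClient
{
    Task<string?> GetConfigServerUrlAsync();
    Task<Guid> GetConfigIdAsync();
}

public sealed class EnterpriseConfigRefresher(ILogger<EnterpriseConfigRefresher> logger, IRustConfigClient rust)
{
    public async Task RefreshAsync()
    {
        // Guard each IPC call separately so the log states exactly which
        // call failed, and bail out of this refresh run on any failure.
        string? serverUrl;
        try
        {
            serverUrl = await rust.GetConfigServerUrlAsync();
        }
        catch (Exception e)
        {
            logger.LogError(e, "Failed to fetch the enterprise configuration server URL from the Rust layer.");
            return;
        }

        Guid configId;
        try
        {
            configId = await rust.GetConfigIdAsync();
        }
        catch (Exception e)
        {
            logger.LogError(e, "Failed to fetch the enterprise configuration ID from the Rust layer.");
            return;
        }

        // With both values available, continue with the actual refresh work,
        // e.g., comparing against the currently known enterprise environment.
        logger.LogInformation("Enterprise config server URL: '{Url}', config ID: '{Id}'.", serverUrl, configId);
    }
}
```

Wrapping each call separately, rather than putting both fetches into one try/catch, is what lets the service report precisely which part of the Rust communication failed; that mirrors the two distinct error messages added in the hunk above.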