mirror of
https://github.com/MindWorkAI/AI-Studio.git
synced 2026-02-13 02:41:37 +00:00
Merge branch 'main' into pr/628
# Conflicts: # app/MindWork AI Studio/Components/ChatComponent.razor.cs # app/MindWork AI Studio/wwwroot/changelog/v26.1.2.md
This commit is contained in:
commit
9c125d5e34
@ -27,6 +27,7 @@
|
||||
<script src="system/MudBlazor.Markdown/MudBlazor.Markdown.min.js"></script>
|
||||
<script src="system/CodeBeam.MudBlazor.Extensions/MudExtensions.min.js"></script>
|
||||
<script src="app.js"></script>
|
||||
<script src="audio.js"></script>
|
||||
</body>
|
||||
|
||||
</html>
|
||||
@ -118,10 +118,47 @@ public partial class DocumentAnalysisAssistant : AssistantBaseCore<SettingsDialo
|
||||
|
||||
protected override bool SubmitDisabled => (this.IsNoPolicySelected || this.loadedDocumentPaths.Count==0);
|
||||
|
||||
protected override ChatThread ConvertToChatThread => (this.chatThread ?? new()) with
|
||||
protected override ChatThread ConvertToChatThread
|
||||
{
|
||||
SystemPrompt = SystemPrompts.DEFAULT,
|
||||
};
|
||||
get
|
||||
{
|
||||
if (this.chatThread is null || this.chatThread.Blocks.Count < 2)
|
||||
{
|
||||
return new ChatThread
|
||||
{
|
||||
SystemPrompt = SystemPrompts.DEFAULT
|
||||
};
|
||||
}
|
||||
|
||||
return new ChatThread
|
||||
{
|
||||
ChatId = Guid.NewGuid(),
|
||||
Name = string.Format(T("{0} - Document Analysis Session"), this.selectedPolicy?.PolicyName ?? T("Empty")),
|
||||
SystemPrompt = SystemPrompts.DEFAULT,
|
||||
Blocks =
|
||||
[
|
||||
// Replace the first "user block" (here, it was/is the block generated by the assistant) with a new one
|
||||
// that includes the loaded document paths and a standard message about the previous analysis session:
|
||||
new ContentBlock
|
||||
{
|
||||
Time = this.chatThread.Blocks.First().Time,
|
||||
Role = ChatRole.USER,
|
||||
HideFromUser = false,
|
||||
ContentType = ContentType.TEXT,
|
||||
Content = new ContentText
|
||||
{
|
||||
Text = this.T("The result of your previous document analysis session."),
|
||||
FileAttachments = this.loadedDocumentPaths.ToList(),
|
||||
}
|
||||
},
|
||||
|
||||
// Then, append the last block of the current chat thread
|
||||
// (which is expected to be the AI response):
|
||||
this.chatThread.Blocks.Last(),
|
||||
]
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
protected override void ResetForm()
|
||||
{
|
||||
|
||||
@ -382,6 +382,9 @@ UI_TEXT_CONTENT["AISTUDIO::ASSISTANTS::CODING::COMMONCODINGLANGUAGEEXTENSIONS::T
|
||||
-- None
|
||||
UI_TEXT_CONTENT["AISTUDIO::ASSISTANTS::CODING::COMMONCODINGLANGUAGEEXTENSIONS::T810547195"] = "None"
|
||||
|
||||
-- {0} - Document Analysis Session
|
||||
UI_TEXT_CONTENT["AISTUDIO::ASSISTANTS::DOCUMENTANALYSIS::DOCUMENTANALYSISASSISTANT::T108097007"] = "{0} - Document Analysis Session"
|
||||
|
||||
-- Use the analysis and output rules to define how the AI evaluates your documents and formats the results.
|
||||
UI_TEXT_CONTENT["AISTUDIO::ASSISTANTS::DOCUMENTANALYSIS::DOCUMENTANALYSISASSISTANT::T1155482668"] = "Use the analysis and output rules to define how the AI evaluates your documents and formats the results."
|
||||
|
||||
@ -436,6 +439,9 @@ UI_TEXT_CONTENT["AISTUDIO::ASSISTANTS::DOCUMENTANALYSIS::DOCUMENTANALYSISASSISTA
|
||||
-- Export policy as configuration section
|
||||
UI_TEXT_CONTENT["AISTUDIO::ASSISTANTS::DOCUMENTANALYSIS::DOCUMENTANALYSISASSISTANT::T2556564432"] = "Export policy as configuration section"
|
||||
|
||||
-- The result of your previous document analysis session.
|
||||
UI_TEXT_CONTENT["AISTUDIO::ASSISTANTS::DOCUMENTANALYSIS::DOCUMENTANALYSISASSISTANT::T2570551055"] = "The result of your previous document analysis session."
|
||||
|
||||
-- Are you sure you want to delete the document analysis policy '{0}'?
|
||||
UI_TEXT_CONTENT["AISTUDIO::ASSISTANTS::DOCUMENTANALYSIS::DOCUMENTANALYSISASSISTANT::T2582525917"] = "Are you sure you want to delete the document analysis policy '{0}'?"
|
||||
|
||||
@ -469,6 +475,9 @@ UI_TEXT_CONTENT["AISTUDIO::ASSISTANTS::DOCUMENTANALYSIS::DOCUMENTANALYSISASSISTA
|
||||
-- Document Analysis Assistant
|
||||
UI_TEXT_CONTENT["AISTUDIO::ASSISTANTS::DOCUMENTANALYSIS::DOCUMENTANALYSISASSISTANT::T348883878"] = "Document Analysis Assistant"
|
||||
|
||||
-- Empty
|
||||
UI_TEXT_CONTENT["AISTUDIO::ASSISTANTS::DOCUMENTANALYSIS::DOCUMENTANALYSISASSISTANT::T3512147854"] = "Empty"
|
||||
|
||||
-- Analysis and output rules
|
||||
UI_TEXT_CONTENT["AISTUDIO::ASSISTANTS::DOCUMENTANALYSIS::DOCUMENTANALYSISASSISTANT::T3555314296"] = "Analysis and output rules"
|
||||
|
||||
@ -2101,6 +2110,9 @@ UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELEMBEDDINGS::T14695
|
||||
-- Add Embedding
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELEMBEDDINGS::T1738753945"] = "Add Embedding"
|
||||
|
||||
-- Uses the provider-configured model
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELEMBEDDINGS::T1760715963"] = "Uses the provider-configured model"
|
||||
|
||||
-- Are you sure you want to delete the embedding provider '{0}'?
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELEMBEDDINGS::T1825371968"] = "Are you sure you want to delete the embedding provider '{0}'?"
|
||||
|
||||
@ -2164,6 +2176,9 @@ UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELPROVIDERS::T162847
|
||||
-- Description
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELPROVIDERS::T1725856265"] = "Description"
|
||||
|
||||
-- Uses the provider-configured model
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELPROVIDERS::T1760715963"] = "Uses the provider-configured model"
|
||||
|
||||
-- Add Provider
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELPROVIDERS::T1806589097"] = "Add Provider"
|
||||
|
||||
@ -2206,9 +2221,6 @@ UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELPROVIDERS::T291173
|
||||
-- Configured LLM Providers
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELPROVIDERS::T3019870540"] = "Configured LLM Providers"
|
||||
|
||||
-- as selected by provider
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELPROVIDERS::T3082210376"] = "as selected by provider"
|
||||
|
||||
-- Edit
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELPROVIDERS::T3267849393"] = "Edit"
|
||||
|
||||
@ -2266,6 +2278,9 @@ UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELTRANSCRIPTION::T14
|
||||
-- Add transcription provider
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELTRANSCRIPTION::T1645238629"] = "Add transcription provider"
|
||||
|
||||
-- Uses the provider-configured model
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELTRANSCRIPTION::T1760715963"] = "Uses the provider-configured model"
|
||||
|
||||
-- Add Transcription Provider
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELTRANSCRIPTION::T2066315685"] = "Add Transcription Provider"
|
||||
|
||||
@ -3205,6 +3220,9 @@ UI_TEXT_CONTENT["AISTUDIO::DIALOGS::EMBEDDINGPROVIDERDIALOG::T290547799"] = "Cur
|
||||
-- Model selection
|
||||
UI_TEXT_CONTENT["AISTUDIO::DIALOGS::EMBEDDINGPROVIDERDIALOG::T416738168"] = "Model selection"
|
||||
|
||||
-- We are currently unable to communicate with the provider to load models. Please try again later.
|
||||
UI_TEXT_CONTENT["AISTUDIO::DIALOGS::EMBEDDINGPROVIDERDIALOG::T504465522"] = "We are currently unable to communicate with the provider to load models. Please try again later."
|
||||
|
||||
-- Host
|
||||
UI_TEXT_CONTENT["AISTUDIO::DIALOGS::EMBEDDINGPROVIDERDIALOG::T808120719"] = "Host"
|
||||
|
||||
@ -3412,12 +3430,18 @@ UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T3361153305"] = "Show Expert
|
||||
-- Show available models
|
||||
UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T3763891899"] = "Show available models"
|
||||
|
||||
-- This host uses the model configured at the provider level. No model selection is available.
|
||||
UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T3783329915"] = "This host uses the model configured at the provider level. No model selection is available."
|
||||
|
||||
-- Currently, we cannot query the models for the selected provider and/or host. Therefore, please enter the model name manually.
|
||||
UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T4116737656"] = "Currently, we cannot query the models for the selected provider and/or host. Therefore, please enter the model name manually."
|
||||
|
||||
-- Model selection
|
||||
UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T416738168"] = "Model selection"
|
||||
|
||||
-- We are currently unable to communicate with the provider to load models. Please try again later.
|
||||
UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T504465522"] = "We are currently unable to communicate with the provider to load models. Please try again later."
|
||||
|
||||
-- Host
|
||||
UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T808120719"] = "Host"
|
||||
|
||||
@ -4633,9 +4657,15 @@ UI_TEXT_CONTENT["AISTUDIO::DIALOGS::TRANSCRIPTIONPROVIDERDIALOG::T2842060373"] =
|
||||
-- Please enter a transcription model name.
|
||||
UI_TEXT_CONTENT["AISTUDIO::DIALOGS::TRANSCRIPTIONPROVIDERDIALOG::T3703662664"] = "Please enter a transcription model name."
|
||||
|
||||
-- This host uses the model configured at the provider level. No model selection is available.
|
||||
UI_TEXT_CONTENT["AISTUDIO::DIALOGS::TRANSCRIPTIONPROVIDERDIALOG::T3783329915"] = "This host uses the model configured at the provider level. No model selection is available."
|
||||
|
||||
-- Model selection
|
||||
UI_TEXT_CONTENT["AISTUDIO::DIALOGS::TRANSCRIPTIONPROVIDERDIALOG::T416738168"] = "Model selection"
|
||||
|
||||
-- We are currently unable to communicate with the provider to load models. Please try again later.
|
||||
UI_TEXT_CONTENT["AISTUDIO::DIALOGS::TRANSCRIPTIONPROVIDERDIALOG::T504465522"] = "We are currently unable to communicate with the provider to load models. Please try again later."
|
||||
|
||||
-- Host
|
||||
UI_TEXT_CONTENT["AISTUDIO::DIALOGS::TRANSCRIPTIONPROVIDERDIALOG::T808120719"] = "Host"
|
||||
|
||||
|
||||
@ -102,7 +102,7 @@ public partial class ChatComponent : MSGComponentBase, IAsyncDisposable
|
||||
IncludeDateTime = true,
|
||||
};
|
||||
|
||||
this.Logger.LogInformation($"The chat '{this.ChatThread.Name}' with {this.ChatThread.Blocks.Count} messages was deferred and will be rendered now.");
|
||||
this.Logger.LogInformation($"The chat '{this.ChatThread.ChatId}' with {this.ChatThread.Blocks.Count} messages was deferred and will be rendered now.");
|
||||
await this.ChatThreadChanged.InvokeAsync(this.ChatThread);
|
||||
|
||||
// We know already that the chat thread is not null,
|
||||
@ -207,7 +207,6 @@ public partial class ChatComponent : MSGComponentBase, IAsyncDisposable
|
||||
|
||||
// Select the correct provider:
|
||||
await this.SelectProviderWhenLoadingChat();
|
||||
|
||||
await base.OnInitializedAsync();
|
||||
}
|
||||
|
||||
|
||||
@ -35,7 +35,7 @@
|
||||
<MudTd>@context.Num</MudTd>
|
||||
<MudTd>@context.Name</MudTd>
|
||||
<MudTd>@context.UsedLLMProvider.ToName()</MudTd>
|
||||
<MudTd>@GetEmbeddingProviderModelName(context)</MudTd>
|
||||
<MudTd>@this.GetEmbeddingProviderModelName(context)</MudTd>
|
||||
|
||||
<MudTd>
|
||||
<MudStack Row="true" Class="mb-2 mt-2" Spacing="1" Wrap="Wrap.Wrap">
|
||||
|
||||
@ -15,8 +15,12 @@ public partial class SettingsPanelEmbeddings : SettingsPanelBase
|
||||
[Parameter]
|
||||
public EventCallback<List<ConfigurationSelectData<string>>> AvailableEmbeddingProvidersChanged { get; set; }
|
||||
|
||||
private static string GetEmbeddingProviderModelName(EmbeddingProvider provider)
|
||||
private string GetEmbeddingProviderModelName(EmbeddingProvider provider)
|
||||
{
|
||||
// For system models, return localized text:
|
||||
if (provider.Model.IsSystemModel)
|
||||
return T("Uses the provider-configured model");
|
||||
|
||||
const int MAX_LENGTH = 36;
|
||||
var modelName = provider.Model.ToString();
|
||||
return modelName.Length > MAX_LENGTH ? "[...] " + modelName[^Math.Min(MAX_LENGTH, modelName.Length)..] : modelName;
|
||||
|
||||
@ -1,6 +1,5 @@
|
||||
@using AIStudio.Provider
|
||||
@using AIStudio.Settings
|
||||
@using AIStudio.Provider.SelfHosted
|
||||
@inherits SettingsPanelBase
|
||||
|
||||
<ExpansionPanel HeaderIcon="@Icons.Material.Filled.Layers" HeaderText="@T("Configure LLM Providers")">
|
||||
@ -29,20 +28,7 @@
|
||||
<MudTd>@context.Num</MudTd>
|
||||
<MudTd>@context.InstanceName</MudTd>
|
||||
<MudTd>@context.UsedLLMProvider.ToName()</MudTd>
|
||||
<MudTd>
|
||||
@if (context.UsedLLMProvider is not LLMProviders.SELF_HOSTED)
|
||||
{
|
||||
@GetLLMProviderModelName(context)
|
||||
}
|
||||
else if (context.UsedLLMProvider is LLMProviders.SELF_HOSTED && context.Host is not Host.LLAMA_CPP)
|
||||
{
|
||||
@GetLLMProviderModelName(context)
|
||||
}
|
||||
else
|
||||
{
|
||||
@T("as selected by provider")
|
||||
}
|
||||
</MudTd>
|
||||
<MudTd>@this.GetLLMProviderModelName(context)</MudTd>
|
||||
<MudTd>
|
||||
<MudStack Row="true" Class="mb-2 mt-2" Spacing="1" Wrap="Wrap.Wrap">
|
||||
@if (context.IsEnterpriseConfiguration)
|
||||
|
||||
@ -134,8 +134,12 @@ public partial class SettingsPanelProviders : SettingsPanelBase
|
||||
await this.MessageBus.SendMessage<bool>(this, Event.CONFIGURATION_CHANGED);
|
||||
}
|
||||
|
||||
private static string GetLLMProviderModelName(AIStudio.Settings.Provider provider)
|
||||
private string GetLLMProviderModelName(AIStudio.Settings.Provider provider)
|
||||
{
|
||||
// For system models, return localized text:
|
||||
if (provider.Model.IsSystemModel)
|
||||
return T("Uses the provider-configured model");
|
||||
|
||||
const int MAX_LENGTH = 36;
|
||||
var modelName = provider.Model.ToString();
|
||||
return modelName.Length > MAX_LENGTH ? "[...] " + modelName[^Math.Min(MAX_LENGTH, modelName.Length)..] : modelName;
|
||||
|
||||
@ -32,7 +32,7 @@
|
||||
<MudTd>@context.Num</MudTd>
|
||||
<MudTd>@context.Name</MudTd>
|
||||
<MudTd>@context.UsedLLMProvider.ToName()</MudTd>
|
||||
<MudTd>@GetTranscriptionProviderModelName(context)</MudTd>
|
||||
<MudTd>@this.GetTranscriptionProviderModelName(context)</MudTd>
|
||||
|
||||
<MudTd>
|
||||
<MudStack Row="true" Class="mb-2 mt-2" Spacing="1" Wrap="Wrap.Wrap">
|
||||
|
||||
@ -15,8 +15,12 @@ public partial class SettingsPanelTranscription : SettingsPanelBase
|
||||
[Parameter]
|
||||
public EventCallback<List<ConfigurationSelectData<string>>> AvailableTranscriptionProvidersChanged { get; set; }
|
||||
|
||||
private static string GetTranscriptionProviderModelName(TranscriptionProvider provider)
|
||||
private string GetTranscriptionProviderModelName(TranscriptionProvider provider)
|
||||
{
|
||||
// For system models, return localized text:
|
||||
if (provider.Model.IsSystemModel)
|
||||
return T("Uses the provider-configured model");
|
||||
|
||||
const int MAX_LENGTH = 36;
|
||||
var modelName = provider.Model.ToString();
|
||||
return modelName.Length > MAX_LENGTH ? "[...] " + modelName[^Math.Min(MAX_LENGTH, modelName.Length)..] : modelName;
|
||||
|
||||
@ -20,6 +20,18 @@ public partial class VoiceRecorder : MSGComponentBase
|
||||
[Inject]
|
||||
private ISnackbar Snackbar { get; init; } = null!;
|
||||
|
||||
#region Overrides of MSGComponentBase
|
||||
|
||||
protected override async Task OnInitializedAsync()
|
||||
{
|
||||
await base.OnInitializedAsync();
|
||||
|
||||
// Initialize sound effects. This "warms up" the AudioContext and preloads all sounds for reliable playback:
|
||||
await this.JsRuntime.InvokeVoidAsync("initSoundEffects");
|
||||
}
|
||||
|
||||
#endregion
|
||||
|
||||
private uint numReceivedChunks;
|
||||
private bool isRecording;
|
||||
private bool isTranscribing;
|
||||
@ -39,6 +51,9 @@ public partial class VoiceRecorder : MSGComponentBase
|
||||
{
|
||||
if (toggled)
|
||||
{
|
||||
// Warm up sound effects:
|
||||
await this.JsRuntime.InvokeVoidAsync("initSoundEffects");
|
||||
|
||||
var mimeTypes = GetPreferredMimeTypes(
|
||||
Builder.Create().UseAudio().UseSubtype(AudioSubtype.OGG).Build(),
|
||||
Builder.Create().UseAudio().UseSubtype(AudioSubtype.AAC).Build(),
|
||||
@ -189,7 +204,11 @@ public partial class VoiceRecorder : MSGComponentBase
|
||||
private async Task TranscribeRecordingAsync()
|
||||
{
|
||||
if (this.finalRecordingPath is null)
|
||||
{
|
||||
// No recording to transcribe, but still release the microphone:
|
||||
await this.ReleaseMicrophoneAsync();
|
||||
return;
|
||||
}
|
||||
|
||||
this.isTranscribing = true;
|
||||
this.StateHasChanged();
|
||||
@ -223,7 +242,7 @@ public partial class VoiceRecorder : MSGComponentBase
|
||||
{
|
||||
this.Logger.LogWarning(
|
||||
"The configured transcription provider '{ProviderName}' has a confidence level of '{ProviderLevel}', which is below the minimum required level of '{MinimumLevel}'.",
|
||||
transcriptionProviderSettings.Name,
|
||||
transcriptionProviderSettings.UsedLLMProvider,
|
||||
providerConfidence.Level,
|
||||
minimumLevel);
|
||||
await this.MessageBus.SendError(new(Icons.Material.Filled.VoiceChat, this.T("The configured transcription provider does not meet the minimum confidence level.")));
|
||||
@ -240,7 +259,7 @@ public partial class VoiceRecorder : MSGComponentBase
|
||||
}
|
||||
|
||||
// Call the transcription API:
|
||||
this.Logger.LogInformation("Starting transcription with provider '{ProviderName}' and model '{ModelName}'.", transcriptionProviderSettings.Name, transcriptionProviderSettings.Model.DisplayName);
|
||||
this.Logger.LogInformation("Starting transcription with provider '{ProviderName}' and model '{ModelName}'.", transcriptionProviderSettings.UsedLLMProvider, transcriptionProviderSettings.Model.ToString());
|
||||
var transcribedText = await provider.TranscribeAudioAsync(transcriptionProviderSettings.Model, this.finalRecordingPath, this.SettingsManager);
|
||||
|
||||
if (string.IsNullOrWhiteSpace(transcribedText))
|
||||
@ -288,12 +307,22 @@ public partial class VoiceRecorder : MSGComponentBase
|
||||
}
|
||||
finally
|
||||
{
|
||||
await this.ReleaseMicrophoneAsync();
|
||||
|
||||
this.finalRecordingPath = null;
|
||||
this.isTranscribing = false;
|
||||
this.StateHasChanged();
|
||||
}
|
||||
}
|
||||
|
||||
private async Task ReleaseMicrophoneAsync()
|
||||
{
|
||||
// Wait a moment for any queued sounds to finish playing, then release the microphone.
|
||||
// This allows Bluetooth headsets to switch back to A2DP profile without interrupting audio:
|
||||
await Task.Delay(1_800);
|
||||
await this.JsRuntime.InvokeVoidAsync("audioRecorder.releaseMicrophone");
|
||||
}
|
||||
|
||||
private sealed class AudioRecordingResult
|
||||
{
|
||||
public string MimeType { get; init; } = string.Empty;
|
||||
|
||||
@ -44,7 +44,7 @@
|
||||
|
||||
@if (this.DataLLMProvider.IsHostNeeded())
|
||||
{
|
||||
<MudSelect @bind-Value="@this.DataHost" Label="@T("Host")" Class="mb-3" OpenIcon="@Icons.Material.Filled.ExpandMore" AdornmentColor="Color.Info" Adornment="Adornment.Start" Validation="@this.providerValidation.ValidatingHost">
|
||||
<MudSelect T="Host" Value="@this.DataHost" ValueChanged="@this.OnHostChanged" Label="@T("Host")" Class="mb-3" OpenIcon="@Icons.Material.Filled.ExpandMore" AdornmentColor="Color.Info" Adornment="Adornment.Start" Validation="@this.providerValidation.ValidatingHost">
|
||||
@foreach (Host host in Enum.GetValues(typeof(Host)))
|
||||
{
|
||||
if (host.IsEmbeddingSupported())
|
||||
@ -101,6 +101,12 @@
|
||||
}
|
||||
}
|
||||
</MudStack>
|
||||
@if (!string.IsNullOrWhiteSpace(this.dataLoadingModelsIssue))
|
||||
{
|
||||
<MudAlert Severity="Severity.Error" Class="mt-3">
|
||||
@this.dataLoadingModelsIssue
|
||||
</MudAlert>
|
||||
}
|
||||
</MudField>
|
||||
|
||||
@* ReSharper disable once CSharpWarnings::CS8974 *@
|
||||
|
||||
@ -71,7 +71,10 @@ public partial class EmbeddingProviderDialog : MSGComponentBase, ISecretId
|
||||
|
||||
[Inject]
|
||||
private RustService RustService { get; init; } = null!;
|
||||
|
||||
|
||||
[Inject]
|
||||
private ILogger<EmbeddingProviderDialog> Logger { get; init; } = null!;
|
||||
|
||||
private static readonly Dictionary<string, object?> SPELLCHECK_ATTRIBUTES = new();
|
||||
|
||||
/// <summary>
|
||||
@ -85,7 +88,8 @@ public partial class EmbeddingProviderDialog : MSGComponentBase, ISecretId
|
||||
private string dataManuallyModel = string.Empty;
|
||||
private string dataAPIKeyStorageIssue = string.Empty;
|
||||
private string dataEditingPreviousInstanceName = string.Empty;
|
||||
|
||||
private string dataLoadingModelsIssue = string.Empty;
|
||||
|
||||
// We get the form reference from Blazor code to validate it manually:
|
||||
private MudForm form = null!;
|
||||
|
||||
@ -102,6 +106,7 @@ public partial class EmbeddingProviderDialog : MSGComponentBase, ISecretId
|
||||
GetPreviousInstanceName = () => this.dataEditingPreviousInstanceName,
|
||||
GetUsedInstanceNames = () => this.UsedInstanceNames,
|
||||
GetHost = () => this.DataHost,
|
||||
IsModelProvidedManually = () => this.DataLLMProvider is LLMProviders.SELF_HOSTED && this.DataHost is Host.OLLAMA,
|
||||
};
|
||||
}
|
||||
|
||||
@ -208,7 +213,16 @@ public partial class EmbeddingProviderDialog : MSGComponentBase, ISecretId
|
||||
{
|
||||
await this.form.Validate();
|
||||
this.dataAPIKeyStorageIssue = string.Empty;
|
||||
|
||||
|
||||
// Manually validate the model selection (needed when no models are loaded
|
||||
// and the MudSelect is not rendered):
|
||||
var modelValidationError = this.providerValidation.ValidatingModel(this.DataModel);
|
||||
if (!string.IsNullOrWhiteSpace(modelValidationError))
|
||||
{
|
||||
this.dataIssues = [..this.dataIssues, modelValidationError];
|
||||
this.dataIsValid = false;
|
||||
}
|
||||
|
||||
// When the data is not valid, we don't store it:
|
||||
if (!this.dataIsValid)
|
||||
return;
|
||||
@ -250,21 +264,40 @@ public partial class EmbeddingProviderDialog : MSGComponentBase, ISecretId
|
||||
await this.form.Validate();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
private void OnHostChanged(Host selectedHost)
|
||||
{
|
||||
// When the host changes, reset the model selection state:
|
||||
this.DataHost = selectedHost;
|
||||
this.DataModel = default;
|
||||
this.dataManuallyModel = string.Empty;
|
||||
this.availableModels.Clear();
|
||||
this.dataLoadingModelsIssue = string.Empty;
|
||||
}
|
||||
|
||||
private async Task ReloadModels()
|
||||
{
|
||||
this.dataLoadingModelsIssue = string.Empty;
|
||||
var currentEmbeddingProviderSettings = this.CreateEmbeddingProviderSettings();
|
||||
var provider = currentEmbeddingProviderSettings.CreateProvider();
|
||||
if(provider is NoProvider)
|
||||
if (provider is NoProvider)
|
||||
return;
|
||||
|
||||
var models = await provider.GetEmbeddingModels(this.dataAPIKey);
|
||||
|
||||
// Order descending by ID means that the newest models probably come first:
|
||||
var orderedModels = models.OrderByDescending(n => n.Id);
|
||||
|
||||
this.availableModels.Clear();
|
||||
this.availableModels.AddRange(orderedModels);
|
||||
|
||||
try
|
||||
{
|
||||
var models = await provider.GetEmbeddingModels(this.dataAPIKey);
|
||||
|
||||
// Order descending by ID means that the newest models probably come first:
|
||||
var orderedModels = models.OrderByDescending(n => n.Id);
|
||||
|
||||
this.availableModels.Clear();
|
||||
this.availableModels.AddRange(orderedModels);
|
||||
}
|
||||
catch (Exception e)
|
||||
{
|
||||
this.Logger.LogError($"Failed to load models from provider '{this.DataLLMProvider}' (host={this.DataHost}, hostname='{this.DataHostname}'): {e.Message}");
|
||||
this.dataLoadingModelsIssue = T("We are currently unable to communicate with the provider to load models. Please try again later.");
|
||||
}
|
||||
}
|
||||
|
||||
private string APIKeyText => this.DataLLMProvider switch
|
||||
|
||||
@ -41,7 +41,7 @@
|
||||
|
||||
@if (this.DataLLMProvider.IsHostNeeded())
|
||||
{
|
||||
<MudSelect @bind-Value="@this.DataHost" Label="@T("Host")" Class="mb-3" OpenIcon="@Icons.Material.Filled.ExpandMore" AdornmentColor="Color.Info" Adornment="Adornment.Start" Validation="@this.providerValidation.ValidatingHost">
|
||||
<MudSelect T="Host" Value="@this.DataHost" ValueChanged="@this.OnHostChanged" Label="@T("Host")" Class="mb-3" OpenIcon="@Icons.Material.Filled.ExpandMore" AdornmentColor="Color.Info" Adornment="Adornment.Start" Validation="@this.providerValidation.ValidatingHost">
|
||||
@foreach (Host host in Enum.GetValues(typeof(Host)))
|
||||
{
|
||||
@if (host.IsChatSupported())
|
||||
@ -71,52 +71,69 @@
|
||||
@* ReSharper restore Asp.Entity *@
|
||||
}
|
||||
|
||||
<MudField FullWidth="true" Label="@T("Model selection")" Variant="Variant.Outlined" Class="mb-3">
|
||||
<MudStack Row="@true" AlignItems="AlignItems.Center" StretchItems="StretchItems.End">
|
||||
@if (this.DataLLMProvider.IsLLMModelProvidedManually())
|
||||
{
|
||||
<MudButton Variant="Variant.Filled" Size="Size.Small" StartIcon="@Icons.Material.Filled.OpenInBrowser" Href="@this.DataLLMProvider.GetModelsOverviewURL(this.HFInferenceProviderId)" Target="_blank">
|
||||
@T("Show available models")
|
||||
</MudButton>
|
||||
<MudTextField
|
||||
T="string"
|
||||
@bind-Text="@this.dataManuallyModel"
|
||||
Label="@T("Model")"
|
||||
Adornment="Adornment.Start"
|
||||
AdornmentIcon="@Icons.Material.Filled.FaceRetouchingNatural"
|
||||
AdornmentColor="Color.Info"
|
||||
Validation="@this.ValidateManuallyModel"
|
||||
UserAttributes="@SPELLCHECK_ATTRIBUTES"
|
||||
HelperText="@T("Currently, we cannot query the models for the selected provider and/or host. Therefore, please enter the model name manually.")"
|
||||
/>
|
||||
}
|
||||
else
|
||||
{
|
||||
<MudButton Disabled="@(!this.DataLLMProvider.CanLoadModels(this.DataHost, this.dataAPIKey))" Variant="Variant.Filled" Size="Size.Small" StartIcon="@Icons.Material.Filled.Refresh" OnClick="@this.ReloadModels">
|
||||
@T("Load models")
|
||||
</MudButton>
|
||||
@if(this.availableModels.Count is 0)
|
||||
@if (!this.DataLLMProvider.IsLLMModelSelectionHidden(this.DataHost))
|
||||
{
|
||||
<MudField FullWidth="true" Label="@T("Model selection")" Variant="Variant.Outlined" Class="mb-3">
|
||||
<MudStack Row="@true" AlignItems="AlignItems.Center" StretchItems="StretchItems.End">
|
||||
@if (this.DataLLMProvider.IsLLMModelProvidedManually())
|
||||
{
|
||||
<MudText Typo="Typo.body1">
|
||||
@T("No models loaded or available.")
|
||||
</MudText>
|
||||
<MudButton Variant="Variant.Filled" Size="Size.Small" StartIcon="@Icons.Material.Filled.OpenInBrowser" Href="@this.DataLLMProvider.GetModelsOverviewURL(this.HFInferenceProviderId)" Target="_blank">
|
||||
@T("Show available models")
|
||||
</MudButton>
|
||||
<MudTextField
|
||||
T="string"
|
||||
@bind-Text="@this.dataManuallyModel"
|
||||
Label="@T("Model")"
|
||||
Adornment="Adornment.Start"
|
||||
AdornmentIcon="@Icons.Material.Filled.FaceRetouchingNatural"
|
||||
AdornmentColor="Color.Info"
|
||||
Validation="@this.ValidateManuallyModel"
|
||||
UserAttributes="@SPELLCHECK_ATTRIBUTES"
|
||||
HelperText="@T("Currently, we cannot query the models for the selected provider and/or host. Therefore, please enter the model name manually.")"
|
||||
/>
|
||||
}
|
||||
else
|
||||
{
|
||||
<MudSelect @bind-Value="@this.DataModel"
|
||||
OpenIcon="@Icons.Material.Filled.FaceRetouchingNatural" AdornmentColor="Color.Info"
|
||||
Adornment="Adornment.Start" Validation="@this.providerValidation.ValidatingModel">
|
||||
@foreach (var model in this.availableModels)
|
||||
{
|
||||
<MudSelectItem Value="@model">
|
||||
@model
|
||||
</MudSelectItem>
|
||||
}
|
||||
</MudSelect>
|
||||
<MudButton Disabled="@(!this.DataLLMProvider.CanLoadModels(this.DataHost, this.dataAPIKey))" Variant="Variant.Filled" Size="Size.Small" StartIcon="@Icons.Material.Filled.Refresh" OnClick="@this.ReloadModels">
|
||||
@T("Load models")
|
||||
</MudButton>
|
||||
@if(this.availableModels.Count is 0)
|
||||
{
|
||||
<MudText Typo="Typo.body1">
|
||||
@T("No models loaded or available.")
|
||||
</MudText>
|
||||
}
|
||||
else
|
||||
{
|
||||
<MudSelect @bind-Value="@this.DataModel"
|
||||
OpenIcon="@Icons.Material.Filled.FaceRetouchingNatural" AdornmentColor="Color.Info"
|
||||
Adornment="Adornment.Start" Validation="@this.providerValidation.ValidatingModel">
|
||||
@foreach (var model in this.availableModels)
|
||||
{
|
||||
<MudSelectItem Value="@model">
|
||||
@model
|
||||
</MudSelectItem>
|
||||
}
|
||||
</MudSelect>
|
||||
}
|
||||
}
|
||||
</MudStack>
|
||||
@if (!string.IsNullOrWhiteSpace(this.dataLoadingModelsIssue))
|
||||
{
|
||||
<MudAlert Severity="Severity.Error" Class="mt-3">
|
||||
@this.dataLoadingModelsIssue
|
||||
</MudAlert>
|
||||
}
|
||||
</MudStack>
|
||||
</MudField>
|
||||
</MudField>
|
||||
}
|
||||
else
|
||||
{
|
||||
<MudField FullWidth="true" Label="@T("Model selection")" Variant="Variant.Outlined" Class="mb-3">
|
||||
<MudText Typo="Typo.body1">
|
||||
@T("This host uses the model configured at the provider level. No model selection is available.")
|
||||
</MudText>
|
||||
</MudField>
|
||||
}
|
||||
|
||||
@* ReSharper disable once CSharpWarnings::CS8974 *@
|
||||
<MudTextField
|
||||
|
||||
@ -84,6 +84,9 @@ public partial class ProviderDialog : MSGComponentBase, ISecretId
|
||||
[Inject]
|
||||
private RustService RustService { get; init; } = null!;
|
||||
|
||||
[Inject]
|
||||
private ILogger<ProviderDialog> Logger { get; init; } = null!;
|
||||
|
||||
private static readonly Dictionary<string, object?> SPELLCHECK_ATTRIBUTES = new();
|
||||
|
||||
/// <summary>
|
||||
@ -97,6 +100,7 @@ public partial class ProviderDialog : MSGComponentBase, ISecretId
|
||||
private string dataManuallyModel = string.Empty;
|
||||
private string dataAPIKeyStorageIssue = string.Empty;
|
||||
private string dataEditingPreviousInstanceName = string.Empty;
|
||||
private string dataLoadingModelsIssue = string.Empty;
|
||||
private bool showExpertSettings;
|
||||
|
||||
// We get the form reference from Blazor code to validate it manually:
|
||||
@ -115,25 +119,36 @@ public partial class ProviderDialog : MSGComponentBase, ISecretId
|
||||
GetPreviousInstanceName = () => this.dataEditingPreviousInstanceName,
|
||||
GetUsedInstanceNames = () => this.UsedInstanceNames,
|
||||
GetHost = () => this.DataHost,
|
||||
IsModelProvidedManually = () => this.DataLLMProvider.IsLLMModelProvidedManually(),
|
||||
};
|
||||
}
|
||||
|
||||
private AIStudio.Settings.Provider CreateProviderSettings()
|
||||
{
|
||||
var cleanedHostname = this.DataHostname.Trim();
|
||||
|
||||
// Determine the model based on the provider and host configuration:
|
||||
Model model;
|
||||
if (this.DataLLMProvider.IsLLMModelSelectionHidden(this.DataHost))
|
||||
{
|
||||
// Use system model placeholder for hosts that don't support model selection (e.g., llama.cpp):
|
||||
model = Model.SYSTEM_MODEL;
|
||||
}
|
||||
else if (this.DataLLMProvider is LLMProviders.FIREWORKS or LLMProviders.HUGGINGFACE)
|
||||
{
|
||||
// These providers require manual model entry:
|
||||
model = new Model(this.dataManuallyModel, null);
|
||||
}
|
||||
else
|
||||
model = this.DataModel;
|
||||
|
||||
return new()
|
||||
{
|
||||
Num = this.DataNum,
|
||||
Id = this.DataId,
|
||||
InstanceName = this.DataInstanceName,
|
||||
UsedLLMProvider = this.DataLLMProvider,
|
||||
|
||||
Model = this.DataLLMProvider switch
|
||||
{
|
||||
LLMProviders.FIREWORKS or LLMProviders.HUGGINGFACE => new Model(this.dataManuallyModel, null),
|
||||
_ => this.DataModel
|
||||
},
|
||||
|
||||
Model = model,
|
||||
IsSelfHosted = this.DataLLMProvider is LLMProviders.SELF_HOSTED,
|
||||
IsEnterpriseConfiguration = false,
|
||||
Hostname = cleanedHostname.EndsWith('/') ? cleanedHostname[..^1] : cleanedHostname,
|
||||
@ -222,7 +237,16 @@ public partial class ProviderDialog : MSGComponentBase, ISecretId
|
||||
await this.form.Validate();
|
||||
if (!string.IsNullOrWhiteSpace(this.dataAPIKeyStorageIssue))
|
||||
this.dataAPIKeyStorageIssue = string.Empty;
|
||||
|
||||
|
||||
// Manually validate the model selection (needed when no models are loaded
|
||||
// and the MudSelect is not rendered):
|
||||
var modelValidationError = this.providerValidation.ValidatingModel(this.DataModel);
|
||||
if (!string.IsNullOrWhiteSpace(modelValidationError))
|
||||
{
|
||||
this.dataIssues = [..this.dataIssues, modelValidationError];
|
||||
this.dataIsValid = false;
|
||||
}
|
||||
|
||||
// When the data is not valid, we don't store it:
|
||||
if (!this.dataIsValid)
|
||||
return;
|
||||
@ -264,21 +288,40 @@ public partial class ProviderDialog : MSGComponentBase, ISecretId
|
||||
await this.form.Validate();
|
||||
}
|
||||
}
|
||||
|
||||
private void OnHostChanged(Host selectedHost)
|
||||
{
|
||||
// When the host changes, reset the model selection state:
|
||||
this.DataHost = selectedHost;
|
||||
this.DataModel = default;
|
||||
this.dataManuallyModel = string.Empty;
|
||||
this.availableModels.Clear();
|
||||
this.dataLoadingModelsIssue = string.Empty;
|
||||
}
|
||||
|
||||
private async Task ReloadModels()
|
||||
{
|
||||
this.dataLoadingModelsIssue = string.Empty;
|
||||
var currentProviderSettings = this.CreateProviderSettings();
|
||||
var provider = currentProviderSettings.CreateProvider();
|
||||
if(provider is NoProvider)
|
||||
if (provider is NoProvider)
|
||||
return;
|
||||
|
||||
var models = await provider.GetTextModels(this.dataAPIKey);
|
||||
|
||||
// Order descending by ID means that the newest models probably come first:
|
||||
var orderedModels = models.OrderByDescending(n => n.Id);
|
||||
|
||||
this.availableModels.Clear();
|
||||
this.availableModels.AddRange(orderedModels);
|
||||
|
||||
try
|
||||
{
|
||||
var models = await provider.GetTextModels(this.dataAPIKey);
|
||||
|
||||
// Order descending by ID means that the newest models probably come first:
|
||||
var orderedModels = models.OrderByDescending(n => n.Id);
|
||||
|
||||
this.availableModels.Clear();
|
||||
this.availableModels.AddRange(orderedModels);
|
||||
}
|
||||
catch (Exception e)
|
||||
{
|
||||
this.Logger.LogError($"Failed to load models from provider '{this.DataLLMProvider}' (host={this.DataHost}, hostname='{this.DataHostname}'): {e.Message}");
|
||||
this.dataLoadingModelsIssue = T("We are currently unable to communicate with the provider to load models. Please try again later.");
|
||||
}
|
||||
}
|
||||
|
||||
private string APIKeyText => this.DataLLMProvider switch
|
||||
|
||||
@ -44,7 +44,7 @@
|
||||
|
||||
@if (this.DataLLMProvider.IsHostNeeded())
|
||||
{
|
||||
<MudSelect @bind-Value="@this.DataHost" Label="@T("Host")" Class="mb-3" OpenIcon="@Icons.Material.Filled.ExpandMore" AdornmentColor="Color.Info" Adornment="Adornment.Start" Validation="@this.providerValidation.ValidatingHost">
|
||||
<MudSelect T="Host" Value="@this.DataHost" ValueChanged="@this.OnHostChanged" Label="@T("Host")" Class="mb-3" OpenIcon="@Icons.Material.Filled.ExpandMore" AdornmentColor="Color.Info" Adornment="Adornment.Start" Validation="@this.providerValidation.ValidatingHost">
|
||||
@foreach (Host host in Enum.GetValues(typeof(Host)))
|
||||
{
|
||||
if (host.IsTranscriptionSupported())
|
||||
@ -57,51 +57,68 @@
|
||||
</MudSelect>
|
||||
}
|
||||
|
||||
<MudField FullWidth="true" Label="@T("Model selection")" Variant="Variant.Outlined" Class="mb-3">
|
||||
<MudStack Row="@true" AlignItems="AlignItems.Center" StretchItems="StretchItems.End">
|
||||
@if (this.DataLLMProvider.IsTranscriptionModelProvidedManually(this.DataHost))
|
||||
{
|
||||
<MudTextField
|
||||
T="string"
|
||||
@bind-Text="@this.dataManuallyModel"
|
||||
Label="@T("Model")"
|
||||
Class="mb-3"
|
||||
Adornment="Adornment.Start"
|
||||
AdornmentIcon="@Icons.Material.Filled.Dns"
|
||||
AdornmentColor="Color.Info"
|
||||
Validation="@this.ValidateManuallyModel"
|
||||
UserAttributes="@SPELLCHECK_ATTRIBUTES"
|
||||
HelperText="@T("Currently, we cannot query the transcription models for the selected provider and/or host. Therefore, please enter the model name manually.")"
|
||||
/>
|
||||
}
|
||||
else
|
||||
{
|
||||
<MudButton Disabled="@(!this.DataLLMProvider.CanLoadModels(this.DataHost, this.dataAPIKey))" Variant="Variant.Filled" Size="Size.Small" StartIcon="@Icons.Material.Filled.Refresh" OnClick="@this.ReloadModels">
|
||||
@T("Load")
|
||||
</MudButton>
|
||||
@if(this.availableModels.Count is 0)
|
||||
@if (!this.DataLLMProvider.IsTranscriptionModelSelectionHidden(this.DataHost))
|
||||
{
|
||||
<MudField FullWidth="true" Label="@T("Model selection")" Variant="Variant.Outlined" Class="mb-3">
|
||||
<MudStack Row="@true" AlignItems="AlignItems.Center" StretchItems="StretchItems.End">
|
||||
@if (this.DataLLMProvider.IsTranscriptionModelProvidedManually(this.DataHost))
|
||||
{
|
||||
<MudText Typo="Typo.body1">
|
||||
@T("No models loaded or available.")
|
||||
</MudText>
|
||||
<MudTextField
|
||||
T="string"
|
||||
@bind-Text="@this.dataManuallyModel"
|
||||
Label="@T("Model")"
|
||||
Class="mb-3"
|
||||
Adornment="Adornment.Start"
|
||||
AdornmentIcon="@Icons.Material.Filled.Dns"
|
||||
AdornmentColor="Color.Info"
|
||||
Validation="@this.ValidateManuallyModel"
|
||||
UserAttributes="@SPELLCHECK_ATTRIBUTES"
|
||||
HelperText="@T("Currently, we cannot query the transcription models for the selected provider and/or host. Therefore, please enter the model name manually.")"
|
||||
/>
|
||||
}
|
||||
else
|
||||
{
|
||||
<MudSelect Disabled="@this.IsNoneProvider" @bind-Value="@this.DataModel" Label="@T("Model")"
|
||||
OpenIcon="@Icons.Material.Filled.FaceRetouchingNatural"
|
||||
AdornmentColor="Color.Info" Adornment="Adornment.Start"
|
||||
Validation="@this.providerValidation.ValidatingModel">
|
||||
@foreach (var model in this.availableModels)
|
||||
{
|
||||
<MudSelectItem Value="@model">
|
||||
@model
|
||||
</MudSelectItem>
|
||||
}
|
||||
</MudSelect>
|
||||
<MudButton Disabled="@(!this.DataLLMProvider.CanLoadModels(this.DataHost, this.dataAPIKey))" Variant="Variant.Filled" Size="Size.Small" StartIcon="@Icons.Material.Filled.Refresh" OnClick="@this.ReloadModels">
|
||||
@T("Load")
|
||||
</MudButton>
|
||||
@if(this.availableModels.Count is 0)
|
||||
{
|
||||
<MudText Typo="Typo.body1">
|
||||
@T("No models loaded or available.")
|
||||
</MudText>
|
||||
}
|
||||
else
|
||||
{
|
||||
<MudSelect Disabled="@this.IsNoneProvider" @bind-Value="@this.DataModel" Label="@T("Model")"
|
||||
OpenIcon="@Icons.Material.Filled.FaceRetouchingNatural"
|
||||
AdornmentColor="Color.Info" Adornment="Adornment.Start"
|
||||
Validation="@this.providerValidation.ValidatingModel">
|
||||
@foreach (var model in this.availableModels)
|
||||
{
|
||||
<MudSelectItem Value="@model">
|
||||
@model
|
||||
</MudSelectItem>
|
||||
}
|
||||
</MudSelect>
|
||||
}
|
||||
}
|
||||
</MudStack>
|
||||
@if (!string.IsNullOrWhiteSpace(this.dataLoadingModelsIssue))
|
||||
{
|
||||
<MudAlert Severity="Severity.Error" Class="mt-3">
|
||||
@this.dataLoadingModelsIssue
|
||||
</MudAlert>
|
||||
}
|
||||
</MudStack>
|
||||
</MudField>
|
||||
</MudField>
|
||||
}
|
||||
else
|
||||
{
|
||||
<MudField FullWidth="true" Label="@T("Model selection")" Variant="Variant.Outlined" Class="mb-3">
|
||||
<MudText Typo="Typo.body1">
|
||||
@T("This host uses the model configured at the provider level. No model selection is available.")
|
||||
</MudText>
|
||||
</MudField>
|
||||
}
|
||||
|
||||
@* ReSharper disable once CSharpWarnings::CS8974 *@
|
||||
<MudTextField
|
||||
|
||||
@ -71,7 +71,10 @@ public partial class TranscriptionProviderDialog : MSGComponentBase, ISecretId
|
||||
|
||||
[Inject]
|
||||
private RustService RustService { get; init; } = null!;
|
||||
|
||||
|
||||
[Inject]
|
||||
private ILogger<TranscriptionProviderDialog> Logger { get; init; } = null!;
|
||||
|
||||
private static readonly Dictionary<string, object?> SPELLCHECK_ATTRIBUTES = new();
|
||||
|
||||
/// <summary>
|
||||
@ -85,7 +88,8 @@ public partial class TranscriptionProviderDialog : MSGComponentBase, ISecretId
|
||||
private string dataManuallyModel = string.Empty;
|
||||
private string dataAPIKeyStorageIssue = string.Empty;
|
||||
private string dataEditingPreviousInstanceName = string.Empty;
|
||||
|
||||
private string dataLoadingModelsIssue = string.Empty;
|
||||
|
||||
// We get the form reference from Blazor code to validate it manually:
|
||||
private MudForm form = null!;
|
||||
|
||||
@ -102,31 +106,39 @@ public partial class TranscriptionProviderDialog : MSGComponentBase, ISecretId
|
||||
GetPreviousInstanceName = () => this.dataEditingPreviousInstanceName,
|
||||
GetUsedInstanceNames = () => this.UsedInstanceNames,
|
||||
GetHost = () => this.DataHost,
|
||||
IsModelProvidedManually = () => this.DataLLMProvider.IsTranscriptionModelProvidedManually(this.DataHost),
|
||||
};
|
||||
}
|
||||
|
||||
private TranscriptionProvider CreateTranscriptionProviderSettings()
|
||||
{
|
||||
var cleanedHostname = this.DataHostname.Trim();
|
||||
Model model = default;
|
||||
if(this.DataLLMProvider is LLMProviders.SELF_HOSTED)
|
||||
|
||||
// Determine the model based on the provider and host configuration:
|
||||
Model model;
|
||||
if (this.DataLLMProvider.IsTranscriptionModelSelectionHidden(this.DataHost))
|
||||
{
|
||||
// Use system model placeholder for hosts that don't support model selection (e.g., whisper.cpp):
|
||||
model = Model.SYSTEM_MODEL;
|
||||
}
|
||||
else if (this.DataLLMProvider is LLMProviders.SELF_HOSTED)
|
||||
{
|
||||
switch (this.DataHost)
|
||||
{
|
||||
case Host.OLLAMA:
|
||||
model = new Model(this.dataManuallyModel, null);
|
||||
break;
|
||||
|
||||
|
||||
case Host.VLLM:
|
||||
case Host.LM_STUDIO:
|
||||
case Host.WHISPER_CPP:
|
||||
default:
|
||||
model = this.DataModel;
|
||||
break;
|
||||
}
|
||||
}
|
||||
else
|
||||
model = this.DataModel;
|
||||
|
||||
|
||||
return new()
|
||||
{
|
||||
Num = this.DataNum,
|
||||
@ -216,7 +228,16 @@ public partial class TranscriptionProviderDialog : MSGComponentBase, ISecretId
|
||||
{
|
||||
await this.form.Validate();
|
||||
this.dataAPIKeyStorageIssue = string.Empty;
|
||||
|
||||
|
||||
// Manually validate the model selection (needed when no models are loaded
|
||||
// and the MudSelect is not rendered):
|
||||
var modelValidationError = this.providerValidation.ValidatingModel(this.DataModel);
|
||||
if (!string.IsNullOrWhiteSpace(modelValidationError))
|
||||
{
|
||||
this.dataIssues = [..this.dataIssues, modelValidationError];
|
||||
this.dataIsValid = false;
|
||||
}
|
||||
|
||||
// When the data is not valid, we don't store it:
|
||||
if (!this.dataIsValid)
|
||||
return;
|
||||
@ -243,7 +264,7 @@ public partial class TranscriptionProviderDialog : MSGComponentBase, ISecretId
|
||||
{
|
||||
if (this.DataLLMProvider is LLMProviders.SELF_HOSTED && string.IsNullOrWhiteSpace(manuallyModel))
|
||||
return T("Please enter a transcription model name.");
|
||||
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
@ -258,21 +279,40 @@ public partial class TranscriptionProviderDialog : MSGComponentBase, ISecretId
|
||||
await this.form.Validate();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
private void OnHostChanged(Host selectedHost)
|
||||
{
|
||||
// When the host changes, reset the model selection state:
|
||||
this.DataHost = selectedHost;
|
||||
this.DataModel = default;
|
||||
this.dataManuallyModel = string.Empty;
|
||||
this.availableModels.Clear();
|
||||
this.dataLoadingModelsIssue = string.Empty;
|
||||
}
|
||||
|
||||
private async Task ReloadModels()
|
||||
{
|
||||
this.dataLoadingModelsIssue = string.Empty;
|
||||
var currentTranscriptionProviderSettings = this.CreateTranscriptionProviderSettings();
|
||||
var provider = currentTranscriptionProviderSettings.CreateProvider();
|
||||
if(provider is NoProvider)
|
||||
if (provider is NoProvider)
|
||||
return;
|
||||
|
||||
var models = await provider.GetTranscriptionModels(this.dataAPIKey);
|
||||
|
||||
// Order descending by ID means that the newest models probably come first:
|
||||
var orderedModels = models.OrderByDescending(n => n.Id);
|
||||
|
||||
this.availableModels.Clear();
|
||||
this.availableModels.AddRange(orderedModels);
|
||||
|
||||
try
|
||||
{
|
||||
var models = await provider.GetTranscriptionModels(this.dataAPIKey);
|
||||
|
||||
// Order descending by ID means that the newest models probably come first:
|
||||
var orderedModels = models.OrderByDescending(n => n.Id);
|
||||
|
||||
this.availableModels.Clear();
|
||||
this.availableModels.AddRange(orderedModels);
|
||||
}
|
||||
catch (Exception e)
|
||||
{
|
||||
this.Logger.LogError($"Failed to load models from provider '{this.DataLLMProvider}' (host={this.DataHost}, hostname='{this.DataHostname}'): {e.Message}");;
|
||||
this.dataLoadingModelsIssue = T("We are currently unable to communicate with the provider to load models. Please try again later.");
|
||||
}
|
||||
}
|
||||
|
||||
private string APIKeyText => this.DataLLMProvider switch
|
||||
|
||||
@ -384,6 +384,9 @@ UI_TEXT_CONTENT["AISTUDIO::ASSISTANTS::CODING::COMMONCODINGLANGUAGEEXTENSIONS::T
|
||||
-- None
|
||||
UI_TEXT_CONTENT["AISTUDIO::ASSISTANTS::CODING::COMMONCODINGLANGUAGEEXTENSIONS::T810547195"] = "Keine"
|
||||
|
||||
-- {0} - Document Analysis Session
|
||||
UI_TEXT_CONTENT["AISTUDIO::ASSISTANTS::DOCUMENTANALYSIS::DOCUMENTANALYSISASSISTANT::T108097007"] = "{0} – Sitzung zur Dokumentenanalyse"
|
||||
|
||||
-- Use the analysis and output rules to define how the AI evaluates your documents and formats the results.
|
||||
UI_TEXT_CONTENT["AISTUDIO::ASSISTANTS::DOCUMENTANALYSIS::DOCUMENTANALYSISASSISTANT::T1155482668"] = "Verwenden Sie die Analyse- und Ausgaberegeln, um festzulegen, wie die KI Ihre Dokumente bewertet und die Ergebnisse formatiert."
|
||||
|
||||
@ -438,6 +441,9 @@ UI_TEXT_CONTENT["AISTUDIO::ASSISTANTS::DOCUMENTANALYSIS::DOCUMENTANALYSISASSISTA
|
||||
-- Export policy as configuration section
|
||||
UI_TEXT_CONTENT["AISTUDIO::ASSISTANTS::DOCUMENTANALYSIS::DOCUMENTANALYSISASSISTANT::T2556564432"] = "Exportieren Sie das Regelwerk als Konfigurationsabschnitt"
|
||||
|
||||
-- The result of your previous document analysis session.
|
||||
UI_TEXT_CONTENT["AISTUDIO::ASSISTANTS::DOCUMENTANALYSIS::DOCUMENTANALYSISASSISTANT::T2570551055"] = "Das Ergebnis Ihrer vorherigen Dokumentenanalyse-Sitzung."
|
||||
|
||||
-- Are you sure you want to delete the document analysis policy '{0}'?
|
||||
UI_TEXT_CONTENT["AISTUDIO::ASSISTANTS::DOCUMENTANALYSIS::DOCUMENTANALYSISASSISTANT::T2582525917"] = "Möchten Sie das Regelwerk '{0}' wirklich löschen?"
|
||||
|
||||
@ -471,6 +477,9 @@ UI_TEXT_CONTENT["AISTUDIO::ASSISTANTS::DOCUMENTANALYSIS::DOCUMENTANALYSISASSISTA
|
||||
-- Document Analysis Assistant
|
||||
UI_TEXT_CONTENT["AISTUDIO::ASSISTANTS::DOCUMENTANALYSIS::DOCUMENTANALYSISASSISTANT::T348883878"] = "Assistent für die Dokumentenanalyse"
|
||||
|
||||
-- Empty
|
||||
UI_TEXT_CONTENT["AISTUDIO::ASSISTANTS::DOCUMENTANALYSIS::DOCUMENTANALYSISASSISTANT::T3512147854"] = "Leer"
|
||||
|
||||
-- Analysis and output rules
|
||||
UI_TEXT_CONTENT["AISTUDIO::ASSISTANTS::DOCUMENTANALYSIS::DOCUMENTANALYSISASSISTANT::T3555314296"] = "Analyse- und Ausgaberegeln"
|
||||
|
||||
@ -2103,6 +2112,9 @@ UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELEMBEDDINGS::T14695
|
||||
-- Add Embedding
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELEMBEDDINGS::T1738753945"] = "Einbettung hinzufügen"
|
||||
|
||||
-- Uses the provider-configured model
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELEMBEDDINGS::T1760715963"] = "Verwendet das vom Anbieter konfigurierte Modell"
|
||||
|
||||
-- Are you sure you want to delete the embedding provider '{0}'?
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELEMBEDDINGS::T1825371968"] = "Sind Sie sicher, dass Sie den Einbettungsanbieter '{0}' löschen möchten?"
|
||||
|
||||
@ -2166,11 +2178,14 @@ UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELPROVIDERS::T162847
|
||||
-- Description
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELPROVIDERS::T1725856265"] = "Beschreibung"
|
||||
|
||||
-- Uses the provider-configured model
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELPROVIDERS::T1760715963"] = "Verwendet das vom Anbieter konfigurierte Modell"
|
||||
|
||||
-- Add Provider
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELPROVIDERS::T1806589097"] = "Anbieter hinzufügen"
|
||||
|
||||
-- Configure LLM Providers
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELPROVIDERS::T1810190350"] = "Anbieter für LLM konfigurieren"
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELPROVIDERS::T1810190350"] = "Anbieter für LLMs konfigurieren"
|
||||
|
||||
-- Edit LLM Provider
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELPROVIDERS::T1868766523"] = "LLM-Anbieter bearbeiten"
|
||||
@ -2206,10 +2221,7 @@ UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELPROVIDERS::T284206
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELPROVIDERS::T2911731076"] = "Noch keine Anbieter konfiguriert."
|
||||
|
||||
-- Configured LLM Providers
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELPROVIDERS::T3019870540"] = "Konfigurierte Anbieter für LLM"
|
||||
|
||||
-- as selected by provider
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELPROVIDERS::T3082210376"] = "wie vom Anbieter ausgewählt"
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELPROVIDERS::T3019870540"] = "Konfigurierte Anbieter für LLMs"
|
||||
|
||||
-- Edit
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELPROVIDERS::T3267849393"] = "Bearbeiten"
|
||||
@ -2268,6 +2280,9 @@ UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELTRANSCRIPTION::T14
|
||||
-- Add transcription provider
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELTRANSCRIPTION::T1645238629"] = "Anbieter für Transkriptionen hinzufügen"
|
||||
|
||||
-- Uses the provider-configured model
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELTRANSCRIPTION::T1760715963"] = "Verwendet das vom Anbieter konfigurierte Modell"
|
||||
|
||||
-- Add Transcription Provider
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELTRANSCRIPTION::T2066315685"] = "Anbieter für Transkriptionen hinzufügen"
|
||||
|
||||
@ -2292,6 +2307,9 @@ UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELTRANSCRIPTION::T40
|
||||
-- Configured Transcription Providers
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELTRANSCRIPTION::T4210863523"] = "Konfigurierte Anbieter für Transkriptionen"
|
||||
|
||||
-- With the support of transcription models, MindWork AI Studio can convert human speech into text. This is useful, for example, when you need to dictate text. You can choose from dedicated transcription models, but not multimodal LLMs (large language models) that can handle both speech and text. The configuration of multimodal models is done in the 'Configure LLM providers' section.
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELTRANSCRIPTION::T584860404"] = "Mit Unterstützung von Modellen für Transkriptionen kann MindWork AI Studio menschliche Sprache in Text umwandeln. Das ist zum Beispiel hilfreich, wenn Sie Texte diktieren möchten. Sie können aus speziellen Modellen für Transkriptionen wählen, jedoch nicht aus multimodalen LLMs (Large Language Models), die sowohl Sprache als auch Text verarbeiten können. Die Einrichtung multimodaler Modelle erfolgt im Abschnitt „Anbieter für LLMs konfigurieren“."
|
||||
|
||||
-- This transcription provider is managed by your organization.
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELTRANSCRIPTION::T756131076"] = "Dieser Anbieter für Transkriptionen wird von Ihrer Organisation verwaltet."
|
||||
|
||||
@ -2301,9 +2319,6 @@ UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELTRANSCRIPTION::T78
|
||||
-- Are you sure you want to delete the transcription provider '{0}'?
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELTRANSCRIPTION::T789660305"] = "Möchten Sie den Anbieter für Transkriptionen „{0}“ wirklich löschen?"
|
||||
|
||||
-- With the support of transcription models, MindWork AI Studio can convert human speech into text. This is useful, for example, when you need to dictate text. You can choose from dedicated transcription models, but not multimodal LLMs (large language models) that can handle both speech and text. The configuration of multimodal models is done in the \"Configure providers\" section.
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELTRANSCRIPTION::T584860404"] = "Mit Unterstützung von Modellen für Transkriptionen kann MindWork AI Studio menschliche Sprache in Text umwandeln. Das ist zum Beispiel hilfreich, wenn Sie Texte diktieren möchten. Sie können aus speziellen Modellen für Transkriptionen wählen, jedoch nicht aus multimodalen LLMs (Large Language Models), die sowohl Sprache als auch Text verarbeiten können. Die Einrichtung multimodaler Modelle erfolgt im Abschnitt „Anbieter für LLM konfigurieren“."
|
||||
|
||||
-- Provider
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELTRANSCRIPTION::T900237532"] = "Anbieter"
|
||||
|
||||
@ -3207,6 +3222,9 @@ UI_TEXT_CONTENT["AISTUDIO::DIALOGS::EMBEDDINGPROVIDERDIALOG::T290547799"] = "Der
|
||||
-- Model selection
|
||||
UI_TEXT_CONTENT["AISTUDIO::DIALOGS::EMBEDDINGPROVIDERDIALOG::T416738168"] = "Modellauswahl"
|
||||
|
||||
-- We are currently unable to communicate with the provider to load models. Please try again later.
|
||||
UI_TEXT_CONTENT["AISTUDIO::DIALOGS::EMBEDDINGPROVIDERDIALOG::T504465522"] = "Wir können derzeit nicht mit dem Anbieter kommunizieren, um Modelle zu laden. Bitte versuchen Sie es später erneut."
|
||||
|
||||
-- Host
|
||||
UI_TEXT_CONTENT["AISTUDIO::DIALOGS::EMBEDDINGPROVIDERDIALOG::T808120719"] = "Host"
|
||||
|
||||
@ -3414,12 +3432,18 @@ UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T3361153305"] = "Experten-Ei
|
||||
-- Show available models
|
||||
UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T3763891899"] = "Verfügbare Modelle anzeigen"
|
||||
|
||||
-- This host uses the model configured at the provider level. No model selection is available.
|
||||
UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T3783329915"] = "Dieser Host verwendet das auf Anbieterebene konfigurierte Modell. Es ist keine Modellauswahl verfügbar."
|
||||
|
||||
-- Currently, we cannot query the models for the selected provider and/or host. Therefore, please enter the model name manually.
|
||||
UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T4116737656"] = "Derzeit können wir die Modelle für den ausgewählten Anbieter und/oder Host nicht abfragen. Bitte geben Sie daher den Modellnamen manuell ein."
|
||||
|
||||
-- Model selection
|
||||
UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T416738168"] = "Modellauswahl"
|
||||
|
||||
-- We are currently unable to communicate with the provider to load models. Please try again later.
|
||||
UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T504465522"] = "Wir können derzeit nicht mit dem Anbieter kommunizieren, um Modelle zu laden. Bitte versuchen Sie es später erneut."
|
||||
|
||||
-- Host
|
||||
UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T808120719"] = "Host"
|
||||
|
||||
@ -4635,9 +4659,15 @@ UI_TEXT_CONTENT["AISTUDIO::DIALOGS::TRANSCRIPTIONPROVIDERDIALOG::T2842060373"] =
|
||||
-- Please enter a transcription model name.
|
||||
UI_TEXT_CONTENT["AISTUDIO::DIALOGS::TRANSCRIPTIONPROVIDERDIALOG::T3703662664"] = "Bitte geben Sie den Namen eines Transkriptionsmodells ein."
|
||||
|
||||
-- This host uses the model configured at the provider level. No model selection is available.
|
||||
UI_TEXT_CONTENT["AISTUDIO::DIALOGS::TRANSCRIPTIONPROVIDERDIALOG::T3783329915"] = "Dieser Host verwendet das auf Anbieterebene konfigurierte Modell. Eine Modellauswahl ist nicht verfügbar."
|
||||
|
||||
-- Model selection
|
||||
UI_TEXT_CONTENT["AISTUDIO::DIALOGS::TRANSCRIPTIONPROVIDERDIALOG::T416738168"] = "Modellauswahl"
|
||||
|
||||
-- We are currently unable to communicate with the provider to load models. Please try again later.
|
||||
UI_TEXT_CONTENT["AISTUDIO::DIALOGS::TRANSCRIPTIONPROVIDERDIALOG::T504465522"] = "Wir können derzeit nicht mit dem Anbieter kommunizieren, um Modelle zu laden. Bitte versuchen Sie es später erneut."
|
||||
|
||||
-- Host
|
||||
UI_TEXT_CONTENT["AISTUDIO::DIALOGS::TRANSCRIPTIONPROVIDERDIALOG::T808120719"] = "Host"
|
||||
|
||||
|
||||
@ -384,6 +384,9 @@ UI_TEXT_CONTENT["AISTUDIO::ASSISTANTS::CODING::COMMONCODINGLANGUAGEEXTENSIONS::T
|
||||
-- None
|
||||
UI_TEXT_CONTENT["AISTUDIO::ASSISTANTS::CODING::COMMONCODINGLANGUAGEEXTENSIONS::T810547195"] = "None"
|
||||
|
||||
-- {0} - Document Analysis Session
|
||||
UI_TEXT_CONTENT["AISTUDIO::ASSISTANTS::DOCUMENTANALYSIS::DOCUMENTANALYSISASSISTANT::T108097007"] = "{0} - Document Analysis Session"
|
||||
|
||||
-- Use the analysis and output rules to define how the AI evaluates your documents and formats the results.
|
||||
UI_TEXT_CONTENT["AISTUDIO::ASSISTANTS::DOCUMENTANALYSIS::DOCUMENTANALYSISASSISTANT::T1155482668"] = "Use the analysis and output rules to define how the AI evaluates your documents and formats the results."
|
||||
|
||||
@ -438,6 +441,9 @@ UI_TEXT_CONTENT["AISTUDIO::ASSISTANTS::DOCUMENTANALYSIS::DOCUMENTANALYSISASSISTA
|
||||
-- Export policy as configuration section
|
||||
UI_TEXT_CONTENT["AISTUDIO::ASSISTANTS::DOCUMENTANALYSIS::DOCUMENTANALYSISASSISTANT::T2556564432"] = "Export policy as configuration section"
|
||||
|
||||
-- The result of your previous document analysis session.
|
||||
UI_TEXT_CONTENT["AISTUDIO::ASSISTANTS::DOCUMENTANALYSIS::DOCUMENTANALYSISASSISTANT::T2570551055"] = "The result of your previous document analysis session."
|
||||
|
||||
-- Are you sure you want to delete the document analysis policy '{0}'?
|
||||
UI_TEXT_CONTENT["AISTUDIO::ASSISTANTS::DOCUMENTANALYSIS::DOCUMENTANALYSISASSISTANT::T2582525917"] = "Are you sure you want to delete the document analysis policy '{0}'?"
|
||||
|
||||
@ -471,6 +477,9 @@ UI_TEXT_CONTENT["AISTUDIO::ASSISTANTS::DOCUMENTANALYSIS::DOCUMENTANALYSISASSISTA
|
||||
-- Document Analysis Assistant
|
||||
UI_TEXT_CONTENT["AISTUDIO::ASSISTANTS::DOCUMENTANALYSIS::DOCUMENTANALYSISASSISTANT::T348883878"] = "Document Analysis Assistant"
|
||||
|
||||
-- Empty
|
||||
UI_TEXT_CONTENT["AISTUDIO::ASSISTANTS::DOCUMENTANALYSIS::DOCUMENTANALYSISASSISTANT::T3512147854"] = "Empty"
|
||||
|
||||
-- Analysis and output rules
|
||||
UI_TEXT_CONTENT["AISTUDIO::ASSISTANTS::DOCUMENTANALYSIS::DOCUMENTANALYSISASSISTANT::T3555314296"] = "Analysis and output rules"
|
||||
|
||||
@ -2103,6 +2112,9 @@ UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELEMBEDDINGS::T14695
|
||||
-- Add Embedding
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELEMBEDDINGS::T1738753945"] = "Add Embedding"
|
||||
|
||||
-- Uses the provider-configured model
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELEMBEDDINGS::T1760715963"] = "Uses the provider-configured model"
|
||||
|
||||
-- Are you sure you want to delete the embedding provider '{0}'?
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELEMBEDDINGS::T1825371968"] = "Are you sure you want to delete the embedding provider '{0}'?"
|
||||
|
||||
@ -2166,6 +2178,9 @@ UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELPROVIDERS::T162847
|
||||
-- Description
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELPROVIDERS::T1725856265"] = "Description"
|
||||
|
||||
-- Uses the provider-configured model
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELPROVIDERS::T1760715963"] = "Uses the provider-configured model"
|
||||
|
||||
-- Add Provider
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELPROVIDERS::T1806589097"] = "Add Provider"
|
||||
|
||||
@ -2208,9 +2223,6 @@ UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELPROVIDERS::T291173
|
||||
-- Configured LLM Providers
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELPROVIDERS::T3019870540"] = "Configured LLM Providers"
|
||||
|
||||
-- as selected by provider
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELPROVIDERS::T3082210376"] = "as selected by provider"
|
||||
|
||||
-- Edit
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELPROVIDERS::T3267849393"] = "Edit"
|
||||
|
||||
@ -2268,6 +2280,9 @@ UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELTRANSCRIPTION::T14
|
||||
-- Add transcription provider
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELTRANSCRIPTION::T1645238629"] = "Add transcription provider"
|
||||
|
||||
-- Uses the provider-configured model
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELTRANSCRIPTION::T1760715963"] = "Uses the provider-configured model"
|
||||
|
||||
-- Add Transcription Provider
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELTRANSCRIPTION::T2066315685"] = "Add Transcription Provider"
|
||||
|
||||
@ -2292,7 +2307,7 @@ UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELTRANSCRIPTION::T40
|
||||
-- Configured Transcription Providers
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELTRANSCRIPTION::T4210863523"] = "Configured Transcription Providers"
|
||||
|
||||
-- With the support of transcription models, MindWork AI Studio can convert human speech into text. This is useful, for example, when you need to dictate text. You can choose from dedicated transcription models, but not multimodal LLMs (large language models) that can handle both speech and text. The configuration of multimodal models is done in the 'Configure providers' section.
|
||||
-- With the support of transcription models, MindWork AI Studio can convert human speech into text. This is useful, for example, when you need to dictate text. You can choose from dedicated transcription models, but not multimodal LLMs (large language models) that can handle both speech and text. The configuration of multimodal models is done in the 'Configure LLM providers' section.
|
||||
UI_TEXT_CONTENT["AISTUDIO::COMPONENTS::SETTINGS::SETTINGSPANELTRANSCRIPTION::T584860404"] = "With the support of transcription models, MindWork AI Studio can convert human speech into text. This is useful, for example, when you need to dictate text. You can choose from dedicated transcription models, but not multimodal LLMs (large language models) that can handle both speech and text. The configuration of multimodal models is done in the 'Configure LLM providers' section."
|
||||
|
||||
-- This transcription provider is managed by your organization.
|
||||
@ -3207,6 +3222,9 @@ UI_TEXT_CONTENT["AISTUDIO::DIALOGS::EMBEDDINGPROVIDERDIALOG::T290547799"] = "Cur
|
||||
-- Model selection
|
||||
UI_TEXT_CONTENT["AISTUDIO::DIALOGS::EMBEDDINGPROVIDERDIALOG::T416738168"] = "Model selection"
|
||||
|
||||
-- We are currently unable to communicate with the provider to load models. Please try again later.
|
||||
UI_TEXT_CONTENT["AISTUDIO::DIALOGS::EMBEDDINGPROVIDERDIALOG::T504465522"] = "We are currently unable to communicate with the provider to load models. Please try again later."
|
||||
|
||||
-- Host
|
||||
UI_TEXT_CONTENT["AISTUDIO::DIALOGS::EMBEDDINGPROVIDERDIALOG::T808120719"] = "Host"
|
||||
|
||||
@ -3414,12 +3432,18 @@ UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T3361153305"] = "Show Expert
|
||||
-- Show available models
|
||||
UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T3763891899"] = "Show available models"
|
||||
|
||||
-- This host uses the model configured at the provider level. No model selection is available.
|
||||
UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T3783329915"] = "This host uses the model configured at the provider level. No model selection is available."
|
||||
|
||||
-- Currently, we cannot query the models for the selected provider and/or host. Therefore, please enter the model name manually.
|
||||
UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T4116737656"] = "Currently, we cannot query the models for the selected provider and/or host. Therefore, please enter the model name manually."
|
||||
|
||||
-- Model selection
|
||||
UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T416738168"] = "Model selection"
|
||||
|
||||
-- We are currently unable to communicate with the provider to load models. Please try again later.
|
||||
UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T504465522"] = "We are currently unable to communicate with the provider to load models. Please try again later."
|
||||
|
||||
-- Host
|
||||
UI_TEXT_CONTENT["AISTUDIO::DIALOGS::PROVIDERDIALOG::T808120719"] = "Host"
|
||||
|
||||
@ -4635,9 +4659,15 @@ UI_TEXT_CONTENT["AISTUDIO::DIALOGS::TRANSCRIPTIONPROVIDERDIALOG::T2842060373"] =
|
||||
-- Please enter a transcription model name.
|
||||
UI_TEXT_CONTENT["AISTUDIO::DIALOGS::TRANSCRIPTIONPROVIDERDIALOG::T3703662664"] = "Please enter a transcription model name."
|
||||
|
||||
-- This host uses the model configured at the provider level. No model selection is available.
|
||||
UI_TEXT_CONTENT["AISTUDIO::DIALOGS::TRANSCRIPTIONPROVIDERDIALOG::T3783329915"] = "This host uses the model configured at the provider level. No model selection is available."
|
||||
|
||||
-- Model selection
|
||||
UI_TEXT_CONTENT["AISTUDIO::DIALOGS::TRANSCRIPTIONPROVIDERDIALOG::T416738168"] = "Model selection"
|
||||
|
||||
-- We are currently unable to communicate with the provider to load models. Please try again later.
|
||||
UI_TEXT_CONTENT["AISTUDIO::DIALOGS::TRANSCRIPTIONPROVIDERDIALOG::T504465522"] = "We are currently unable to communicate with the provider to load models. Please try again later."
|
||||
|
||||
-- Host
|
||||
UI_TEXT_CONTENT["AISTUDIO::DIALOGS::TRANSCRIPTIONPROVIDERDIALOG::T808120719"] = "Host"
|
||||
|
||||
|
||||
@ -554,10 +554,22 @@ public abstract class BaseProvider : IProvider, ISecretId
|
||||
|
||||
await using var fileStream = File.OpenRead(audioFilePath);
|
||||
using var fileContent = new StreamContent(fileStream);
|
||||
|
||||
// Set the content type based on the file extension:
|
||||
fileContent.Headers.ContentType = new MediaTypeHeaderValue(mimeType);
|
||||
|
||||
|
||||
// Add the file content to the form data:
|
||||
form.Add(fileContent, "file", Path.GetFileName(audioFilePath));
|
||||
form.Add(new StringContent(transcriptionModel.Id), "model");
|
||||
|
||||
//
|
||||
// Add the model name to the form data. Ensure that a model name is always provided.
|
||||
// Otherwise, the StringContent constructor will throw an exception.
|
||||
//
|
||||
var modelName = transcriptionModel.Id;
|
||||
if (string.IsNullOrWhiteSpace(modelName))
|
||||
modelName = "placeholder";
|
||||
|
||||
form.Add(new StringContent(modelName), "model");
|
||||
|
||||
using var request = new HttpRequestMessage(HttpMethod.Post, host.TranscriptionURL());
|
||||
request.Content = form;
|
||||
|
||||
@ -327,6 +327,32 @@ public static class LLMProvidersExtensions
|
||||
_ => false,
|
||||
};
|
||||
|
||||
/// <summary>
|
||||
/// Determines if the model selection should be completely hidden for LLM providers.
|
||||
/// This is the case when the host does not support model selection (e.g., llama.cpp).
|
||||
/// </summary>
|
||||
/// <param name="provider">The provider.</param>
|
||||
/// <param name="host">The host for self-hosted providers.</param>
|
||||
/// <returns>True if model selection should be hidden; otherwise, false.</returns>
|
||||
public static bool IsLLMModelSelectionHidden(this LLMProviders provider, Host host) => provider switch
|
||||
{
|
||||
LLMProviders.SELF_HOSTED => host is Host.LLAMA_CPP,
|
||||
_ => false,
|
||||
};
|
||||
|
||||
/// <summary>
|
||||
/// Determines if the model selection should be completely hidden for transcription providers.
|
||||
/// This is the case when the host does not support model selection (e.g., whisper.cpp).
|
||||
/// </summary>
|
||||
/// <param name="provider">The provider.</param>
|
||||
/// <param name="host">The host for self-hosted providers.</param>
|
||||
/// <returns>True if model selection should be hidden; otherwise, false.</returns>
|
||||
public static bool IsTranscriptionModelSelectionHidden(this LLMProviders provider, Host host) => provider switch
|
||||
{
|
||||
LLMProviders.SELF_HOSTED => host is Host.WHISPER_CPP,
|
||||
_ => false,
|
||||
};
|
||||
|
||||
public static bool IsHostNeeded(this LLMProviders provider) => provider switch
|
||||
{
|
||||
LLMProviders.SELF_HOSTED => true,
|
||||
@ -391,13 +417,13 @@ public static class LLMProvidersExtensions
|
||||
{
|
||||
case Host.NONE:
|
||||
case Host.LLAMA_CPP:
|
||||
case Host.WHISPER_CPP:
|
||||
default:
|
||||
return false;
|
||||
|
||||
case Host.OLLAMA:
|
||||
case Host.LM_STUDIO:
|
||||
case Host.VLLM:
|
||||
case Host.WHISPER_CPP:
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
@ -9,6 +9,22 @@ namespace AIStudio.Provider;
|
||||
/// <param name="DisplayName">The model's display name.</param>
|
||||
public readonly record struct Model(string Id, string? DisplayName)
|
||||
{
|
||||
/// <summary>
|
||||
/// Special model ID used when the model is selected by the system/host
|
||||
/// and cannot be changed by the user (e.g., llama.cpp, whisper.cpp).
|
||||
/// </summary>
|
||||
private const string SYSTEM_MODEL_ID = "::system::";
|
||||
|
||||
/// <summary>
|
||||
/// Creates a system-configured model placeholder.
|
||||
/// </summary>
|
||||
public static readonly Model SYSTEM_MODEL = new(SYSTEM_MODEL_ID, null);
|
||||
|
||||
/// <summary>
|
||||
/// Checks if this model is the system-configured placeholder.
|
||||
/// </summary>
|
||||
public bool IsSystemModel => this == SYSTEM_MODEL;
|
||||
|
||||
private static string TB(string fallbackEN) => I18N.I.T(fallbackEN, typeof(Model).Namespace, nameof(Model));
|
||||
|
||||
#region Overrides of ValueType
|
||||
|
||||
@ -149,31 +149,30 @@ public sealed class ProviderSelfHosted(Host host, string hostname) : BaseProvide
|
||||
}
|
||||
|
||||
/// <inheritdoc />
|
||||
public override Task<IEnumerable<Provider.Model>> GetTranscriptionModels(string? apiKeyProvisional = null, CancellationToken token = default)
|
||||
public override async Task<IEnumerable<Provider.Model>> GetTranscriptionModels(string? apiKeyProvisional = null, CancellationToken token = default)
|
||||
{
|
||||
try
|
||||
{
|
||||
switch (host)
|
||||
{
|
||||
case Host.WHISPER_CPP:
|
||||
return Task.FromResult<IEnumerable<Provider.Model>>(
|
||||
new List<Provider.Model>
|
||||
{
|
||||
new("loaded-model", TB("Model as configured by whisper.cpp")),
|
||||
});
|
||||
return new List<Provider.Model>
|
||||
{
|
||||
new("loaded-model", TB("Model as configured by whisper.cpp")),
|
||||
};
|
||||
|
||||
case Host.OLLAMA:
|
||||
case Host.VLLM:
|
||||
return this.LoadModels(SecretStoreType.TRANSCRIPTION_PROVIDER, [], [], token, apiKeyProvisional);
|
||||
return await this.LoadModels(SecretStoreType.TRANSCRIPTION_PROVIDER, [], [], token, apiKeyProvisional);
|
||||
|
||||
default:
|
||||
return Task.FromResult(Enumerable.Empty<Provider.Model>());
|
||||
return [];
|
||||
}
|
||||
}
|
||||
catch (Exception e)
|
||||
{
|
||||
LOGGER.LogError(e, "Failed to load transcription models from self-hosted provider.");
|
||||
return Task.FromResult(Enumerable.Empty<Provider.Model>());
|
||||
LOGGER.LogError($"Failed to load transcription models from self-hosted provider: {e.Message}");
|
||||
return [];
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -64,7 +64,7 @@ public sealed class TerminalLogger() : ConsoleFormatter(FORMATTER_NAME)
|
||||
{
|
||||
textWriter.WriteLine();
|
||||
foreach (var line in stackTrace.Split('\n'))
|
||||
textWriter.WriteLine($" {line.TrimEnd()}");
|
||||
textWriter.WriteLine($" {colorCode}{line.TrimEnd()}{ANSI_RESET}");
|
||||
}
|
||||
}
|
||||
else
|
||||
|
||||
@ -19,7 +19,9 @@ public sealed class ProviderValidation
|
||||
public Func<IEnumerable<string>> GetUsedInstanceNames { get; init; } = () => [];
|
||||
|
||||
public Func<Host> GetHost { get; init; } = () => Host.NONE;
|
||||
|
||||
|
||||
public Func<bool> IsModelProvidedManually { get; init; } = () => false;
|
||||
|
||||
public string? ValidatingHostname(string hostname)
|
||||
{
|
||||
if(this.GetProvider() != LLMProviders.SELF_HOSTED)
|
||||
@ -70,12 +72,22 @@ public sealed class ProviderValidation
|
||||
|
||||
public string? ValidatingModel(Model model)
|
||||
{
|
||||
if(this.GetProvider() is LLMProviders.SELF_HOSTED && this.GetHost() == Host.LLAMA_CPP)
|
||||
// For NONE providers, no validation is needed:
|
||||
if (this.GetProvider() is LLMProviders.NONE)
|
||||
return null;
|
||||
|
||||
|
||||
// For self-hosted llama.cpp or whisper.cpp, no model selection needed
|
||||
// (model is loaded at startup):
|
||||
if (this.GetProvider() is LLMProviders.SELF_HOSTED && this.GetHost() is Host.LLAMA_CPP or Host.WHISPER_CPP)
|
||||
return null;
|
||||
|
||||
// For manually entered models, this validation doesn't apply:
|
||||
if (this.IsModelProvidedManually())
|
||||
return null;
|
||||
|
||||
if (model == default)
|
||||
return TB("Please select a model.");
|
||||
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
|
||||
@ -1,3 +1,4 @@
|
||||
using System.Collections.Concurrent;
|
||||
using System.Text;
|
||||
using System.Text.Json;
|
||||
using System.Text.Json.Serialization;
|
||||
@ -11,8 +12,50 @@ namespace AIStudio.Tools;
|
||||
|
||||
public static class WorkspaceBehaviour
|
||||
{
|
||||
private static readonly ILogger LOG = Program.LOGGER_FACTORY.CreateLogger(nameof(WorkspaceBehaviour));
|
||||
|
||||
private static string TB(string fallbackEN) => I18N.I.T(fallbackEN, typeof(WorkspaceBehaviour).Namespace, nameof(WorkspaceBehaviour));
|
||||
|
||||
|
||||
/// <summary>
|
||||
/// Semaphores for synchronizing chat storage operations per chat.
|
||||
/// This prevents race conditions when multiple threads try to write
|
||||
/// the same chat file simultaneously.
|
||||
/// </summary>
|
||||
private static readonly ConcurrentDictionary<string, SemaphoreSlim> CHAT_STORAGE_SEMAPHORES = new();
|
||||
|
||||
/// <summary>
|
||||
/// Timeout for acquiring the chat storage semaphore.
|
||||
/// </summary>
|
||||
private static readonly TimeSpan SEMAPHORE_TIMEOUT = TimeSpan.FromSeconds(6);
|
||||
|
||||
private static SemaphoreSlim GetChatSemaphore(Guid workspaceId, Guid chatId)
|
||||
{
|
||||
var key = $"{workspaceId}_{chatId}";
|
||||
return CHAT_STORAGE_SEMAPHORES.GetOrAdd(key, _ => new SemaphoreSlim(1, 1));
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Tries to acquire the chat storage semaphore within the configured timeout.
|
||||
/// </summary>
|
||||
/// <param name="workspaceId">The workspace ID.</param>
|
||||
/// <param name="chatId">The chat ID.</param>
|
||||
/// <param name="callerName">The name of the calling method for logging purposes.</param>
|
||||
/// <returns>A tuple containing whether the semaphore was acquired and the semaphore instance.</returns>
|
||||
private static async Task<(bool Acquired, SemaphoreSlim Semaphore)> TryAcquireChatSemaphoreAsync(Guid workspaceId, Guid chatId, string callerName)
|
||||
{
|
||||
var semaphore = GetChatSemaphore(workspaceId, chatId);
|
||||
var acquired = await semaphore.WaitAsync(SEMAPHORE_TIMEOUT);
|
||||
|
||||
if (!acquired)
|
||||
LOG.LogWarning("Failed to acquire chat storage semaphore within {Timeout} seconds for workspace '{WorkspaceId}', chat '{ChatId}' in method '{CallerName}'. Skipping operation to prevent potential race conditions or deadlocks.",
|
||||
SEMAPHORE_TIMEOUT.TotalSeconds,
|
||||
workspaceId,
|
||||
chatId,
|
||||
callerName);
|
||||
|
||||
return (acquired, semaphore);
|
||||
}
|
||||
|
||||
public static readonly JsonSerializerOptions JSON_OPTIONS = new()
|
||||
{
|
||||
WriteIndented = true,
|
||||
@ -37,35 +80,52 @@ public static class WorkspaceBehaviour
|
||||
|
||||
public static async Task StoreChat(ChatThread chat)
|
||||
{
|
||||
string chatDirectory;
|
||||
if (chat.WorkspaceId == Guid.Empty)
|
||||
chatDirectory = Path.Join(SettingsManager.DataDirectory, "tempChats", chat.ChatId.ToString());
|
||||
else
|
||||
chatDirectory = Path.Join(SettingsManager.DataDirectory, "workspaces", chat.WorkspaceId.ToString(), chat.ChatId.ToString());
|
||||
|
||||
// Ensure the directory exists:
|
||||
Directory.CreateDirectory(chatDirectory);
|
||||
|
||||
// Save the chat name:
|
||||
var chatNamePath = Path.Join(chatDirectory, "name");
|
||||
await File.WriteAllTextAsync(chatNamePath, chat.Name);
|
||||
|
||||
// Save the thread as thread.json:
|
||||
var chatPath = Path.Join(chatDirectory, "thread.json");
|
||||
await File.WriteAllTextAsync(chatPath, JsonSerializer.Serialize(chat, JSON_OPTIONS), Encoding.UTF8);
|
||||
// Try to acquire the semaphore for this specific chat to prevent concurrent writes to the same file:
|
||||
var (acquired, semaphore) = await TryAcquireChatSemaphoreAsync(chat.WorkspaceId, chat.ChatId, nameof(StoreChat));
|
||||
if (!acquired)
|
||||
return;
|
||||
|
||||
try
|
||||
{
|
||||
string chatDirectory;
|
||||
if (chat.WorkspaceId == Guid.Empty)
|
||||
chatDirectory = Path.Join(SettingsManager.DataDirectory, "tempChats", chat.ChatId.ToString());
|
||||
else
|
||||
chatDirectory = Path.Join(SettingsManager.DataDirectory, "workspaces", chat.WorkspaceId.ToString(), chat.ChatId.ToString());
|
||||
|
||||
// Ensure the directory exists:
|
||||
Directory.CreateDirectory(chatDirectory);
|
||||
|
||||
// Save the chat name:
|
||||
var chatNamePath = Path.Join(chatDirectory, "name");
|
||||
await File.WriteAllTextAsync(chatNamePath, chat.Name);
|
||||
|
||||
// Save the thread as thread.json:
|
||||
var chatPath = Path.Join(chatDirectory, "thread.json");
|
||||
await File.WriteAllTextAsync(chatPath, JsonSerializer.Serialize(chat, JSON_OPTIONS), Encoding.UTF8);
|
||||
}
|
||||
finally
|
||||
{
|
||||
semaphore.Release();
|
||||
}
|
||||
}
|
||||
|
||||
public static async Task<ChatThread?> LoadChat(LoadChat loadChat)
|
||||
{
|
||||
var chatPath = loadChat.WorkspaceId == Guid.Empty
|
||||
? Path.Join(SettingsManager.DataDirectory, "tempChats", loadChat.ChatId.ToString())
|
||||
: Path.Join(SettingsManager.DataDirectory, "workspaces", loadChat.WorkspaceId.ToString(), loadChat.ChatId.ToString());
|
||||
|
||||
if(!Directory.Exists(chatPath))
|
||||
// Try to acquire the semaphore for this specific chat to prevent concurrent read/writes to the same file:
|
||||
var (acquired, semaphore) = await TryAcquireChatSemaphoreAsync(loadChat.WorkspaceId, loadChat.ChatId, nameof(LoadChat));
|
||||
if (!acquired)
|
||||
return null;
|
||||
|
||||
|
||||
try
|
||||
{
|
||||
var chatPath = loadChat.WorkspaceId == Guid.Empty
|
||||
? Path.Join(SettingsManager.DataDirectory, "tempChats", loadChat.ChatId.ToString())
|
||||
: Path.Join(SettingsManager.DataDirectory, "workspaces", loadChat.WorkspaceId.ToString(), loadChat.ChatId.ToString());
|
||||
|
||||
if(!Directory.Exists(chatPath))
|
||||
return null;
|
||||
|
||||
var chatData = await File.ReadAllTextAsync(Path.Join(chatPath, "thread.json"), Encoding.UTF8);
|
||||
var chat = JsonSerializer.Deserialize<ChatThread>(chatData, JSON_OPTIONS);
|
||||
return chat;
|
||||
@ -74,6 +134,10 @@ public static class WorkspaceBehaviour
|
||||
{
|
||||
return null;
|
||||
}
|
||||
finally
|
||||
{
|
||||
semaphore.Release();
|
||||
}
|
||||
}
|
||||
|
||||
public static async Task<string> LoadWorkspaceName(Guid workspaceId)
|
||||
@ -144,7 +208,19 @@ public static class WorkspaceBehaviour
|
||||
else
|
||||
chatDirectory = Path.Join(SettingsManager.DataDirectory, "workspaces", chat.WorkspaceId.ToString(), chat.ChatId.ToString());
|
||||
|
||||
Directory.Delete(chatDirectory, true);
|
||||
// Try to acquire the semaphore to prevent deleting while another thread is writing:
|
||||
var (acquired, semaphore) = await TryAcquireChatSemaphoreAsync(workspaceId, chatId, nameof(DeleteChat));
|
||||
if (!acquired)
|
||||
return;
|
||||
|
||||
try
|
||||
{
|
||||
Directory.Delete(chatDirectory, true);
|
||||
}
|
||||
finally
|
||||
{
|
||||
semaphore.Release();
|
||||
}
|
||||
}
|
||||
|
||||
private static async Task EnsureWorkspace(Guid workspaceId, string workspaceName)
|
||||
|
||||
@ -25,133 +25,4 @@ window.clearDiv = function (divName) {
|
||||
|
||||
window.scrollToBottom = function(element) {
|
||||
element.scrollIntoView({ behavior: 'smooth', block: 'end', inline: 'nearest' });
|
||||
}
|
||||
|
||||
window.playSound = function(soundPath) {
|
||||
try {
|
||||
const audio = new Audio(soundPath);
|
||||
audio.play().catch(error => {
|
||||
console.warn('Failed to play sound effect:', error);
|
||||
});
|
||||
} catch (error) {
|
||||
console.warn('Error creating audio element:', error);
|
||||
}
|
||||
};
|
||||
|
||||
let mediaRecorder;
|
||||
let actualRecordingMimeType;
|
||||
let changedMimeType = false;
|
||||
let pendingChunkUploads = 0;
|
||||
|
||||
window.audioRecorder = {
|
||||
start: async function (dotnetRef, desiredMimeTypes = []) {
|
||||
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
|
||||
|
||||
// Play start recording sound effect:
|
||||
window.playSound('/sounds/start_recording.ogg');
|
||||
|
||||
// When only one mime type is provided as a string, convert it to an array:
|
||||
if (typeof desiredMimeTypes === 'string') {
|
||||
desiredMimeTypes = [desiredMimeTypes];
|
||||
}
|
||||
|
||||
// Log sent mime types for debugging:
|
||||
console.log('Audio recording - requested mime types: ', desiredMimeTypes);
|
||||
|
||||
let mimeTypes = desiredMimeTypes.filter(type => typeof type === 'string' && type.trim() !== '');
|
||||
|
||||
// Next, we have to ensure that we have some default mime types to check as well.
|
||||
// In case the provided list does not contain these, we append them:
|
||||
// Use provided mime types or fallback to a default list:
|
||||
const defaultMimeTypes = [
|
||||
'audio/webm',
|
||||
'audio/ogg',
|
||||
'audio/mp4',
|
||||
'audio/mpeg',
|
||||
''// Fallback to browser default
|
||||
];
|
||||
|
||||
defaultMimeTypes.forEach(type => {
|
||||
if (!mimeTypes.includes(type)) {
|
||||
mimeTypes.push(type);
|
||||
}
|
||||
});
|
||||
|
||||
console.log('Audio recording - final mime types to check (included defaults): ', mimeTypes);
|
||||
|
||||
// Find the first supported mime type:
|
||||
actualRecordingMimeType = mimeTypes.find(type =>
|
||||
type === '' || MediaRecorder.isTypeSupported(type)
|
||||
) || '';
|
||||
|
||||
console.log('Audio recording - the browser selected the following mime type for recording: ', actualRecordingMimeType);
|
||||
const options = actualRecordingMimeType ? { mimeType: actualRecordingMimeType } : {};
|
||||
mediaRecorder = new MediaRecorder(stream, options);
|
||||
|
||||
// In case the browser changed the mime type:
|
||||
actualRecordingMimeType = mediaRecorder.mimeType;
|
||||
console.log('Audio recording - actual mime type used by the browser: ', actualRecordingMimeType);
|
||||
|
||||
// Check the list of desired mime types against the actual one:
|
||||
if (!desiredMimeTypes.includes(actualRecordingMimeType)) {
|
||||
changedMimeType = true;
|
||||
console.warn(`Audio recording - requested mime types ('${desiredMimeTypes.join(', ')}') do not include the actual mime type used by the browser ('${actualRecordingMimeType}').`);
|
||||
} else {
|
||||
changedMimeType = false;
|
||||
}
|
||||
|
||||
// Reset the pending uploads counter:
|
||||
pendingChunkUploads = 0;
|
||||
|
||||
// Stream each chunk directly to .NET as it becomes available:
|
||||
mediaRecorder.ondataavailable = async (event) => {
|
||||
if (event.data.size > 0) {
|
||||
pendingChunkUploads++;
|
||||
try {
|
||||
const arrayBuffer = await event.data.arrayBuffer();
|
||||
const uint8Array = new Uint8Array(arrayBuffer);
|
||||
await dotnetRef.invokeMethodAsync('OnAudioChunkReceived', uint8Array);
|
||||
} catch (error) {
|
||||
console.error('Error sending audio chunk to .NET:', error);
|
||||
} finally {
|
||||
pendingChunkUploads--;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
mediaRecorder.start(3000); // read the recorded data in 3-second chunks
|
||||
return actualRecordingMimeType;
|
||||
},
|
||||
|
||||
stop: async function () {
|
||||
return new Promise((resolve) => {
|
||||
|
||||
// Add an event listener to handle the stop event:
|
||||
mediaRecorder.onstop = async () => {
|
||||
|
||||
// Wait for all pending chunk uploads to complete before finalizing:
|
||||
console.log(`Audio recording - waiting for ${pendingChunkUploads} pending uploads.`);
|
||||
while (pendingChunkUploads > 0) {
|
||||
await new Promise(r => setTimeout(r, 10)); // wait 10 ms before checking again
|
||||
}
|
||||
|
||||
console.log('Audio recording - all chunks uploaded, finalizing.');
|
||||
|
||||
// Play stop recording sound effect:
|
||||
window.playSound('/sounds/stop_recording.ogg');
|
||||
|
||||
// Stop all tracks to release the microphone:
|
||||
mediaRecorder.stream.getTracks().forEach(track => track.stop());
|
||||
|
||||
// No need to process data here anymore, just signal completion:
|
||||
resolve({
|
||||
mimeType: actualRecordingMimeType,
|
||||
changedMimeType: changedMimeType,
|
||||
});
|
||||
};
|
||||
|
||||
// Finally, stop the recording (which will actually trigger the onstop event):
|
||||
mediaRecorder.stop();
|
||||
});
|
||||
}
|
||||
};
|
||||
}
|
||||
306
app/MindWork AI Studio/wwwroot/audio.js
Normal file
306
app/MindWork AI Studio/wwwroot/audio.js
Normal file
@ -0,0 +1,306 @@
|
||||
// Shared the audio context for sound effects (Web Audio API does not register with Media Session):
|
||||
let soundEffectContext = null;
|
||||
|
||||
// Cache for decoded sound effect audio buffers:
|
||||
const soundEffectCache = new Map();
|
||||
|
||||
// Track the preload state:
|
||||
let soundEffectsPreloaded = false;
|
||||
|
||||
// Queue system: tracks when the next sound can start playing.
|
||||
// This prevents sounds from overlapping and getting "swallowed" by the audio system:
|
||||
let nextAvailablePlayTime = 0;
|
||||
|
||||
// Minimum gap between sounds in seconds (small buffer to ensure clean transitions):
|
||||
const SOUND_GAP_SECONDS = 0.25;
|
||||
|
||||
// List of all sound effects used in the app:
|
||||
const SOUND_EFFECT_PATHS = [
|
||||
'/sounds/start_recording.ogg',
|
||||
'/sounds/stop_recording.ogg',
|
||||
'/sounds/transcription_done.ogg'
|
||||
];
|
||||
|
||||
// Initialize the audio context with low-latency settings.
|
||||
// Should be called from a user interaction (click, keypress)
|
||||
// to satisfy browser autoplay policies:
|
||||
window.initSoundEffects = async function() {
|
||||
|
||||
if (soundEffectContext && soundEffectContext.state !== 'closed') {
|
||||
// Already initialized, just ensure it's running:
|
||||
if (soundEffectContext.state === 'suspended') {
|
||||
await soundEffectContext.resume();
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
// Create the context with the interactive latency hint for the lowest latency:
|
||||
soundEffectContext = new (window.AudioContext || window.webkitAudioContext)({
|
||||
latencyHint: 'interactive'
|
||||
});
|
||||
|
||||
// Resume immediately (needed for Safari/macOS):
|
||||
if (soundEffectContext.state === 'suspended') {
|
||||
await soundEffectContext.resume();
|
||||
}
|
||||
|
||||
// Reset the queue timing:
|
||||
nextAvailablePlayTime = 0;
|
||||
|
||||
//
|
||||
// Play a very short silent buffer to "warm up" the audio pipeline.
|
||||
// This helps prevent the first real sound from being cut off:
|
||||
//
|
||||
const silentBuffer = soundEffectContext.createBuffer(1, 1, soundEffectContext.sampleRate);
|
||||
const silentSource = soundEffectContext.createBufferSource();
|
||||
silentSource.buffer = silentBuffer;
|
||||
silentSource.connect(soundEffectContext.destination);
|
||||
silentSource.start(0);
|
||||
|
||||
console.log('Sound effects - AudioContext initialized with latency:', soundEffectContext.baseLatency);
|
||||
|
||||
// Preload all sound effects in parallel:
|
||||
if (!soundEffectsPreloaded) {
|
||||
await window.preloadSoundEffects();
|
||||
}
|
||||
} catch (error) {
|
||||
console.warn('Failed to initialize sound effects:', error);
|
||||
}
|
||||
};
|
||||
|
||||
// Preload all sound effect files into the cache:
|
||||
window.preloadSoundEffects = async function() {
|
||||
if (soundEffectsPreloaded) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Ensure that the context exists:
|
||||
if (!soundEffectContext || soundEffectContext.state === 'closed') {
|
||||
soundEffectContext = new (window.AudioContext || window.webkitAudioContext)({
|
||||
latencyHint: 'interactive'
|
||||
});
|
||||
}
|
||||
|
||||
console.log('Sound effects - preloading', SOUND_EFFECT_PATHS.length, 'sound files...');
|
||||
|
||||
const preloadPromises = SOUND_EFFECT_PATHS.map(async (soundPath) => {
|
||||
try {
|
||||
const response = await fetch(soundPath);
|
||||
const arrayBuffer = await response.arrayBuffer();
|
||||
const audioBuffer = await soundEffectContext.decodeAudioData(arrayBuffer);
|
||||
soundEffectCache.set(soundPath, audioBuffer);
|
||||
|
||||
console.log('Sound effects - preloaded:', soundPath, 'duration:', audioBuffer.duration.toFixed(2), 's');
|
||||
} catch (error) {
|
||||
console.warn('Sound effects - failed to preload:', soundPath, error);
|
||||
}
|
||||
});
|
||||
|
||||
await Promise.all(preloadPromises);
|
||||
soundEffectsPreloaded = true;
|
||||
console.log('Sound effects - all files preloaded');
|
||||
};
|
||||
|
||||
window.playSound = async function(soundPath) {
|
||||
try {
|
||||
// Initialize context if needed (fallback if initSoundEffects wasn't called):
|
||||
if (!soundEffectContext || soundEffectContext.state === 'closed') {
|
||||
soundEffectContext = new (window.AudioContext || window.webkitAudioContext)({
|
||||
latencyHint: 'interactive'
|
||||
});
|
||||
|
||||
nextAvailablePlayTime = 0;
|
||||
}
|
||||
|
||||
// Resume if suspended (browser autoplay policy):
|
||||
if (soundEffectContext.state === 'suspended') {
|
||||
await soundEffectContext.resume();
|
||||
}
|
||||
|
||||
// Check the cache for already decoded audio:
|
||||
let audioBuffer = soundEffectCache.get(soundPath);
|
||||
|
||||
if (!audioBuffer) {
|
||||
// Fetch and decode the audio file (fallback if not preloaded):
|
||||
console.log('Sound effects - loading on demand:', soundPath);
|
||||
const response = await fetch(soundPath);
|
||||
const arrayBuffer = await response.arrayBuffer();
|
||||
audioBuffer = await soundEffectContext.decodeAudioData(arrayBuffer);
|
||||
soundEffectCache.set(soundPath, audioBuffer);
|
||||
}
|
||||
|
||||
// Calculate when this sound should start:
|
||||
const currentTime = soundEffectContext.currentTime;
|
||||
let startTime;
|
||||
|
||||
if (currentTime >= nextAvailablePlayTime) {
|
||||
// No sound is playing, or the previous sound has finished; start immediately:
|
||||
startTime = 0; // 0 means "now" in Web Audio API
|
||||
nextAvailablePlayTime = currentTime + audioBuffer.duration + SOUND_GAP_SECONDS;
|
||||
} else {
|
||||
// A sound is still playing; schedule this sound to start after it:
|
||||
startTime = nextAvailablePlayTime;
|
||||
nextAvailablePlayTime = startTime + audioBuffer.duration + SOUND_GAP_SECONDS;
|
||||
console.log('Sound effects - queued:', soundPath, 'will play in', (startTime - currentTime).toFixed(2), 's');
|
||||
}
|
||||
|
||||
// Create a new source node and schedule playback:
|
||||
const source = soundEffectContext.createBufferSource();
|
||||
source.buffer = audioBuffer;
|
||||
source.connect(soundEffectContext.destination);
|
||||
source.start(startTime);
|
||||
console.log('Sound effects - playing:', soundPath);
|
||||
|
||||
} catch (error) {
|
||||
console.warn('Failed to play sound effect:', error);
|
||||
}
|
||||
};
|
||||
|
||||
let mediaRecorder;
|
||||
let actualRecordingMimeType;
|
||||
let changedMimeType = false;
|
||||
let pendingChunkUploads = 0;
|
||||
|
||||
// Store the media stream so we can close the microphone later:
|
||||
let activeMediaStream = null;
|
||||
|
||||
// Delay in milliseconds to wait after getUserMedia() for Bluetooth profile switch (A2DP → HFP):
|
||||
const BLUETOOTH_PROFILE_SWITCH_DELAY_MS = 1_600;
|
||||
|
||||
window.audioRecorder = {
|
||||
start: async function (dotnetRef, desiredMimeTypes = []) {
|
||||
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
|
||||
activeMediaStream = stream;
|
||||
|
||||
// Wait for Bluetooth headsets to complete the profile switch from A2DP to HFP.
|
||||
// This prevents the first sound from being cut off during the switch:
|
||||
console.log('Audio recording - waiting for Bluetooth profile switch...');
|
||||
await new Promise(r => setTimeout(r, BLUETOOTH_PROFILE_SWITCH_DELAY_MS));
|
||||
|
||||
// Play start recording sound effect:
|
||||
await window.playSound('/sounds/start_recording.ogg');
|
||||
|
||||
// When only one mime type is provided as a string, convert it to an array:
|
||||
if (typeof desiredMimeTypes === 'string') {
|
||||
desiredMimeTypes = [desiredMimeTypes];
|
||||
}
|
||||
|
||||
// Log sent mime types for debugging:
|
||||
console.log('Audio recording - requested mime types: ', desiredMimeTypes);
|
||||
|
||||
let mimeTypes = desiredMimeTypes.filter(type => typeof type === 'string' && type.trim() !== '');
|
||||
|
||||
// Next, we have to ensure that we have some default mime types to check as well.
|
||||
// In case the provided list does not contain these, we append them:
|
||||
// Use provided mime types or fallback to a default list:
|
||||
const defaultMimeTypes = [
|
||||
'audio/webm',
|
||||
'audio/ogg',
|
||||
'audio/mp4',
|
||||
'audio/mpeg',
|
||||
''// Fallback to browser default
|
||||
];
|
||||
|
||||
defaultMimeTypes.forEach(type => {
|
||||
if (!mimeTypes.includes(type)) {
|
||||
mimeTypes.push(type);
|
||||
}
|
||||
});
|
||||
|
||||
console.log('Audio recording - final mime types to check (included defaults): ', mimeTypes);
|
||||
|
||||
// Find the first supported mime type:
|
||||
actualRecordingMimeType = mimeTypes.find(type =>
|
||||
type === '' || MediaRecorder.isTypeSupported(type)
|
||||
) || '';
|
||||
|
||||
console.log('Audio recording - the browser selected the following mime type for recording: ', actualRecordingMimeType);
|
||||
const options = actualRecordingMimeType ? { mimeType: actualRecordingMimeType } : {};
|
||||
mediaRecorder = new MediaRecorder(stream, options);
|
||||
|
||||
// In case the browser changed the mime type:
|
||||
actualRecordingMimeType = mediaRecorder.mimeType;
|
||||
console.log('Audio recording - actual mime type used by the browser: ', actualRecordingMimeType);
|
||||
|
||||
// Check the list of desired mime types against the actual one:
|
||||
if (!desiredMimeTypes.includes(actualRecordingMimeType)) {
|
||||
changedMimeType = true;
|
||||
console.warn(`Audio recording - requested mime types ('${desiredMimeTypes.join(', ')}') do not include the actual mime type used by the browser ('${actualRecordingMimeType}').`);
|
||||
} else {
|
||||
changedMimeType = false;
|
||||
}
|
||||
|
||||
// Reset the pending uploads counter:
|
||||
pendingChunkUploads = 0;
|
||||
|
||||
// Stream each chunk directly to .NET as it becomes available:
|
||||
// Stream each recorded chunk directly to .NET as soon as it becomes available:
mediaRecorder.ondataavailable = async (event) => {
    if (event.data.size === 0) {
        return; // nothing recorded for this tick
    }

    // Track this upload so stop() can wait until every chunk has been delivered:
    pendingChunkUploads++;
    try {
        const chunkBytes = new Uint8Array(await event.data.arrayBuffer());
        await dotnetRef.invokeMethodAsync('OnAudioChunkReceived', chunkBytes);
    } catch (error) {
        console.error('Error sending audio chunk to .NET:', error);
    } finally {
        pendingChunkUploads--;
    }
};
|
||||
|
||||
mediaRecorder.start(3000); // read the recorded data in 3-second chunks
|
||||
return actualRecordingMimeType;
|
||||
},
|
||||
|
||||
stop: async function () {
|
||||
return new Promise((resolve) => {
|
||||
|
||||
// Add an event listener to handle the stop event:
|
||||
mediaRecorder.onstop = async () => {
|
||||
|
||||
// Wait for all pending chunk uploads to complete before finalizing:
|
||||
console.log(`Audio recording - waiting for ${pendingChunkUploads} pending uploads.`);
|
||||
while (pendingChunkUploads > 0) {
|
||||
await new Promise(r => setTimeout(r, 10)); // wait 10 ms before checking again
|
||||
}
|
||||
|
||||
console.log('Audio recording - all chunks uploaded, finalizing.');
|
||||
|
||||
// Play stop recording sound effect:
|
||||
await window.playSound('/sounds/stop_recording.ogg');
|
||||
|
||||
//
|
||||
// IMPORTANT: Do NOT release the microphone here!
|
||||
// Bluetooth headsets switch profiles (HFP → A2DP) when the microphone is released,
|
||||
// which causes audio to be interrupted. We keep the microphone open so that the
|
||||
// stop_recording and transcription_done sounds can play without interruption.
|
||||
//
|
||||
// Call window.audioRecorder.releaseMicrophone() after the last sound has played.
|
||||
//
|
||||
|
||||
// No need to process data here anymore, just signal completion:
|
||||
resolve({
|
||||
mimeType: actualRecordingMimeType,
|
||||
changedMimeType: changedMimeType,
|
||||
});
|
||||
};
|
||||
|
||||
// Finally, stop the recording (which will actually trigger the onstop event):
|
||||
mediaRecorder.stop();
|
||||
});
|
||||
},
|
||||
|
||||
// Release the microphone after all sounds have been played.
|
||||
// This should be called after the transcription_done sound to allow
|
||||
// Bluetooth headsets to switch back to A2DP profile without interrupting audio:
|
||||
releaseMicrophone: function () {
|
||||
if (activeMediaStream) {
|
||||
console.log('Audio recording - releasing microphone (Bluetooth will switch back to A2DP)');
|
||||
activeMediaStream.getTracks().forEach(track => track.stop());
|
||||
activeMediaStream = null;
|
||||
}
|
||||
}
|
||||
};
|
||||
@ -1,4 +1,13 @@
|
||||
# v26.1.2, build 232 (2026-01-xx xx:xx UTC)
|
||||
- Added the option to hide specific assistants via configuration plugins. This is useful for enterprise environments in organizations.
|
||||
- Added the current date and time to the system prompt for better context in conversations. Thanks Peer `peerschuett` for the contribution.
|
||||
- Fixed a logging bug that prevented log events from being recorded in some cases.
|
||||
- Improved error handling for model loading in provider dialogs (LLMs, embeddings, transcriptions).
|
||||
- Improved the microphone handling (transcription preview) so that all sound effects and the voice recording are processed without interruption.
|
||||
- Improved the handling of self-hosted providers in the configuration dialogs (LLMs, embeddings, and transcriptions) when the host cannot provide a list of models.
|
||||
- Improved the document analysis assistant (in preview) by allowing users to send results to a new chat to ask follow-up questions. Thanks to Sabrina `Sabrina-devops` for this contribution.
|
||||
- Fixed a bug that allowed adding a provider (LLM, embedding, or transcription) without selecting a model.
|
||||
- Fixed a bug with local transcription providers by handling errors correctly when the local provider is unavailable.
|
||||
- Fixed a bug with local transcription providers by correctly handling empty model IDs.
|
||||
- Fixed a bug affecting the transcription preview: previously, when you stopped music or other media, recorded or dictated text, and then tried to resume playback, the media wouldn’t resume as expected. This behavior is now fixed.
|
||||
- Fixed a rare bug that occurred when multiple threads tried to manage the same chat thread.
|
||||
Loading…
Reference in New Issue
Block a user