diff --git a/app/MindWork AI Studio/Assistants/AssistantBase.razor.cs b/app/MindWork AI Studio/Assistants/AssistantBase.razor.cs
index f6582c39..fc9fcfff 100644
--- a/app/MindWork AI Studio/Assistants/AssistantBase.razor.cs
+++ b/app/MindWork AI Studio/Assistants/AssistantBase.razor.cs
@@ -293,7 +293,7 @@ public abstract partial class AssistantBase : ComponentBase, IMessageBusReceiver
         // Use the selected provider to get the AI response.
         // By awaiting this line, we wait for the entire
         // content to be streamed.
-        await aiText.CreateFromProviderAsync(this.providerSettings.CreateProvider(this.Logger), this.providerSettings.Model, this.lastUserPrompt, this.chatThread);
+        this.chatThread = await aiText.CreateFromProviderAsync(this.providerSettings.CreateProvider(this.Logger), this.providerSettings.Model, this.lastUserPrompt, this.chatThread);
 
         this.isProcessing = false;
         this.StateHasChanged();
diff --git a/app/MindWork AI Studio/Chat/ContentImage.cs b/app/MindWork AI Studio/Chat/ContentImage.cs
index 9f09e87c..c7e785eb 100644
--- a/app/MindWork AI Studio/Chat/ContentImage.cs
+++ b/app/MindWork AI Studio/Chat/ContentImage.cs
@@ -28,7 +28,7 @@ public sealed class ContentImage : IContent, IImageSource
     public Func<Task> StreamingEvent { get; set; } = () => Task.CompletedTask;
 
     /// <inheritdoc />
-    public Task CreateFromProviderAsync(IProvider provider, Model chatModel, IContent? lastPrompt, ChatThread? chatChatThread, CancellationToken token = default)
+    public Task<ChatThread> CreateFromProviderAsync(IProvider provider, Model chatModel, IContent? lastPrompt, ChatThread? chatChatThread, CancellationToken token = default)
     {
         throw new NotImplementedException();
     }
diff --git a/app/MindWork AI Studio/Chat/ContentText.cs b/app/MindWork AI Studio/Chat/ContentText.cs
index 82b5334c..0dccd262 100644
--- a/app/MindWork AI Studio/Chat/ContentText.cs
+++ b/app/MindWork AI Studio/Chat/ContentText.cs
@@ -36,10 +36,10 @@ public sealed class ContentText : IContent
     public Func<Task> StreamingEvent { get; set; } = () => Task.CompletedTask;
 
     /// <inheritdoc />
-    public async Task CreateFromProviderAsync(IProvider provider, Model chatModel, IContent? lastPrompt, ChatThread? chatThread, CancellationToken token = default)
+    public async Task<ChatThread> CreateFromProviderAsync(IProvider provider, Model chatModel, IContent? lastPrompt, ChatThread? chatThread, CancellationToken token = default)
     {
         if(chatThread is null)
-            return;
+            return new();
 
         // Call the RAG process. Right now, we only have one RAG process:
         if (lastPrompt is not null)
@@ -115,6 +115,7 @@ public sealed class ContentText : IContent
 
         // Inform the UI that the streaming is done:
         await this.StreamingDone();
+        return chatThread;
     }
 
     #endregion
diff --git a/app/MindWork AI Studio/Chat/IContent.cs b/app/MindWork AI Studio/Chat/IContent.cs
index 8ca94025..be3bf097 100644
--- a/app/MindWork AI Studio/Chat/IContent.cs
+++ b/app/MindWork AI Studio/Chat/IContent.cs
@@ -41,7 +41,7 @@ public interface IContent
     /// <summary>
    /// Uses the provider to create the content.
    /// </summary>
-    public Task CreateFromProviderAsync(IProvider provider, Model chatModel, IContent? lastPrompt, ChatThread? chatChatThread, CancellationToken token = default);
+    public Task<ChatThread> CreateFromProviderAsync(IProvider provider, Model chatModel, IContent? lastPrompt, ChatThread? chatChatThread, CancellationToken token = default);
 
     /// <summary>
     /// Returns the corresponding ERI content type.
diff --git a/app/MindWork AI Studio/Components/ChatComponent.razor.cs b/app/MindWork AI Studio/Components/ChatComponent.razor.cs
index 67d805d2..2e05c0fe 100644
--- a/app/MindWork AI Studio/Components/ChatComponent.razor.cs
+++ b/app/MindWork AI Studio/Components/ChatComponent.razor.cs
@@ -475,7 +475,7 @@ public partial class ChatComponent : MSGComponentBase, IAsyncDisposable
             // Use the selected provider to get the AI response.
             // By awaiting this line, we wait for the entire
             // content to be streamed.
-            await aiText.CreateFromProviderAsync(this.Provider.CreateProvider(this.Logger), this.Provider.Model, lastUserPrompt, this.ChatThread, this.cancellationTokenSource.Token);
+            this.ChatThread = await aiText.CreateFromProviderAsync(this.Provider.CreateProvider(this.Logger), this.Provider.Model, lastUserPrompt, this.ChatThread, this.cancellationTokenSource.Token);
         }
 
         this.cancellationTokenSource = null;
diff --git a/app/MindWork AI Studio/Pages/Writer.razor.cs b/app/MindWork AI Studio/Pages/Writer.razor.cs
index 76409c32..42b23abc 100644
--- a/app/MindWork AI Studio/Pages/Writer.razor.cs
+++ b/app/MindWork AI Studio/Pages/Writer.razor.cs
@@ -139,7 +139,7 @@ public partial class Writer : MSGComponentBase, IAsyncDisposable
         this.isStreaming = true;
         this.StateHasChanged();
 
-        await aiText.CreateFromProviderAsync(this.providerSettings.CreateProvider(this.Logger), this.providerSettings.Model, lastUserPrompt, this.chatThread);
+        this.chatThread = await aiText.CreateFromProviderAsync(this.providerSettings.CreateProvider(this.Logger), this.providerSettings.Model, lastUserPrompt, this.chatThread);
 
         this.suggestion = aiText.Text;
         this.isStreaming = false;
diff --git a/app/MindWork AI Studio/wwwroot/changelog/v0.9.32.md b/app/MindWork AI Studio/wwwroot/changelog/v0.9.32.md
index 3e78d53a..9871b058 100644
--- a/app/MindWork AI Studio/wwwroot/changelog/v0.9.32.md
+++ b/app/MindWork AI Studio/wwwroot/changelog/v0.9.32.md
@@ -4,4 +4,5 @@
 - Improved the ERI client to raise an error when the server responds with additional JSON data that is not expected.
 - Improved the error handling in the ERI data source info dialog in cases where servers respond with an invalid message.
 - Improved the error handling for the entire RAG process.
+- Improved chat thread persistence after modifications through the RAG process.
 - Fixed the chat thread we use for the data retrieval by removing the last block, which is meant to be for the final AI answer.
\ No newline at end of file
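
Why this shape: the RAG step may create or replace the chat thread, so `CreateFromProviderAsync` now returns the thread (`Task<ChatThread>`) and each caller reassigns its own field from the returned value instead of relying on in-place mutation. Below is a minimal C# sketch of that pattern with simplified stand-in types; `IContentSketch`, `ContentTextSketch`, and `CallerSketch` are illustrative names, not the actual AI Studio types.

    using System.Threading;
    using System.Threading.Tasks;

    // Simplified stand-in for the real chat thread type.
    public sealed class ChatThread { }

    public interface IContentSketch
    {
        // The caller passes its current thread and must keep the returned instance,
        // because the implementation may create or replace the thread (e.g. via RAG).
        Task<ChatThread> CreateFromProviderAsync(ChatThread? chatThread, CancellationToken token = default);
    }

    public sealed class ContentTextSketch : IContentSketch
    {
        public async Task<ChatThread> CreateFromProviderAsync(ChatThread? chatThread, CancellationToken token = default)
        {
            // No thread yet: hand back a fresh one instead of silently doing nothing.
            if (chatThread is null)
                return new ChatThread();

            // ... RAG augmentation and response streaming would happen here ...
            await Task.CompletedTask;

            // Return the (possibly modified) thread so the caller can persist it.
            return chatThread;
        }
    }

    public sealed class CallerSketch
    {
        private ChatThread? chatThread;

        public async Task SendAsync(IContentSketch aiText, CancellationToken token)
        {
            // Reassign instead of discarding the result; otherwise a thread that was
            // replaced or extended inside CreateFromProviderAsync would be lost.
            this.chatThread = await aiText.CreateFromProviderAsync(this.chatThread, token);
        }
    }

The `CallerSketch` reassignment mirrors the changes in AssistantBase, ChatComponent, and Writer above.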