namespace AIStudio.Provider.OpenAI;

/// <summary>
/// Data model for a delta line in the chat completion response stream.
/// </summary>
/// <param name="Id">The id of the response.</param>
/// <param name="Object">The object describing the response.</param>
/// <param name="Created">The timestamp of the response.</param>
/// <param name="Model">The model used for the response.</param>
/// <param name="SystemFingerprint">The system fingerprint; together with the seed, this allows you to reproduce the response.</param>
/// <param name="Choices">The choices made by the AI.</param>
public record ChatCompletionDeltaStreamLine(
    string Id,
    string Object,
    uint Created,
    string Model,
    string SystemFingerprint,
    IList<ChatCompletionChoice> Choices) : IResponseStreamLine
{
    /// <summary>
    /// Parameterless constructor required for JSON deserialization; initializes
    /// all properties to empty defaults.
    /// </summary>
    public ChatCompletionDeltaStreamLine() : this(string.Empty, string.Empty, 0, string.Empty, string.Empty, [])
    {
    }

    /// <inheritdoc />
    public bool ContainsContent() => this.Choices.Count > 0;

    /// <inheritdoc />
    public ContentStreamChunk GetContent() => new(this.Choices[0].Delta.Content, []);

    #region Implementation of IAnnotationStreamLine

    //
    // Please note that there are multiple options where LLM providers might stream sources:
    //
    // - As part of the delta content while streaming. That would be part of this class.
    // - By using a dedicated stream event and data structure. That would be another class implementing IResponseStreamLine.
    //
    // Right now, OpenAI uses the latter approach, so we don't have any sources here. And
    // because no other provider does it yet, we don't have any implementation here either.
    //
    // One example where sources are part of the delta content is the Perplexity provider.
    //

    /// <inheritdoc />
    public bool ContainsSources() => false;

    /// <inheritdoc />
    public IList<ISource> GetSources() => [];

    #endregion
}