Add multimodal message support with content type enumeration and sub-content models

This commit is contained in:
Thorsten Sommer 2025-12-28 20:09:33 +01:00
parent 8a966737a5
commit 4881b1a095
Signed by: tsommer
GPG Key ID: 371BBA77A02C0108
7 changed files with 109 additions and 0 deletions

View File

@ -0,0 +1,34 @@
namespace AIStudio.Provider.OpenAI;
/// <summary>
/// Content types for OpenAI API interactions when using multimodal messages.
/// </summary>
/// <remarks>
/// NOTE(review): the SCREAMING_CASE member names presumably correspond to the API's
/// snake_case wire values (e.g. "input_text") via a serializer naming policy —
/// confirm against the JSON converter configuration before relying on this.
/// </remarks>
public enum ContentType
{
/// <summary>
/// Default type for user prompts in multimodal messages. This type is supported across all providers.
/// </summary>
TEXT,
/// <summary>
/// Right now only supported by OpenAI and its responses API. Even other providers that support multimodal messages
/// and the responses API do not support this type. They use TEXT instead.
/// </summary>
INPUT_TEXT,
/// <summary>
/// Right now only supported by OpenAI and its responses API. Even other providers that support multimodal messages
/// and the responses API do not support this type. They use IMAGE_URL instead.
/// </summary>
INPUT_IMAGE,
/// <summary>
/// Right now only supported by OpenAI (responses &amp; chat completion API), Google (chat completions API), and Mistral (chat completions API).
/// </summary>
INPUT_AUDIO,
/// <summary>
/// Default type for images in multimodal messages. This type is supported across all providers.
/// </summary>
IMAGE_URL,
}

View File

@ -0,0 +1,12 @@
namespace AIStudio.Provider.OpenAI;
/// <summary>
/// Common contract implemented by every sub-content item of a multimodal message.
/// </summary>
public interface ISubContent
{
    /// <summary>
    /// Discriminator describing what kind of content this item carries.
    /// </summary>
    ContentType Type { get; init; }
}

View File

@ -0,0 +1,13 @@
namespace AIStudio.Provider.OpenAI;
/// <summary>
/// A multimodal chat message model that can contain various types of content.
/// </summary>
/// <param name="Content">The list of sub-contents making up the message.</param>
/// <param name="Role">The role of the message.</param>
public record MultimodalMessage(List<ISubContent> Content, string Role) : IMessage<List<ISubContent>>
{
    /// <summary>
    /// Creates an empty message: no sub-contents and an empty role.
    /// </summary>
    public MultimodalMessage() : this(new List<ISubContent>(), string.Empty)
    {
    }
}

View File

@ -0,0 +1,11 @@
namespace AIStudio.Provider.OpenAI;
/// <summary>
/// Image sub-content for multimodal messages.
/// </summary>
/// <param name="Type">The content type; expected to be <see cref="ContentType.IMAGE_URL"/>.</param>
/// <param name="ImageUrl">The location of the image — NOTE(review): possibly also a data URI; confirm against callers.</param>
public record SubContentImageUrl(ContentType Type, string ImageUrl) : ISubContent
{
    /// <summary>
    /// Creates an image sub-content with its default type and no image location yet.
    /// </summary>
    public SubContentImageUrl() : this(ContentType.IMAGE_URL, string.Empty) { }
}

View File

@ -0,0 +1,14 @@
namespace AIStudio.Provider.OpenAI;
/// <summary>
/// Image input sub-content for multimodal messages.
/// </summary>
/// <remarks>
/// Right now, this is used only by OpenAI in its responses API.
/// </remarks>
/// <param name="Type">The content type; expected to be <see cref="ContentType.INPUT_IMAGE"/>.</param>
/// <param name="ImageUrl">The location of the image — NOTE(review): possibly also a data URI; confirm against callers.</param>
public record SubContentInputImage(ContentType Type, string ImageUrl) : ISubContent
{
    /// <summary>
    /// Creates an input-image sub-content with its default type and no image location yet.
    /// </summary>
    public SubContentInputImage() : this(ContentType.INPUT_IMAGE, string.Empty) { }
}

View File

@ -0,0 +1,14 @@
namespace AIStudio.Provider.OpenAI;
/// <summary>
/// Text input sub-content for multimodal messages.
/// </summary>
/// <remarks>
/// Right now, this is used only by OpenAI in its responses API.
/// </remarks>
/// <param name="Type">The content type; expected to be <see cref="ContentType.INPUT_TEXT"/>.</param>
/// <param name="Text">The textual payload of this sub-content.</param>
public record SubContentInputText(ContentType Type, string Text) : ISubContent
{
    /// <summary>
    /// Creates an input-text sub-content with its default type and an empty text.
    /// </summary>
    public SubContentInputText() : this(ContentType.INPUT_TEXT, string.Empty) { }
}

View File

@ -0,0 +1,11 @@
namespace AIStudio.Provider.OpenAI;
/// <summary>
/// Text sub-content for multimodal messages.
/// </summary>
/// <param name="Type">The content type; expected to be <see cref="ContentType.TEXT"/>.</param>
/// <param name="Text">The textual payload of this sub-content.</param>
public record SubContentText(ContentType Type, string Text) : ISubContent
{
    /// <summary>
    /// Creates a text sub-content with its default type and an empty text.
    /// </summary>
    public SubContentText() : this(ContentType.TEXT, string.Empty) { }
}