Skip to main content

PromptMessage

Chat message structures and role enumerations for LLM conversations.

Overview

Message types define chat messages with roles and multi-modal content arrays. These are the building blocks for all prompt conversations in the Adaline SDK.

MessageRole

MessageRole is the enumeration of chat-message roles. See the dedicated MessageRole page for full documentation.
from adaline_api.models.message_role import MessageRole
MessageRole is a str enum with the following values:
| Value | Enum Member | Description |
| --- | --- | --- |
| "system" | MessageRole.SYSTEM | System instructions or context |
| "user" | MessageRole.USER | User messages and input |
| "assistant" | MessageRole.ASSISTANT | AI assistant responses |
| "tool" | MessageRole.TOOL | Tool/function responses |
Example:
from adaline_api.models.message_role import MessageRole

role = MessageRole.USER        # 'user'
role = MessageRole.SYSTEM      # 'system'
role = MessageRole.ASSISTANT   # 'assistant'
role = MessageRole.TOOL        # 'tool'

# String comparison works
assert MessageRole.USER == "user"

PromptMessage

Chat message with role and multi-modal content for LLM conversations.
from adaline_api.models.prompt_message import PromptMessage

Fields

role
MessageRole
required
The message sender role. One of: "system", "user", "assistant", "tool".
content
list[MessageContent]
required
Array of content items. See MessageContent.

Examples

Simple Text Message

from adaline_api.models.prompt_message import PromptMessage
from adaline_api.models.message_content import MessageContent
from adaline_api.models.text_content import TextContent

message = PromptMessage(
    role="user",
    content=[
        MessageContent(actual_instance=TextContent(
            modality="text",
            value="Hello, how are you?"
        ))
    ]
)

System Message

system_message = PromptMessage(
    role="system",
    content=[
        MessageContent(actual_instance=TextContent(
            modality="text",
            value="You are a helpful AI assistant."
        ))
    ]
)

Multi-Modal Message

from adaline_api.models.image_content import ImageContent
from adaline_api.models.image_content_value import ImageContentValue

multi_modal = PromptMessage(
    role="user",
    content=[
        MessageContent(actual_instance=TextContent(
            modality="text",
            value="Describe this image"
        )),
        MessageContent(actual_instance=ImageContent(
            modality="image",
            detail="high",
            value=ImageContentValue.from_dict({
                "type": "url",
                "url": "https://example.com/img.jpg"
            })
        ))
    ]
)

Conversation History

conversation: list[PromptMessage] = [
    PromptMessage(
        role="system",
        content=[MessageContent(actual_instance=TextContent(
            modality="text", value="You are helpful."
        ))]
    ),
    PromptMessage(
        role="user",
        content=[MessageContent(actual_instance=TextContent(
            modality="text", value="What is 2+2?"
        ))]
    ),
    PromptMessage(
        role="assistant",
        content=[MessageContent(actual_instance=TextContent(
            modality="text", value="2+2 equals 4."
        ))]
    ),
    PromptMessage(
        role="user",
        content=[MessageContent(actual_instance=TextContent(
            modality="text", value="And 3+3?"
        ))]
    ),
    PromptMessage(
        role="assistant",
        content=[MessageContent(actual_instance=TextContent(
            modality="text", value="3+3 equals 6."
        ))]
    ),
]

Tool Call Sequence

import json
from adaline_api.models.tool_call_content import ToolCallContent
from adaline_api.models.tool_response_content import ToolResponseContent

# User asks
user_msg = PromptMessage(
    role="user",
    content=[MessageContent(actual_instance=TextContent(
        modality="text",
        value="What is the weather in Paris?"
    ))]
)

# Assistant requests tool
assistant_tool = PromptMessage(
    role="assistant",
    content=[MessageContent(actual_instance=ToolCallContent(
        modality="tool-call",
        index=0,
        id="call_123",
        name="get_weather",
        arguments=json.dumps({"city": "Paris"})
    ))]
)

# Tool responds
tool_msg = PromptMessage(
    role="tool",
    content=[MessageContent(actual_instance=ToolResponseContent(
        modality="tool-response",
        index=0,
        id="call_123",
        name="get_weather",
        data=json.dumps({"temp": 24, "conditions": "sunny"})
    ))]
)

# Assistant answers
assistant_answer = PromptMessage(
    role="assistant",
    content=[MessageContent(actual_instance=TextContent(
        modality="text",
        value="In Paris it is sunny and 24°C."
    ))]
)

Using with Deployments

from adaline.main import Adaline
from adaline_api.models.prompt_message import PromptMessage
from adaline_api.models.message_content import MessageContent
from adaline_api.models.text_content import TextContent

adaline = Adaline()

deployment = await adaline.get_latest_deployment(
    prompt_id="prompt_abc123",
    deployment_environment_id="environment_abc123"
)

# Get messages from deployment
system_messages: list[PromptMessage] = deployment.prompt.messages

# Add user message
user_message = PromptMessage(
    role="user",
    content=[MessageContent(actual_instance=TextContent(
        modality="text",
        value="Hello!"
    ))]
)

# Combine for API call
all_messages = [*system_messages, user_message]

Helper Functions

from adaline_api.models.prompt_message import PromptMessage
from adaline_api.models.message_content import MessageContent
from adaline_api.models.text_content import TextContent
from adaline_api.models.image_content import ImageContent


def create_text_message(role: str, text: str) -> PromptMessage:
    """Build a PromptMessage containing a single text content item.

    Args:
        role: The sender role, e.g. "system", "user", "assistant", or "tool".
        text: The plain-text body of the message.

    Returns:
        A PromptMessage whose content is one TextContent wrapper.
    """
    text_item = MessageContent(
        actual_instance=TextContent(modality="text", value=text)
    )
    return PromptMessage(role=role, content=[text_item])


def get_message_text(message: PromptMessage) -> str:
    """Return all text content of *message*, joined by single spaces.

    Non-text content items (images, tool calls, etc.) are skipped.
    """
    return " ".join(
        item.actual_instance.value
        for item in message.content
        if isinstance(item.actual_instance, TextContent)
    )


def has_images(message: PromptMessage) -> bool:
    """Return True if any content item of *message* is an ImageContent."""
    for item in message.content:
        if isinstance(item.actual_instance, ImageContent):
            return True
    return False


def filter_by_role(
    messages: list[PromptMessage], role: str
) -> list[PromptMessage]:
    """Return the subset of *messages* whose role equals *role*.

    Order is preserved; the input list is not modified.
    """
    matched: list[PromptMessage] = []
    for msg in messages:
        if msg.role == role:
            matched.append(msg)
    return matched


# Usage
msg = create_text_message("user", "Hello!")
text = get_message_text(msg)        # 'Hello!'
has_img = has_images(msg)           # False

Serialization

All autogen models support to_dict(), to_json(), from_dict(), and from_json() for serialization.
message = PromptMessage(
    role="user",
    content=[MessageContent(actual_instance=TextContent(
        modality="text", value="Hello!"
    ))]
)

# To dict / JSON
d = message.to_dict()
j = message.to_json()

# From dict / JSON
restored = PromptMessage.from_dict(d)
restored = PromptMessage.from_json(j)

JSON Examples

Simple

{
  "role": "user",
  "content": [
    { "modality": "text", "value": "Hello!" }
  ]
}

With Image

{
  "role": "user",
  "content": [
    { "modality": "text", "value": "Describe this" },
    {
      "modality": "image",
      "detail": "high",
      "value": {
        "type": "url",
        "url": "https://example.com/image.jpg"
      }
    }
  ]
}

Tool Call

{
  "role": "assistant",
  "content": [
    {
      "modality": "tool-call",
      "index": 0,
      "id": "call_123",
      "name": "get_weather",
      "arguments": "{\"city\":\"Paris\"}"
    }
  ]
}