Coverage for src/lite_agent/types/messages.py: 95%
174 statements
« prev ^ index » next coverage.py v7.10.5, created at 2025-08-25 22:58 +0900
import json
from collections.abc import Sequence
from datetime import datetime, timezone
from typing import Any, Literal, NotRequired, TypedDict

from pydantic import BaseModel, Field, model_validator
# Base metadata type
class MessageMeta(BaseModel):
    """Base metadata for all message types"""

    # Timezone-aware UTC timestamp captured when the metadata object is created.
    sent_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
class BasicMessageMeta(MessageMeta):
    """Basic metadata for user messages and function calls"""

    # Duration of the associated function/tool execution, when measured.
    execution_time_ms: int | None = None
class LLMResponseMeta(MessageMeta):
    """Metadata for LLM responses, includes performance metrics"""

    # NOTE(review): presumably time to first token — confirm with the producer.
    latency_ms: int | None = None
    # Time spent producing the output, when measured.
    output_time_ms: int | None = None
    # Token counts reported by the provider, when available.
    input_tokens: int | None = None
    output_tokens: int | None = None
# New unified metadata types


class MessageUsage(BaseModel):
    """Token usage statistics for messages"""

    input_tokens: int | None = None
    output_tokens: int | None = None
    # NOTE(review): presumably input + output as reported by the provider — confirm.
    total_tokens: int | None = None
class AssistantMessageMeta(MessageMeta):
    """Enhanced metadata for assistant messages"""

    # Identifier of the model that produced the response.
    model: str | None = None
    # Aggregated token usage for this response, when reported.
    usage: MessageUsage | None = None
    # Timing metrics; all optional because not every provider reports them.
    total_time_ms: int | None = None
    latency_ms: int | None = None
    output_time_ms: int | None = None
class ResponseInputImageDict(TypedDict):
    """Dict form of an image input part (mirrors the ResponseInputImage model)."""

    detail: NotRequired[Literal["low", "high", "auto"]]
    type: Literal["input_image"]
    # NOTE(review): both keys are required but nullable (unlike `detail`, which is
    # optional) — confirm this asymmetry is intended.
    file_id: str | None
    image_url: str | None
class ResponseInputTextDict(TypedDict):
    """Dict form of a text input part (mirrors the ResponseInputText model)."""

    text: str
    type: Literal["input_text"]
# TypedDict definitions for better type hints


class UserMessageDict(TypedDict):
    """User message in plain-dict form; content is a string or rich input parts."""

    role: Literal["user"]
    content: str | Sequence[ResponseInputTextDict | ResponseInputImageDict]
class AssistantMessageDict(TypedDict):
    """Assistant message in plain-dict form (text content only)."""

    role: Literal["assistant"]
    content: str
class SystemMessageDict(TypedDict):
    """System message in plain-dict form."""

    role: Literal["system"]
    content: str
class FunctionCallDict(TypedDict):
    """Dict form of a tool/function invocation emitted by the model."""

    type: Literal["function_call"]
    call_id: str
    name: str
    # Argument payload; presumably JSON-encoded — confirm against producers.
    arguments: str
    # NOTE(review): unclear why a function call carries `content` in addition to
    # `arguments`; verify against the code that builds these dicts.
    content: str
class FunctionCallOutputDict(TypedDict):
    """Dict form of a tool/function result, keyed back to its originating call_id."""

    type: Literal["function_call_output"]
    call_id: str
    output: str
# Union type for all supported message dictionary formats
MessageDict = UserMessageDict | AssistantMessageDict | SystemMessageDict | FunctionCallDict | FunctionCallOutputDict
# New structured message content types
class UserTextContent(BaseModel):
    """Plain-text part of a structured user message."""

    type: Literal["text"] = "text"
    text: str
class UserImageContent(BaseModel):
    """Image part of a structured user message, backed by a URL or an uploaded file."""

    type: Literal["image"] = "image"
    image_url: str | None = None
    file_id: str | None = None
    detail: Literal["low", "high", "auto"] = "auto"

    @model_validator(mode="after")
    def validate_image_source(self) -> "UserImageContent":
        """Reject images that provide neither a file reference nor a URL."""
        if not (self.file_id or self.image_url):
            msg = "UserImageContent must have either file_id or image_url"
            raise ValueError(msg)
        return self
class UserFileContent(BaseModel):
    """File attachment part of a user message, referenced by provider file id."""

    type: Literal["file"] = "file"
    file_id: str
    # Optional human-readable display name for the file.
    file_name: str | None = None
# Union of every content part a structured user message may hold.
UserMessageContent = UserTextContent | UserImageContent | UserFileContent
class AssistantTextContent(BaseModel):
    """Plain-text part of a structured assistant message."""

    type: Literal["text"] = "text"
    text: str
class AssistantToolCall(BaseModel):
    """Tool/function invocation requested by the assistant."""

    type: Literal["tool_call"] = "tool_call"
    call_id: str
    name: str
    # Either the raw (presumably JSON) argument string or an already-parsed mapping.
    arguments: dict[str, Any] | str
class AssistantToolCallResult(BaseModel):
    """Result of a tool call, keyed back to the originating call_id."""

    type: Literal["tool_call_result"] = "tool_call_result"
    call_id: str
    output: str
    # How long the tool ran, when measured.
    execution_time_ms: int | None = None
# Union of every content part a structured assistant message may hold.
AssistantMessageContent = AssistantTextContent | AssistantToolCall | AssistantToolCallResult
# New structured message types
class NewUserMessage(BaseModel):
    """User message with structured content support"""

    role: Literal["user"] = "user"
    content: list[UserMessageContent]
    # Fresh timestamped metadata by default.
    meta: MessageMeta = Field(default_factory=MessageMeta)
class NewSystemMessage(BaseModel):
    """System message"""

    role: Literal["system"] = "system"
    # System prompts are plain text only, unlike user/assistant content.
    content: str
    meta: MessageMeta = Field(default_factory=MessageMeta)
class NewAssistantMessage(BaseModel):
    """Assistant message with structured content and metadata"""

    role: Literal["assistant"] = "assistant"
    content: list[AssistantMessageContent]
    # Assistant messages use the richer metadata carrying model/usage/timing info.
    meta: AssistantMessageMeta = Field(default_factory=AssistantMessageMeta)
# Union type for new structured messages
NewMessage = NewUserMessage | NewSystemMessage | NewAssistantMessage
# Ordered collection of structured messages (read-only view is sufficient).
NewMessages = Sequence[NewMessage]
# Response API format input types
class ResponseInputText(BaseModel):
    """Text input part in the Response API format."""

    type: Literal["input_text"] = "input_text"
    text: str
class ResponseInputImage(BaseModel):
    """Image input part in the Response API format, backed by a URL or a file id."""

    detail: Literal["low", "high", "auto"] = "auto"
    type: Literal["input_image"] = "input_image"
    file_id: str | None = None
    image_url: str | None = None

    @model_validator(mode="after")
    def validate_image_source(self) -> "ResponseInputImage":
        """Ensure at least one of file_id or image_url is provided."""
        if not (self.file_id or self.image_url):
            msg = "ResponseInputImage must have either file_id or image_url"
            raise ValueError(msg)
        return self
# Compatibility types for old completion API format
class UserMessageContentItemText(BaseModel):
    """Legacy completion-API text content item."""

    type: Literal["text"]
    text: str
class UserMessageContentItemImageURLImageURL(BaseModel):
    """Inner `image_url` object of the legacy completion-API image item."""

    url: str
class UserMessageContentItemImageURL(BaseModel):
    """Legacy completion-API image content item (nested `image_url` wrapper)."""

    type: Literal["image_url"]
    image_url: UserMessageContentItemImageURLImageURL
# Legacy compatibility wrapper classes
class AgentUserMessage(NewUserMessage):
    """Backward-compatible user message that also accepts a plain string body."""

    def __init__(
        self,
        content: str | list[UserMessageContent] | None = None,
        *,
        role: Literal["user"] = "user",
        meta: MessageMeta | None = None,
    ):
        # Normalize legacy inputs into the structured content list.
        if content is None:
            normalized: list[UserMessageContent] = []
        elif isinstance(content, str):
            normalized = [UserTextContent(text=content)]
        else:
            normalized = content
        super().__init__(role=role, content=normalized, meta=meta or MessageMeta())
class AgentAssistantMessage(NewAssistantMessage):
    """Backward-compatible assistant message that also accepts a plain string body."""

    def __init__(
        self,
        content: str | list[AssistantMessageContent] | None = None,
        *,
        role: Literal["assistant"] = "assistant",
        meta: AssistantMessageMeta | None = None,
    ):
        # Normalize legacy inputs into the structured content list.
        if content is None:
            normalized: list[AssistantMessageContent] = []
        elif isinstance(content, str):
            normalized = [AssistantTextContent(text=content)]
        else:
            normalized = content
        super().__init__(role=role, content=normalized, meta=meta or AssistantMessageMeta())
# Aliases kept for backward compatibility with older call sites.
AgentSystemMessage = NewSystemMessage
RunnerMessage = NewMessage
# Streaming processor types
class AssistantMessage(BaseModel):
    """
    Temporary assistant message used during streaming processing.

    This is a simplified message format used internally by completion event processors
    to accumulate streaming content before converting to the final NewAssistantMessage format.
    """

    role: Literal["assistant"] = "assistant"
    # Provider-assigned completion/message id, if any.
    id: str = ""
    # Choice index within the streamed completion, when present.
    index: int | None = None
    # Text accumulated from stream deltas so far.
    content: str = ""
    # Raw tool-call fragments as received; shape depends on the provider SDK.
    tool_calls: list[Any] | None = None
# Enhanced type definitions for better type hints
# FlexibleRunnerMessage for internal storage - only NewMessage types
FlexibleRunnerMessage = NewMessage
RunnerMessages = Sequence[FlexibleRunnerMessage]

# Input types that can be converted - includes dict for backward compatibility
FlexibleInputMessage = NewMessage | dict[str, Any]
InputMessages = Sequence[FlexibleInputMessage]

# Type alias for user input - supports string, single message, or sequence of messages
UserInput = str | FlexibleInputMessage | InputMessages
def user_message_to_llm_dict(message: NewUserMessage) -> dict[str, Any]:
    """Convert NewUserMessage to dict for LLM API"""
    parts = message.content
    # A lone text part collapses to a plain string; anything else is sent
    # as a list of dumped content-part dicts.
    if len(parts) == 1 and parts[0].type == "text":
        payload: Any = parts[0].text
    else:
        payload = [part.model_dump() for part in parts]
    return {"role": message.role, "content": payload}
def system_message_to_llm_dict(message: NewSystemMessage) -> dict[str, Any]:
    """Serialize a system message into the plain role/content mapping for the LLM API."""
    return {
        "role": message.role,
        "content": message.content,
    }
def assistant_message_to_llm_dict(message: NewAssistantMessage) -> dict[str, Any]:
    """Convert NewAssistantMessage to dict for LLM API.

    Text parts are joined with spaces into ``content`` (``None`` when there is
    no text); tool calls are emitted under ``tool_calls`` in the OpenAI
    function-call shape. ``tool_call_result`` items are skipped here —
    presumably results are sent as separate tool-role messages by the caller
    (confirm against the message-building code).
    """
    # Separate text content from tool calls
    text_parts: list[str] = []
    tool_calls: list[dict[str, Any]] = []

    for item in message.content:
        if item.type == "text":
            text_parts.append(item.text)
        elif item.type == "tool_call":
            # Fix: dict arguments must be JSON-encoded. str() on a dict emits a
            # Python repr (single quotes), which is not valid JSON for the API.
            arguments = item.arguments if isinstance(item.arguments, str) else json.dumps(item.arguments)
            tool_calls.append(
                {
                    "id": item.call_id,
                    "type": "function",
                    "function": {
                        "name": item.name,
                        "arguments": arguments,
                    },
                },
            )

    result: dict[str, Any] = {
        "role": message.role,
        "content": " ".join(text_parts) if text_parts else None,
    }

    if tool_calls:
        result["tool_calls"] = tool_calls

    return result
def message_to_llm_dict(message: NewMessage) -> dict[str, Any]:
    """Convert any NewMessage to dict for LLM API by dispatching on its concrete type."""
    dispatch = (
        (NewUserMessage, user_message_to_llm_dict),
        (NewSystemMessage, system_message_to_llm_dict),
        (NewAssistantMessage, assistant_message_to_llm_dict),
    )
    for message_type, converter in dispatch:
        if isinstance(message, message_type):
            return converter(message)
    # Fallback for anything outside the known union; meta is never sent to the API.
    return message.model_dump(exclude={"meta"})
def messages_to_llm_format(messages: Sequence[NewMessage]) -> list[dict[str, Any]]:
    """Convert a sequence of NewMessage to LLM format, excluding meta data."""
    return list(map(message_to_llm_dict, messages))