Coverage for src/lite_agent/types/messages.py: 95%

174 statements  

« prev     ^ index     » next       coverage.py v7.10.5, created at 2025-08-25 22:58 +0900

import json
from collections.abc import Sequence
from datetime import datetime, timezone
from typing import Any, Literal, NotRequired, TypedDict

from pydantic import BaseModel, Field, model_validator

6 

7 

# Base metadata type
class MessageMeta(BaseModel):
    """Base metadata for all message types.

    Every message variant carries at least a send timestamp.
    """

    # Defaults to "now" in UTC at model construction time (timezone-aware).
    sent_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))

13 

14 

class BasicMessageMeta(MessageMeta):
    """Basic metadata for user messages and function calls."""

    # Duration of the associated operation in milliseconds, if measured.
    execution_time_ms: int | None = None

19 

20 

class LLMResponseMeta(MessageMeta):
    """Metadata for LLM responses, includes performance metrics."""

    # NOTE(review): timing-field semantics inferred from names — confirm with the producer.
    latency_ms: int | None = None
    output_time_ms: int | None = None
    input_tokens: int | None = None
    output_tokens: int | None = None

28 

29 

# New unified metadata types


class MessageUsage(BaseModel):
    """Token usage statistics for messages.

    All fields are optional because not every provider reports usage.
    """

    input_tokens: int | None = None
    output_tokens: int | None = None
    # Provider-reported total; presumably input + output — confirm per provider.
    total_tokens: int | None = None

39 

40 

class AssistantMessageMeta(MessageMeta):
    """Enhanced metadata for assistant messages."""

    # Identifier of the model that produced the reply, when known.
    model: str | None = None
    # Token accounting, when the provider reports it.
    usage: MessageUsage | None = None
    # NOTE(review): timing-field semantics inferred from names — confirm with the producer.
    total_time_ms: int | None = None
    latency_ms: int | None = None
    output_time_ms: int | None = None

49 

50 

class ResponseInputImageDict(TypedDict):
    """Response-API image input item in plain-dict form."""

    # Resolution hint for the provider; may be omitted entirely.
    detail: NotRequired[Literal["low", "high", "auto"]]
    type: Literal["input_image"]
    file_id: str | None
    image_url: str | None

56 

57 

class ResponseInputTextDict(TypedDict):
    """Response-API text input item in plain-dict form."""

    text: str
    type: Literal["input_text"]

61 

62 

# TypedDict definitions for better type hints
class UserMessageDict(TypedDict):
    """Dict form of a user message: plain text or a sequence of input parts."""

    role: Literal["user"]
    content: str | Sequence[ResponseInputTextDict | ResponseInputImageDict]

67 

68 

class AssistantMessageDict(TypedDict):
    """Dict form of an assistant message (text content only)."""

    role: Literal["assistant"]
    content: str

72 

73 

class SystemMessageDict(TypedDict):
    """Dict form of a system message."""

    role: Literal["system"]
    content: str

77 

78 

class FunctionCallDict(TypedDict):
    """Dict form of a function/tool call emitted by the assistant."""

    type: Literal["function_call"]
    call_id: str
    name: str
    # Presumably a JSON-encoded argument payload kept as a raw string — confirm.
    arguments: str
    # NOTE(review): a content field is unusual on a function_call item — confirm callers use it.
    content: str

85 

86 

class FunctionCallOutputDict(TypedDict):
    """Dict form of a tool-call result, matched to its call by call_id."""

    type: Literal["function_call_output"]
    call_id: str
    output: str

91 

92 

# Union type for all supported message dictionary formats.
# Accepted wherever callers pass raw dicts instead of structured models.
MessageDict = UserMessageDict | AssistantMessageDict | SystemMessageDict | FunctionCallDict | FunctionCallOutputDict

95 

96 

# New structured message content types
class UserTextContent(BaseModel):
    """Text part of a structured user message."""

    # Discriminator for the UserMessageContent union.
    type: Literal["text"] = "text"
    text: str

101 

102 

class UserImageContent(BaseModel):
    """Image part of a structured user message.

    The image may be referenced by an uploaded file id or a URL; at least one
    must be set (empty strings are rejected by the validator below).
    """

    # Discriminator for the UserMessageContent union.
    type: Literal["image"] = "image"
    image_url: str | None = None
    file_id: str | None = None
    # Resolution hint passed through to the provider.
    detail: Literal["low", "high", "auto"] = "auto"

    @model_validator(mode="after")
    def validate_image_source(self) -> "UserImageContent":
        """Reject instances that reference no image source at all."""
        # Truthiness check on purpose: "" counts as missing, same as None.
        if not self.file_id and not self.image_url:
            msg = "UserImageContent must have either file_id or image_url"
            raise ValueError(msg)
        return self

115 

116 

class UserFileContent(BaseModel):
    """File attachment part of a structured user message."""

    # Discriminator for the UserMessageContent union.
    type: Literal["file"] = "file"
    file_id: str
    # Display name only; the file is addressed by file_id.
    file_name: str | None = None

121 

122 

# Union of all structured user-content part types (discriminated by `type`).
UserMessageContent = UserTextContent | UserImageContent | UserFileContent

124 

125 

class AssistantTextContent(BaseModel):
    """Text part of a structured assistant message."""

    # Discriminator for the AssistantMessageContent union.
    type: Literal["text"] = "text"
    text: str

129 

130 

class AssistantToolCall(BaseModel):
    """Tool-call part of a structured assistant message."""

    # Discriminator for the AssistantMessageContent union.
    type: Literal["tool_call"] = "tool_call"
    call_id: str
    name: str
    # Either an already-parsed mapping or the raw (presumably JSON) string — confirm producers.
    arguments: dict[str, Any] | str

136 

137 

class AssistantToolCallResult(BaseModel):
    """Tool-call result part, matched to its AssistantToolCall by call_id."""

    # Discriminator for the AssistantMessageContent union.
    type: Literal["tool_call_result"] = "tool_call_result"
    call_id: str
    output: str
    # Tool execution duration in milliseconds, if measured.
    execution_time_ms: int | None = None

143 

144 

# Union of all structured assistant-content part types (discriminated by `type`).
AssistantMessageContent = AssistantTextContent | AssistantToolCall | AssistantToolCallResult

146 

147 

# New structured message types
class NewUserMessage(BaseModel):
    """User message with structured content support."""

    role: Literal["user"] = "user"
    # Ordered list of text/image/file parts.
    content: list[UserMessageContent]
    # Fresh metadata (with a new sent_at timestamp) per message by default.
    meta: MessageMeta = Field(default_factory=MessageMeta)

155 

156 

class NewSystemMessage(BaseModel):
    """System message (plain-text content only)."""

    role: Literal["system"] = "system"
    content: str
    # Fresh metadata (with a new sent_at timestamp) per message by default.
    meta: MessageMeta = Field(default_factory=MessageMeta)

163 

164 

class NewAssistantMessage(BaseModel):
    """Assistant message with structured content and metadata."""

    role: Literal["assistant"] = "assistant"
    # Ordered list of text/tool-call/tool-result parts.
    content: list[AssistantMessageContent]
    # Assistant-specific metadata (model, usage, timings).
    meta: AssistantMessageMeta = Field(default_factory=AssistantMessageMeta)

171 

172 

# Union type for new structured messages
NewMessage = NewUserMessage | NewSystemMessage | NewAssistantMessage
# Read-only sequence alias for APIs that accept any ordered collection of messages.
NewMessages = Sequence[NewMessage]

176 

177 

# Response API format input types
class ResponseInputText(BaseModel):
    """Validated text item in Response-API input format."""

    type: Literal["input_text"] = "input_text"
    text: str

182 

183 

class ResponseInputImage(BaseModel):
    """Validated image item in Response-API input format.

    Mirrors ResponseInputImageDict but enforces that at least one of
    file_id or image_url is present (empty strings are rejected).
    """

    # Resolution hint for the provider.
    detail: Literal["low", "high", "auto"] = "auto"
    type: Literal["input_image"] = "input_image"
    file_id: str | None = None
    image_url: str | None = None

    @model_validator(mode="after")
    def validate_image_source(self) -> "ResponseInputImage":
        """Ensure at least one of file_id or image_url is provided."""
        # Truthiness check on purpose: "" counts as missing, same as None.
        if not self.file_id and not self.image_url:
            msg = "ResponseInputImage must have either file_id or image_url"
            raise ValueError(msg)
        return self

197 

198 

# Compatibility types for old completion API format
class UserMessageContentItemText(BaseModel):
    """Legacy completion-API text content item ({"type": "text", ...})."""

    type: Literal["text"]
    text: str

203 

204 

class UserMessageContentItemImageURLImageURL(BaseModel):
    """Inner image_url object of the legacy completion-API image item."""

    url: str

207 

208 

class UserMessageContentItemImageURL(BaseModel):
    """Legacy completion-API image content item ({"type": "image_url", ...})."""

    type: Literal["image_url"]
    image_url: UserMessageContentItemImageURLImageURL

212 

213 

# Legacy compatibility wrapper classes
class AgentUserMessage(NewUserMessage):
    """Backward-compatible user message.

    Accepts the legacy call shapes — a plain string, a structured content
    list, or nothing — and normalizes them to NewUserMessage's content list.
    """

    def __init__(
        self,
        content: str | list[UserMessageContent] | None = None,
        *,
        role: Literal["user"] = "user",
        meta: MessageMeta | None = None,
    ):
        # Normalize the legacy input forms into a structured content list.
        if content is None:
            normalized: list[UserMessageContent] = []
        elif isinstance(content, str):
            normalized = [UserTextContent(text=content)]
        else:
            normalized = content
        super().__init__(role=role, content=normalized, meta=meta or MessageMeta())

232 

233 

class AgentAssistantMessage(NewAssistantMessage):
    """Backward-compatible assistant message.

    Accepts the legacy call shapes — a plain string, a structured content
    list, or nothing — and normalizes them to NewAssistantMessage's content list.
    """

    def __init__(
        self,
        content: str | list[AssistantMessageContent] | None = None,
        *,
        role: Literal["assistant"] = "assistant",
        meta: AssistantMessageMeta | None = None,
    ):
        # Normalize the legacy input forms into a structured content list.
        if content is None:
            normalized: list[AssistantMessageContent] = []
        elif isinstance(content, str):
            normalized = [AssistantTextContent(text=content)]
        else:
            normalized = content
        super().__init__(role=role, content=normalized, meta=meta or AssistantMessageMeta())

251 

252 

# Plain aliases: the structured types already satisfy the legacy interfaces.
AgentSystemMessage = NewSystemMessage
RunnerMessage = NewMessage

255 

256 

# Streaming processor types
class AssistantMessage(BaseModel):
    """
    Temporary assistant message used during streaming processing.

    This is a simplified message format used internally by completion event processors
    to accumulate streaming content before converting to the final NewAssistantMessage format.
    """

    role: Literal["assistant"] = "assistant"
    # NOTE(review): id/index semantics inferred from streaming usage — confirm with the processor.
    id: str = ""
    index: int | None = None
    # Text accumulated from streamed chunks so far.
    content: str = ""
    # Raw provider-shaped tool-call deltas; intentionally untyped during accumulation.
    tool_calls: list[Any] | None = None

271 

272 

# Enhanced type definitions for better type hints
# FlexibleRunnerMessage for internal storage - only NewMessage types
FlexibleRunnerMessage = NewMessage
RunnerMessages = Sequence[FlexibleRunnerMessage]

# Input types that can be converted - includes dict for backward compatibility
FlexibleInputMessage = NewMessage | dict[str, Any]
InputMessages = Sequence[FlexibleInputMessage]

# Type alias for user input - supports string, single message, or sequence of messages
UserInput = str | FlexibleInputMessage | InputMessages

284 

285 

def user_message_to_llm_dict(message: NewUserMessage) -> dict[str, Any]:
    """Convert NewUserMessage to dict for LLM API.

    A message holding exactly one text part collapses to a plain string
    content; anything else is sent as a list of dumped content parts.
    """
    parts = message.content
    if len(parts) == 1 and parts[0].type == "text":
        payload: Any = parts[0].text
    else:
        payload = [part.model_dump() for part in parts]
    return {"role": message.role, "content": payload}

291 

292 

def system_message_to_llm_dict(message: NewSystemMessage) -> dict[str, Any]:
    """Convert NewSystemMessage to dict for LLM API."""
    # System messages carry plain-text content, so the mapping is direct.
    return {
        "role": message.role,
        "content": message.content,
    }

296 

297 

def assistant_message_to_llm_dict(message: NewAssistantMessage) -> dict[str, Any]:
    """Convert NewAssistantMessage to dict for LLM API.

    Text parts are joined with single spaces into one ``content`` string
    (``None`` when there is no text); ``tool_call`` parts become
    completion-API style ``tool_calls`` entries.  ``tool_call_result`` parts
    are skipped here — presumably they are emitted elsewhere as separate
    ``tool``-role messages; confirm against the caller.

    Args:
        message: Structured assistant message to flatten.

    Returns:
        Completion-API style dict with ``role``, ``content`` and, when any
        tool calls are present, ``tool_calls``.
    """
    text_parts: list[str] = []
    tool_calls: list[dict[str, Any]] = []

    for item in message.content:
        if item.type == "text":
            text_parts.append(item.text)
        elif item.type == "tool_call":
            tool_calls.append(
                {
                    "id": item.call_id,
                    "type": "function",
                    "function": {
                        "name": item.name,
                        # Fix: dict arguments must be serialized as JSON.
                        # str() produced a Python repr (single quotes), which
                        # is not a valid JSON arguments string for the API.
                        "arguments": item.arguments if isinstance(item.arguments, str) else json.dumps(item.arguments),
                    },
                },
            )

    result: dict[str, Any] = {
        "role": message.role,
        "content": " ".join(text_parts) if text_parts else None,
    }

    if tool_calls:
        result["tool_calls"] = tool_calls

    return result

328 

329 

330def message_to_llm_dict(message: NewMessage) -> dict[str, Any]: 

331 """Convert any NewMessage to dict for LLM API""" 

332 if isinstance(message, NewUserMessage): 

333 return user_message_to_llm_dict(message) 

334 if isinstance(message, NewSystemMessage): 

335 return system_message_to_llm_dict(message) 

336 if isinstance(message, NewAssistantMessage): 

337 return assistant_message_to_llm_dict(message) 

338 # Fallback 

339 return message.model_dump(exclude={"meta"}) 

340 

341 

def messages_to_llm_format(messages: Sequence[NewMessage]) -> list[dict[str, Any]]:
    """Convert a sequence of NewMessage to LLM format, excluding meta data."""
    return list(map(message_to_llm_dict, messages))