Coverage for .claude/hooks/moai/lib/daily_analysis.py: 0.00%

115 statements  

coverage.py v7.11.3, created at 2025-11-19 08:00 +0900

#!/usr/bin/env python3
"""Daily Analysis Handler: Trigger daily session analysis and self-learning

This handler implements daily session analysis with self-learning capabilities
for continuous improvement and adaptation of Claude Code usage patterns.
"""

import json
from dataclasses import dataclass
from datetime import datetime
from pathlib import Path
from typing import Any, Dict


@dataclass
class AnalysisResult:
    """Result of daily analysis operation"""
    continue_execution: bool = True
    analysis_completed: bool = False
    days_since_last_analysis: int = 0
    report_path: str = ""
    message: str = ""


def handle_daily_analysis(session_data: Dict[str, Any]) -> AnalysisResult:
    """Handle daily session analysis

    Args:
        session_data: Session start data from Claude Code

    Returns:
        AnalysisResult: Analysis operation result
    """
    project_root = Path(session_data.get("projectPath", "."))
    moai_dir = project_root / ".moai"

    if not moai_dir.exists():
        return AnalysisResult(
            message="No .moai directory found - skipping daily analysis"
        )

    # Check last analysis date
    last_analysis_file = moai_dir / "last_analysis_date.json"
    cutoff_days = 1  # Run analysis every day

    today = datetime.now()
    last_analysis_date = None

    if last_analysis_file.exists():
        try:
            last_data = json.loads(last_analysis_file.read_text(encoding="utf-8"))
            last_analysis_str = last_data.get("last_analysis_date")
            if last_analysis_str:
                last_analysis_date = datetime.fromisoformat(last_analysis_str)
        except (json.JSONDecodeError, ValueError, KeyError):
            pass  # If file is corrupt, ignore and proceed

    days_since_last = (today - last_analysis_date).days if last_analysis_date else 999

    if days_since_last < cutoff_days:
        return AnalysisResult(
            analysis_completed=False,
            days_since_last_analysis=days_since_last,
            message=f"Analysis not due yet (last: {days_since_last} days ago)"
        )

    # Run analysis
    return _run_daily_analysis(project_root, moai_dir, today)
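
# Example (illustrative only): the handler reads just "projectPath" from the
# session payload; any other keys Claude Code may include are ignored here.
#
#   result = handle_daily_analysis({"projectPath": "/path/to/project"})
#   print(result.message)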


def _run_daily_analysis(project_root: Path, moai_dir: Path, analysis_date: datetime) -> AnalysisResult:
    """Run the actual daily analysis

    Args:
        project_root: Project root directory
        moai_dir: .moai directory
        analysis_date: Current analysis date

    Returns:
        AnalysisResult: Analysis operation result
    """
    try:
        # Import session analyzer from moai-adk package
        from moai_adk.core.analysis import SessionAnalyzer

        # Create analyzer for last 1 day
        analyzer = SessionAnalyzer(days_back=1, verbose=False)

        # Run analysis
        analysis_results = analyzer.parse_sessions()

        # Generate report
        reports_dir = moai_dir / "reports"
        reports_dir.mkdir(exist_ok=True)

        report_filename = f"daily-{analysis_date.strftime('%Y-%m-%d')}.md"
        report_path = reports_dir / report_filename

        # Generate report content
        report_content = _generate_daily_report(analysis_results, analysis_date)
        report_path.write_text(report_content, encoding="utf-8")

        # Update last analysis date
        last_analysis_file = moai_dir / "last_analysis_date.json"
        last_analysis_data = {
            "last_analysis_date": analysis_date.isoformat(),
            "report_file": report_filename,
            "metrics": {
                "total_sessions": analysis_results.get("total_sessions", 0),
                "total_events": analysis_results.get("total_events", 0),
                "success_rate": analysis_results.get("success_rate", 0.0)
            }
        }
        last_analysis_file.write_text(json.dumps(last_analysis_data, indent=2), encoding="utf-8")

        # Self-learning: Store patterns and insights
        _store_learning_patterns(moai_dir, analysis_results, analysis_date)

        return AnalysisResult(
            continue_execution=True,
            analysis_completed=True,
            days_since_last_analysis=1,
            report_path=str(report_path.relative_to(project_root)),
            message=f"Daily analysis completed: {len(analysis_results.get('tool_usage', {}))} tools analyzed"
        )

    except Exception as e:
        return AnalysisResult(
            analysis_completed=False,
            message=f"Analysis failed: {str(e)}"
        )


def _generate_daily_report(analysis_results: Dict[str, Any], analysis_date: datetime) -> str:
    """Generate daily analysis report content

    Args:
        analysis_results: Results from session analysis
        analysis_date: Analysis date

    Returns:
        str: Markdown report content
    """
    report_lines = [
        "# Daily Session Analysis Report",
        "",
        f"**Analysis Date**: {analysis_date.strftime('%Y-%m-%d %H:%M:%S')}",
        "**Analysis Period**: Last 24 hours",
        "",
        "## Summary Metrics",
        "",
        f"- **Total Sessions**: {analysis_results.get('total_sessions', 0)}",
        f"- **Total Events**: {analysis_results.get('total_events', 0)}",
        f"- **Success Rate**: {analysis_results.get('success_rate', 0):.1%}",
        f"- **Failed Sessions**: {analysis_results.get('failed_sessions', 0)}",
        "",
        "## Tool Usage",
        ""
    ]

    tool_usage = analysis_results.get("tool_usage", {})
    if tool_usage:
        for tool, count in sorted(tool_usage.items(), key=lambda x: x[1], reverse=True)[:10]:
            report_lines.append(f"- **{tool}**: {count} uses")

    report_lines.extend([
        "",
        "## Error Patterns",
        ""
    ])

    error_patterns = analysis_results.get("error_patterns", {})
    if error_patterns:
        for error, count in sorted(error_patterns.items(), key=lambda x: x[1], reverse=True)[:5]:
            report_lines.append(f"- **{error}**: {count} occurrences")
    else:
        report_lines.append("- No error patterns detected")

    report_lines.extend([
        "",
        "## Daily Insights",
        "",
        "1. Monitor tool usage patterns for daily optimization opportunities",
        "2. Address any recurring error patterns immediately",
        "3. Review frequently used tools for performance optimization",
        "4. Track daily productivity and workflow efficiency",
        "",
        "---",
        "*Generated by MoAI-ADK Daily Analysis*"
    ])

    return "\n".join(report_lines)
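
# For reference, a rendered report from this function looks roughly like the
# following (all numbers are made-up placeholder values):
#
#   # Daily Session Analysis Report
#
#   **Analysis Date**: 2025-11-19 08:00:00
#   **Analysis Period**: Last 24 hours
#
#   ## Summary Metrics
#
#   - **Total Sessions**: 4
#   - **Total Events**: 120
#   - **Success Rate**: 95.0%
#   - **Failed Sessions**: 1
#   ...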


def to_dict(self) -> Dict[str, Any]:
    """Convert AnalysisResult to dictionary for JSON serialization

    Returns:
        Dictionary with continue and hookSpecificOutput keys
    """
    return {
        "continue": self.continue_execution,
        "hookSpecificOutput": {
            "analysis_completed": self.analysis_completed,
            "days_since_last_analysis": self.days_since_last_analysis,
            "report_path": self.report_path,
            "message": self.message
        }
    }
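
# For reference, a result serialized by to_dict() has this shape (values are
# made-up placeholders):
#
#   {
#     "continue": true,
#     "hookSpecificOutput": {
#       "analysis_completed": true,
#       "days_since_last_analysis": 1,
#       "report_path": ".moai/reports/daily-2025-11-19.md",
#       "message": "Daily analysis completed: 5 tools analyzed"
#     }
#   }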


def _store_learning_patterns(moai_dir: Path, analysis_results: Dict[str, Any], analysis_date: datetime) -> None:
    """Store daily analysis results for self-learning and pattern recognition

    Args:
        moai_dir: .moai directory
        analysis_results: Results from session analysis
        analysis_date: Analysis date
    """
    memory_dir = moai_dir / "memory"
    memory_dir.mkdir(parents=True, exist_ok=True)  # Ensure the memory directory exists before writing
    patterns_file = memory_dir / "daily-patterns.json"

    # Load existing patterns
    if patterns_file.exists():
        patterns_data = json.loads(patterns_file.read_text(encoding="utf-8"))
    else:
        patterns_data = {
            "version": "1.0.0",
            "created_at": analysis_date.isoformat(),
            "last_updated": analysis_date.isoformat(),
            "patterns": {
                "tool_usage": {},
                "error_patterns": {},
                "success_patterns": {},
                "workflow_optimizations": {}
            },
            "insights": {
                "most_used_tools": [],
                "common_errors": [],
                "optimization_suggestions": []
            },
            "metadata": {
                "total_days_analyzed": 0,
                "patterns_identified": 0,
                "insights_generated": 0
            }
        }

    # Update tool usage patterns
    tool_usage = analysis_results.get("tool_usage", {})
    for tool, count in tool_usage.items():
        if tool not in patterns_data["patterns"]["tool_usage"]:
            patterns_data["patterns"]["tool_usage"][tool] = []
        patterns_data["patterns"]["tool_usage"][tool].append({
            "date": analysis_date.isoformat(),
            "count": count,
            "type": "usage"
        })

    # Update error patterns
    error_patterns = analysis_results.get("error_patterns", {})
    for error, count in error_patterns.items():
        if error not in patterns_data["patterns"]["error_patterns"]:
            patterns_data["patterns"]["error_patterns"][error] = []
        patterns_data["patterns"]["error_patterns"][error].append({
            "date": analysis_date.isoformat(),
            "count": count,
            "type": "error"
        })

    # Update success rate patterns
    success_rate = analysis_results.get("success_rate", 0.0)
    if "daily_success_rates" not in patterns_data["patterns"]:
        patterns_data["patterns"]["daily_success_rates"] = []
    patterns_data["patterns"]["daily_success_rates"].append({
        "date": analysis_date.isoformat(),
        "rate": success_rate
    })

    # Generate insights
    insights = _generate_insights(patterns_data, analysis_results)
    patterns_data["insights"].update(insights)

    # Update metadata
    patterns_data["last_updated"] = analysis_date.isoformat()
    patterns_data["metadata"]["total_days_analyzed"] += 1
    patterns_data["metadata"]["patterns_identified"] = len(tool_usage) + len(error_patterns)
    patterns_data["metadata"]["insights_generated"] = len([v for v in insights.values() if v])

    # Save updated patterns
    patterns_file.write_text(json.dumps(patterns_data, indent=2), encoding="utf-8")


def _generate_insights(patterns_data: Dict[str, Any], current_results: Dict[str, Any]) -> Dict[str, Any]:
    """Generate insights from analysis patterns

    Args:
        patterns_data: Existing patterns data
        current_results: Current day's analysis results

    Returns:
        Dict[str, Any]: Generated insights
    """
    insights = {}

    # Most used tools insight
    tool_usage = current_results.get("tool_usage", {})
    if tool_usage:
        most_used = max(tool_usage.items(), key=lambda x: x[1])
        insights["most_used_tools"] = [{
            "tool": most_used[0],
            "count": most_used[1],
            "date": datetime.now().isoformat()
        }]

    # Common errors insight
    error_patterns = current_results.get("error_patterns", {})
    if error_patterns:
        most_common_error = max(error_patterns.items(), key=lambda x: x[1])
        insights["common_errors"] = [{
            "error": most_common_error[0],
            "count": most_common_error[1],
            "date": datetime.now().isoformat()
        }]

    # Optimization suggestions
    suggestions = []

    # Suggest optimization for frequently used tools
    if tool_usage:
        top_tools = sorted(tool_usage.items(), key=lambda x: x[1], reverse=True)[:3]
        suggestions.append({
            "type": "tool_optimization",
            "description": f"Focus on optimizing {top_tools[0][0]} usage ({top_tools[0][1]} uses)",
            "priority": "high"
        })

    # Suggest error resolution
    if error_patterns:
        for error, count in error_patterns.items():
            if count >= 3:  # Errors occurring 3+ times
                suggestions.append({
                    "type": "error_resolution",
                    "description": f"Address recurring error: {error} ({count} occurrences)",
                    "priority": "critical"
                })

    insights["optimization_suggestions"] = suggestions

    return insights


# Add to_dict method to AnalysisResult
AnalysisResult.to_dict = to_dict
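
# Minimal usage sketch (illustrative, not part of the original handler): Claude
# Code hooks typically receive a JSON payload on stdin and emit JSON on stdout.
# The exact wiring depends on how this hook is registered, so treat this as an
# assumption rather than the canonical entry point.
if __name__ == "__main__":
    import sys

    try:
        payload = json.loads(sys.stdin.read() or "{}")
    except json.JSONDecodeError:
        payload = {}

    result = handle_daily_analysis(payload)
    print(json.dumps(result.to_dict(), indent=2))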