Coverage for src/moai_adk/cli/commands/improve_user_experience.py: 0.00%

153 statements  

« prev     ^ index     » next       coverage.py v7.11.3, created at 2025-11-20 14:35 +0900

1""" 

2User Experience Improvement CLI Command 

3""" 

4 

5import argparse 

6import asyncio 

7from pathlib import Path 

8 

9from rich.console import Console 

10from rich.panel import Panel 

11from rich.progress import Progress, SpinnerColumn, TextColumn 

12from rich.table import Table 

13 

14from moai_adk.utils.user_experience import ( 

15 UserExperienceAnalyzer, 

16 generate_improvement_plan, 

17) 

18 

19console = Console() 

20 

21 

def create_parser(subparsers) -> argparse.ArgumentParser:
    """Register the ``improve-ux`` sub-command and its options.

    Args:
        subparsers: The special action object returned by
            ``ArgumentParser.add_subparsers()``.

    Returns:
        The newly created ``improve-ux`` sub-parser.
    """
    ux_parser = subparsers.add_parser(
        "improve-ux",
        help="Analyze user experience improvements",
        description="Analyze user experience of online documentation portal and provide improvement plan",
    )

    # (flags, keyword options) for every CLI option of the sub-command.
    option_specs = [
        (
            ("--url", "-u"),
            {
                "type": str,
                "default": "https://adk.mo.ai.kr",
                "help": "URL to analyze (default: https://adk.mo.ai.kr)",
            },
        ),
        (
            ("--output", "-o"),
            {"type": str, "help": "File path to save analysis results"},
        ),
        (
            ("--format", "-f"),
            {
                "type": str,
                "choices": ["json", "markdown", "text"],
                "default": "markdown",
                "help": "Output format (default: markdown)",
            },
        ),
        (
            ("--verbose", "-v"),
            {"action": "store_true", "help": "Display detailed progress"},
        ),
        (
            ("--max-workers", "-w"),
            {
                "type": int,
                "default": 5,
                "help": "Maximum number of concurrent tasks (default: 5)",
            },
        ),
    ]
    for flags, options in option_specs:
        ux_parser.add_argument(*flags, **options)

    return ux_parser

64 

65 

def format_metrics_table(metrics: dict) -> Table:
    """Render the per-category UX metrics as a rich table.

    Args:
        metrics: Analysis report dict; the "performance", "navigation",
            "content" and "accessibility" entries are metric objects
            (or None when a category was not analyzed).

    Returns:
        A three-column table: Category / Score / Status.
    """
    table = Table(title="User Experience Metrics")
    table.add_column("Category", style="cyan")
    table.add_column("Score", style="magenta")
    table.add_column("Status", style="green")

    perf = metrics.get("performance")
    nav = metrics.get("navigation")
    content = metrics.get("content")
    acc = metrics.get("accessibility")

    # (label, headline score, metric object) — a missing metric scores 0
    # and is flagged as needing improvement.
    rows = [
        ("Performance", perf.success_rate if perf else 0, perf),
        ("Navigation", nav.structure_score if nav else 0, nav),
        ("Content", content.accuracy_score if content else 0, content),
        # Accessibility has no single score attribute; collapse is_good to 1/0.
        ("Accessibility", 1.0 if (acc and acc.is_good) else 0.0, acc),
    ]
    for label, score, metric in rows:
        status = "✅ Good" if (metric and metric.is_good) else "❌ Needs Improvement"
        table.add_row(label, f"{score:.2f}", status)

    return table

106 

107 

def format_recommendations(recommendations: list) -> Table:
    """Render improvement recommendations with a heuristic priority.

    Priority is keyword-based: error/keyboard/accuracy -> High,
    performance/structure -> Medium, anything else -> Low.

    Args:
        recommendations: Human-readable recommendation strings.

    Returns:
        A two-column table: numbered recommendation / priority label.
    """
    table = Table(title="Improvement Recommendations")
    table.add_column("Recommendation", style="yellow")
    table.add_column("Priority", style="red")

    for i, rec in enumerate(recommendations, 1):
        # Lowercase once per item (the original recomputed it per keyword);
        # the dead "Medium" default is gone — every branch assigns priority.
        lowered = rec.lower()
        if any(kw in lowered for kw in ("error", "keyboard", "accuracy")):
            priority = "High"
        elif "performance" in lowered or "structure" in lowered:
            priority = "Medium"
        else:
            priority = "Low"

        table.add_row(f"{i}. {rec}", priority)

    return table

130 

131 

def format_timeline(timeline: dict) -> Table:
    """Render the implementation timeline as a rich table.

    Args:
        timeline: Mapping of period key (e.g. "short_term") to a list of
            task strings; empty periods are skipped.

    Returns:
        A two-column table: Period / newline-joined tasks.
    """
    table = Table(title="Implementation Plan")
    table.add_column("Period", style="cyan")
    table.add_column("Tasks", style="magenta")

    for phase, phase_tasks in timeline.items():
        if not phase_tasks:
            continue  # skip periods with nothing scheduled
        # "short_term" -> "Short Term"
        label = phase.replace("_", " ").title()
        table.add_row(label, "\n".join(str(t) for t in phase_tasks))

    return table

146 

147 

async def analyze_user_experience(
    url: str, max_workers: int = 5, verbose: bool = False
) -> dict:
    """Run the UX analysis for *url* and return the raw report dict.

    Args:
        url: Base URL of the documentation portal to analyze.
        max_workers: Upper bound on concurrent analysis tasks.
        verbose: When True, print a start message before analyzing.

    Returns:
        The report produced by ``UserExperienceAnalyzer.generate_report()``.
    """
    if verbose:
        console.print(f"[blue]Starting user experience analysis: {url}[/blue]")

    ux_analyzer = UserExperienceAnalyzer(url, max_workers=max_workers)

    # Transient spinner: it is erased from the terminal once analysis ends.
    spinner = Progress(
        SpinnerColumn(),
        TextColumn("[progress.description]{task.description}"),
        console=console,
        transient=True,
    )
    with spinner as progress:
        task_id = progress.add_task("Analyzing user experience...", total=None)
        report = await ux_analyzer.generate_report()
        progress.update(task_id, completed=True)

    return report

172 

173 

def generate_markdown_report(analysis_report: dict, improvement_plan: dict) -> str:
    """Render the analysis report and improvement plan as markdown.

    Args:
        analysis_report: Analysis results; the "performance", "navigation",
            "content" and "accessibility" entries are metric objects with
            score attributes, or None when that category is unavailable
            (missing metrics render as 0 / ❌).
        improvement_plan: Plan dict with "estimated_duration" and a
            "timeline" mapping of period name -> task list.

    Returns:
        The complete markdown document as a single newline-joined string.
    """
    performance = analysis_report.get("performance")
    navigation = analysis_report.get("navigation")
    content = analysis_report.get("content")
    accessibility = analysis_report.get("accessibility")

    def check(flag) -> str:
        # Boolean accessibility flag -> check / cross mark.
        return "✅" if flag else "❌"

    lines = [
        "# User Experience Improvement Report",
        "",
        f"**Analysis Target**: {analysis_report.get('base_url', 'N/A')}",
        f"**Analysis Time**: {analysis_report.get('generated_at', 'N/A')}",
        "",
        f"## Overall Score: {analysis_report.get('overall_score', 0):.2f}",
        "",
        "### Metrics Details",
        "",
        "#### Performance",
        f"- Success Rate: {performance.success_rate if performance else 0:.2f}",
        f"- Average Load Time: {performance.load_time if performance else 0:.2f}s",
        f"- Response Time: {performance.response_time if performance else 0:.2f}s",
        f"- Error Rate: {performance.error_rate if performance else 0:.2f}",
        "",
        "#### Navigation",
        f"- Structure Score: {navigation.structure_score if navigation else 0:.2f}",
        f"- Link Count: {navigation.link_count if navigation else 0}",
        f"- Depth: {navigation.depth if navigation else 0}",
        f"- Completeness: {navigation.completeness if navigation else 0:.2f}",
        "",
        "#### Content",
        f"- Accuracy: {content.accuracy_score if content else 0:.2f}",
        f"- Completeness: {content.completeness_score if content else 0:.2f}",
        f"- Organization: {content.organization_score if content else 0:.2f}",
        f"- Readability: {content.readability_score if content else 0:.2f}",
        "",
        "#### Accessibility",
        f"- Keyboard Navigation: {check(accessibility and accessibility.keyboard_navigation)}",
        f"- Screen Reader Support: {check(accessibility and accessibility.screen_reader_support)}",
        f"- Color Contrast: {check(accessibility and accessibility.color_contrast)}",
        f"- Responsive Design: {check(accessibility and accessibility.responsive_design)}",
        f"- ARIA Labels: {check(accessibility and accessibility.aria_labels)}",
        "",
        "### Improvement Recommendations",
        "",
    ]

    lines.extend(f"- {rec}" for rec in analysis_report.get("recommendations", []))
    lines.append("")

    lines.extend(
        [
            "### Implementation Plan",
            "",
            f"**Estimated Duration**: {improvement_plan.get('estimated_duration', 'N/A')}",
            "",
        ]
    )

    # One "#### Period" section per non-empty timeline entry.
    for period, tasks in improvement_plan.get("timeline", {}).items():
        if tasks:
            lines.append(f"#### {period.replace('_', ' ').title()}")
            lines.extend(f"- {task}" for task in tasks)
            lines.append("")

    return "\n".join(lines)

273 

274 

def run_command(args) -> int:
    """Run the user experience improvement command.

    Args:
        args: Parsed CLI namespace with ``url``, ``max_workers``,
            ``verbose``, ``output`` and ``format`` attributes
            (see ``create_parser``).

    Returns:
        0 when the overall score is >= 0.85, 1 otherwise (including
        user interruption and any unexpected error).
    """
    try:
        # Drive the async analysis to completion synchronously.
        analysis_report = asyncio.run(
            analyze_user_experience(args.url, args.max_workers, args.verbose)
        )

        # Derive an actionable improvement plan from the raw analysis.
        improvement_plan = generate_improvement_plan(analysis_report)

        if args.verbose:
            console.print(
                Panel.fit(
                    f"Overall Score: {analysis_report['overall_score']:.2f}",
                    title="Analysis Results",
                    style="blue",
                )
            )

        # Display metrics table.
        console.print(format_metrics_table(analysis_report))

        # Display recommendations, if any were produced.
        if analysis_report.get("recommendations"):
            console.print(
                format_recommendations(analysis_report["recommendations"])
            )

        # Display implementation plan, if the plan has a timeline.
        if improvement_plan.get("timeline"):
            console.print(format_timeline(improvement_plan["timeline"]))

        # Persist results when an output path was given.
        if args.output:
            if args.format == "json":
                import json

                result = {
                    "analysis": analysis_report,
                    "improvement_plan": improvement_plan,
                }
                # FIX: the metric entries in analysis_report are objects,
                # not plain dicts (every consumer reads them by attribute),
                # so a bare json.dumps raised TypeError. Serialize them via
                # __dict__, falling back to str() for anything without one.
                # NOTE(review): confirm metric objects expose __dict__
                # (i.e. are not slots-only classes).
                output_content = json.dumps(
                    result,
                    indent=2,
                    ensure_ascii=False,
                    default=lambda o: getattr(o, "__dict__", str(o)),
                )
            else:
                # NOTE(review): --format "text" currently falls through to
                # the markdown renderer as well.
                output_content = generate_markdown_report(
                    analysis_report, improvement_plan
                )

            output_path = Path(args.output)
            output_path.write_text(output_content, encoding="utf-8")
            console.print(f"\n[green]Results saved:[/green] {output_path}")

        # Exit code mirrors the quality gate: 0.85 is the pass threshold.
        score = analysis_report["overall_score"]
        if score >= 0.85:
            console.print(
                f"\n[green]✅ User experience is excellent (score: {score:.2f})[/green]"
            )
            return 0
        console.print(
            f"\n[yellow]⚠️ User experience needs improvement (score: {score:.2f})[/yellow]"
        )
        return 1

    except KeyboardInterrupt:
        console.print("\n[yellow]Analysis interrupted by user.[/yellow]")
        return 1
    except Exception as e:
        # Top-level CLI boundary: report and return non-zero rather than crash.
        console.print(f"[red]Error occurred:[/red] {e}")
        return 1