# Coverage for src/moai_adk/utils/user_experience.py: 0.00% (184 statements)
# Report generated by coverage.py v7.11.3 at 2025-11-20 14:35 +0900
1"""
2User Experience Enhancement Utilities
4This module provides utilities for analyzing and improving user experience
5across multiple dimensions including performance, navigation, content quality,
6and accessibility.
7"""
9import asyncio
10import logging
11import time
12from dataclasses import dataclass, field
13from datetime import datetime
14from typing import Any, Dict, List, Tuple
15from urllib.parse import urljoin
17import aiohttp
19from moai_adk.utils.common import HTTPClient
# Module-level logger following the standard one-logger-per-module convention.
logger = logging.getLogger(__name__)
@dataclass
class PerformanceMetrics:
    """Measured performance figures for one user-experience analysis run."""

    load_time: float  # average page load time, seconds
    response_time: float  # server response time, seconds
    success_rate: float  # fraction of successful requests, 0.0-1.0
    throughput: float  # requests handled per second
    error_rate: float  # fraction of failed requests, 0.0-1.0
    timestamp: datetime = field(default_factory=datetime.now)  # capture time

    @property
    def is_good(self) -> bool:
        """Return True when every metric clears its quality threshold."""
        checks = (
            self.load_time <= 2.0,
            self.response_time <= 1.0,
            self.success_rate >= 0.9,
            self.throughput >= 10,
            self.error_rate <= 0.1,
        )
        return all(checks)
@dataclass
class NavigationMetrics:
    """Navigation-structure metrics for UX analysis."""

    structure_score: float  # overall structure quality, 0.0-1.0
    link_count: int  # number of navigation links
    depth: int  # maximum hierarchy depth
    completeness: float  # fraction of pages reachable via navigation
    timestamp: datetime = field(default_factory=datetime.now)  # capture time

    @property
    def is_good(self) -> bool:
        """Return True when the navigation structure meets every threshold."""
        # Guard clauses: bail out on the first failing criterion.
        if self.structure_score < 0.8:
            return False
        if self.link_count < 5:
            return False
        if self.depth > 3:
            return False
        return self.completeness >= 0.9
@dataclass
class ContentMetrics:
    """Content-quality metrics for UX analysis."""

    accuracy_score: float  # factual/technical accuracy, 0.0-1.0
    completeness_score: float  # coverage of expected topics, 0.0-1.0
    organization_score: float  # structural organization, 0.0-1.0
    readability_score: float  # ease of reading, 0.0-1.0
    timestamp: datetime = field(default_factory=datetime.now)  # capture time

    @property
    def is_good(self) -> bool:
        """Return True when every content score clears its minimum."""
        scores = (
            self.accuracy_score,
            self.completeness_score,
            self.organization_score,
            self.readability_score,
        )
        minimums = (0.9, 0.9, 0.8, 0.8)
        return all(score >= minimum for score, minimum in zip(scores, minimums))
@dataclass
class AccessibilityMetrics:
    """Accessibility check results for UX analysis (each flag = check passed)."""

    keyboard_navigation: bool  # site usable with keyboard alone
    screen_reader_support: bool  # content exposed to screen readers
    color_contrast: bool  # contrast ratios sufficient
    responsive_design: bool  # layout adapts to viewport size
    aria_labels: bool  # interactive elements carry ARIA labels
    timestamp: datetime = field(default_factory=datetime.now)  # capture time

    @property
    def is_good(self) -> bool:
        """Return True only when every accessibility check passed."""
        return (
            self.keyboard_navigation
            and self.screen_reader_support
            and self.color_contrast
            and self.responsive_design
            and self.aria_labels
        )
class UserExperienceAnalyzer(HTTPClient):
    """
    User experience analyzer for comprehensive UX assessment.

    Analyzes multiple dimensions of user experience including performance,
    navigation, content quality, and accessibility. Provides actionable
    recommendations for improvement.

    Note: several analyses currently return mock data; see the individual
    method docstrings for which measurements are real.
    """

    def __init__(self, base_url: str, max_workers: int = 5):
        """
        Args:
            base_url: Root URL of the site to analyze.
            max_workers: Maximum number of concurrent HTTP requests.
        """
        super().__init__(max_concurrent=max_workers, timeout=10)
        self.base_url = base_url

    async def __aenter__(self):
        """Enter async context manager: open the aiohttp session."""
        # NOTE(review): self.max_concurrent / self.timeout are assumed to be
        # attributes set by HTTPClient.__init__ — confirm in utils.common.
        connector = aiohttp.TCPConnector(limit=self.max_concurrent)
        timeout = aiohttp.ClientTimeout(total=self.timeout)
        self.session = aiohttp.ClientSession(
            connector=connector,
            timeout=timeout,
            headers={
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
            },
        )
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Exit async context manager: close the session if one was opened."""
        if self.session:
            await self.session.close()

    async def analyze_performance(self) -> PerformanceMetrics:
        """
        Analyze website performance metrics.

        Fetches a fixed set of representative pages concurrently and derives
        average load time, success rate, and error rate from the results.
        Response time and throughput are mocked pending real instrumentation.

        Returns:
            PerformanceMetrics for this run.
        """
        # Representative pages tested with concurrent loading.
        pages = [
            self.base_url,
            urljoin(self.base_url, "/getting-started"),
            urljoin(self.base_url, "/api"),
            urljoin(self.base_url, "/guides"),
            urljoin(self.base_url, "/search"),
        ]
        total_requests = len(pages)

        async def load_page(url: str) -> Tuple[float, bool]:
            # A raised exception counts as a failed load; elapsed time is
            # still recorded so failures contribute to the average.
            page_start = time.time()
            try:
                response = await self.fetch_url(url)
                return time.time() - page_start, response.success
            except Exception:
                return time.time() - page_start, False

        # Load all pages concurrently.
        results = await asyncio.gather(*(load_page(url) for url in pages))

        # Aggregate directly from the gathered results (the previous version
        # kept an unused load_times list and re-initialized success_count).
        total_load_time = sum(load_time for load_time, _ in results)
        success_count = sum(1 for _, success in results if success)

        avg_load_time = total_load_time / total_requests if total_requests > 0 else 0
        success_rate = success_count / total_requests if total_requests > 0 else 0

        # Mock measurements (actual implementation would use real metrics)
        response_time = 0.5  # Mock response time
        throughput = 15.0  # Mock throughput
        error_rate = 1.0 - success_rate

        return PerformanceMetrics(
            load_time=avg_load_time,
            response_time=response_time,
            success_rate=success_rate,
            throughput=throughput,
            error_rate=error_rate,
        )

    async def analyze_navigation(self) -> NavigationMetrics:
        """
        Analyze navigation structure.

        Evaluates navigation hierarchy, link distribution, and completeness
        to assess ease of navigation. Currently uses mock navigation data;
        a real implementation would crawl the site.

        Returns:
            NavigationMetrics derived from the (mock) navigation data.
        """
        # Mock navigation data (actual implementation would perform real crawling)
        navigation_data: Dict[str, Any] = {
            "main_links": ["Getting Started", "API Documentation", "Guides", "Search"],
            "sub_links": {
                "Getting Started": ["Installation", "Configuration", "First Steps"],
                "API Documentation": ["Core API", "Authentication", "Webhooks"],
                "Guides": ["Best Practices", "Examples", "Troubleshooting"],
                "Search": ["Advanced Search", "Filters", "Results"],
            },
            "depth": 2,
            "total_links": 15,
        }

        structure_score = self._calculate_structure_score(navigation_data)

        return NavigationMetrics(
            structure_score=structure_score,
            link_count=int(navigation_data["total_links"]),
            depth=int(navigation_data["depth"]),
            completeness=0.95,  # mock value — no real coverage check yet
        )

    def _calculate_structure_score(self, navigation_data: Dict) -> float:
        """
        Calculate navigation structure score.

        Averages three sub-scores: main-link balance (targets ~4 main links),
        hierarchy depth (shallower is better, floor of 0.5), and sub-link
        coverage (targets ~20 sub-links).

        Args:
            navigation_data: Dict with "main_links", "sub_links", and "depth".

        Returns:
            Structure score in [0.0, 1.0].
        """
        main_links = len(navigation_data["main_links"])
        sub_links_count = sum(
            len(links) for links in navigation_data["sub_links"].values()
        )

        balance_score = min(1.0, main_links / 4.0)  # Main link balance
        hierarchy_score = max(
            0.5, 1.0 - navigation_data["depth"] / 5.0
        )  # Hierarchy depth
        coverage_score = min(1.0, sub_links_count / 20.0)  # Sub-link coverage

        return (balance_score + hierarchy_score + coverage_score) / 3.0

    async def analyze_content(self) -> ContentMetrics:
        """
        Analyze content quality.

        Evaluates content accuracy, completeness, organization, and readability.
        Currently uses mock content data; a real implementation would analyze
        actual page content.

        Returns:
            ContentMetrics derived from the (mock) content data.
        """
        # Mock content data (actual implementation would perform real content analysis)
        content_data = {
            "word_count": 5000,
            "code_examples": 25,
            "images": 15,
            "links": 30,
            "readability_score": 8.5,  # on a 0-10 scale, normalized below
            "completeness_score": 0.95,
        }

        accuracy_score = self._calculate_accuracy_score(content_data)
        organization_score = self._calculate_organization_score(content_data)
        readability_score = content_data["readability_score"] / 10.0

        return ContentMetrics(
            accuracy_score=accuracy_score,
            completeness_score=content_data["completeness_score"],
            organization_score=organization_score,
            readability_score=readability_score,
        )

    def _calculate_accuracy_score(self, content_data: Dict) -> float:
        """
        Calculate content accuracy score.

        Averages ratios of code examples (target 20), images (target 10),
        and links (target 25), each capped at 1.0.

        Args:
            content_data: Dict with "code_examples", "images", and "links".

        Returns:
            Accuracy score in [0.0, 1.0].
        """
        code_examples_ratio = min(1.0, content_data["code_examples"] / 20.0)
        images_ratio = min(1.0, content_data["images"] / 10.0)
        links_ratio = min(1.0, content_data["links"] / 25.0)

        return (code_examples_ratio + images_ratio + links_ratio) / 3.0

    def _calculate_organization_score(self, content_data: Dict) -> float:
        """
        Calculate content organization score.

        Averages word-count ratio (target 5000 words) and structure ratio
        (target 15 code examples), each capped at 1.0.

        Args:
            content_data: Dict with "word_count" and "code_examples".

        Returns:
            Organization score in [0.0, 1.0].
        """
        word_count_ratio = min(1.0, content_data["word_count"] / 5000.0)
        structure_score = min(1.0, content_data["code_examples"] / 15.0)

        return (word_count_ratio + structure_score) / 2.0

    async def analyze_accessibility(self) -> AccessibilityMetrics:
        """
        Analyze accessibility compliance.

        Evaluates keyboard navigation, screen reader support, color contrast,
        responsive design, and ARIA labels. Currently returns mock data; a
        real implementation would run automated accessibility checks.

        Returns:
            AccessibilityMetrics with one flag per check.
        """
        # Mock accessibility data (actual implementation would perform real accessibility checks)
        accessibility_data = {
            "keyboard_navigation": True,
            "screen_reader_support": True,
            "color_contrast": True,
            "responsive_design": True,
            "aria_labels": True,
        }

        return AccessibilityMetrics(
            keyboard_navigation=accessibility_data["keyboard_navigation"],
            screen_reader_support=accessibility_data["screen_reader_support"],
            color_contrast=accessibility_data["color_contrast"],
            responsive_design=accessibility_data["responsive_design"],
            aria_labels=accessibility_data["aria_labels"],
        )

    async def generate_report(self) -> Dict[str, Any]:
        """
        Generate a comprehensive user experience report.

        Opens the HTTP session (via ``async with self``), runs all four
        analyses concurrently, and combines them into a weighted overall
        score with actionable recommendations.

        Returns:
            Dict with keys "overall_score", "performance", "navigation",
            "content", "accessibility", "recommendations", "generated_at".
        """
        async with self:
            # Kick off all four analyses and await them in parallel.
            performance, navigation, content, accessibility = await asyncio.gather(
                self.analyze_performance(),
                self.analyze_navigation(),
                self.analyze_content(),
                self.analyze_accessibility(),
            )

            # Weighted overall score: performance 30%, navigation 20%,
            # content 30%, accessibility (pass/fail) 20%.
            overall_score = (
                performance.success_rate * 0.3
                + navigation.structure_score * 0.2
                + content.accuracy_score * 0.3
                + (1 if accessibility.is_good else 0) * 0.2
            )

            recommendations = self._generate_recommendations(
                performance, navigation, content, accessibility
            )

            return {
                "overall_score": overall_score,
                "performance": performance,
                "navigation": navigation,
                "content": content,
                "accessibility": accessibility,
                "recommendations": recommendations,
                "generated_at": datetime.now().isoformat(),
            }

    def _generate_recommendations(
        self,
        performance: PerformanceMetrics,
        navigation: NavigationMetrics,
        content: ContentMetrics,
        accessibility: AccessibilityMetrics,
    ) -> List[str]:
        """
        Generate improvement recommendations.

        For each dimension whose ``is_good`` check fails, emits specific,
        actionable recommendations for the failing sub-criteria.

        Returns:
            List of human-readable recommendation strings (possibly empty).
        """
        recommendations = []

        # Performance improvement recommendations
        if not performance.is_good:
            if performance.load_time > 2.0:
                recommendations.append(
                    "Improve page load time: Optimize images, use CDN, consider code splitting"
                )
            if performance.error_rate > 0.1:
                recommendations.append(
                    "Improve error handling: Enhance 404 pages, improve error messages"
                )

        # Navigation improvement recommendations
        if not navigation.is_good:
            if navigation.structure_score < 0.8:
                recommendations.append(
                    "Redesign navigation structure: Simplify hierarchy, reorganize categories"
                )
            if navigation.completeness < 0.9:
                recommendations.append(
                    "Improve link completeness: Connect missing pages, add breadcrumb links"
                )

        # Content improvement recommendations
        if not content.is_good:
            if content.accuracy_score < 0.9:
                recommendations.append(
                    "Improve content accuracy: Update information, validate code examples"
                )
            if content.organization_score < 0.8:
                recommendations.append(
                    "Improve content structure: Add section divisions, table of contents, related links"
                )

        # Accessibility improvement recommendations
        if not accessibility.is_good:
            if not accessibility.keyboard_navigation:
                recommendations.append(
                    "Improve keyboard navigation: Optimize tab order, add keyboard shortcuts"
                )
            if not accessibility.screen_reader_support:
                recommendations.append(
                    "Improve screen reader support: Add ARIA labels, use semantic HTML"
                )

        return recommendations
def generate_improvement_plan(analysis_report: Dict[str, Any]) -> Dict[str, Any]:
    """
    Generate a prioritized improvement plan from a UX analysis report.

    Sorts candidate actions into high/medium/low priority buckets based on
    the report's performance, content, and accessibility metrics, then maps
    those buckets onto an execution timeline with a rough effort estimate.

    Args:
        analysis_report: Report dict as produced by
            ``UserExperienceAnalyzer.generate_report``.

    Returns:
        Dict with "overall_score", "priorities", "timeline",
        "estimated_duration", and "success_criteria".
    """
    high: List[str] = []
    medium: List[str] = []
    low: List[str] = []

    # Performance priorities
    perf = analysis_report["performance"]
    if not perf.is_good:
        if perf.error_rate > 0.2:
            high.append("Improve error handling system")
        elif perf.load_time > 3.0:
            high.append("Improve load time")
        else:
            medium.append("Optimize performance")

    # Content priorities
    content = analysis_report["content"]
    if not content.is_good:
        if content.accuracy_score < 0.8:
            high.append("Validate content accuracy")
        elif content.completeness_score < 0.8:
            medium.append("Improve content completeness")
        else:
            low.append("Fine-tune content")

    # Accessibility priorities
    a11y = analysis_report["accessibility"]
    if not a11y.is_good:
        if not a11y.keyboard_navigation:
            high.append("Improve keyboard accessibility")
        elif not a11y.screen_reader_support:
            high.append("Improve screen reader support")
        else:
            medium.append("Ensure accessibility standards compliance")

    # Rough effort estimate: high items 1 week, medium 2 weeks, low 3 weeks.
    estimated_weeks = len(high) + len(medium) * 2 + len(low) * 3

    return {
        "overall_score": analysis_report["overall_score"],
        "priorities": {"high": high, "medium": medium, "low": low},
        "timeline": {
            "immediate": high,
            "short_term": medium,
            "long_term": low,
        },
        "estimated_duration": f"{estimated_weeks} weeks",
        "success_criteria": {
            "performance_score": 0.9,
            "content_score": 0.9,
            "accessibility_score": 1.0,
            "overall_score": 0.85,
        },
    }
if __name__ == "__main__":
    # Example: run a full UX analysis against the documentation site.
    analyzer = UserExperienceAnalyzer("https://adk.mo.ai.kr")

    async def main():
        report = await analyzer.generate_report()

        print("=== User Experience Analysis Report ===")
        print(f"Overall Score: {report['overall_score']:.2f}")
        print(f"Performance Score: {report['performance'].success_rate:.2f}")
        print(f"Navigation Score: {report['navigation'].structure_score:.2f}")
        print(f"Content Score: {report['content'].accuracy_score:.2f}")
        accessibility_score = 1.0 if report["accessibility"].is_good else 0.0
        print(f"Accessibility Score: {accessibility_score:.2f}")

        print("\nImprovement Recommendations:")
        for item in report["recommendations"]:
            print(f"- {item}")

        # Turn the analysis into a prioritized action plan.
        plan = generate_improvement_plan(report)
        print("\nImprovement Plan:")
        print(f"Estimated Duration: {plan['estimated_duration']}")
        print(f"Immediate Actions: {plan['timeline']['immediate']}")
        print(f"Short-term Actions: {plan['timeline']['short_term']}")
        print(f"Long-term Actions: {plan['timeline']['long_term']}")

    asyncio.run(main())