Coverage for src/moai_adk/core/jit_enhanced_hook_manager.py: 19.95% (441 statements)
coverage.py v7.12.0, created at 2025-11-20 20:52 +0900
1"""
2JIT-Enhanced Hook Manager
4Integrates Phase 2 JIT Context Loading System with Claude Code hook infrastructure
5to provide intelligent, phase-aware hook execution with optimal performance.
7Key Features:
8- Phase-based hook optimization
9- JIT context loading for hooks
10- Intelligent skill filtering for hook operations
11- Dynamic token budget management
12- Real-time performance monitoring
13- Smart caching and invalidation
14"""

import asyncio
import json
import threading
import time
from dataclasses import dataclass, field
from datetime import datetime
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List, Optional, Set, Tuple

# Import JIT Context Loading System from Phase 2
try:
    from .jit_context_loader import ContextCache, JITContextLoader, TokenBudgetManager
except ImportError:
    # Fallback stubs for environments where the JIT system is not available
    class JITContextLoader:
        def __init__(self, *args, **kwargs):
            pass

    class ContextCache:
        def __init__(self, max_size=100, max_memory_mb=50):
            self.max_size = max_size
            self.max_memory_mb = max_memory_mb
            self.hits = 0
            self.misses = 0
            self.cache = {}

        def get(self, key):
            self.misses += 1
            return None

        def put(self, key, value):
            pass

        def clear(self):
            pass

        def get_stats(self):
            return {"hits": self.hits, "misses": self.misses}

    class TokenBudgetManager:
        def __init__(self, *args, **kwargs):
            pass
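
# The fallback ContextCache above counts every lookup as a miss and never
# stores anything, so hooks still run (uncached) when the real JIT system
# cannot be imported; only the cache-hit metrics degrade.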


# Create Phase enum for hook system
class Phase(Enum):
    SPEC = "SPEC"
    RED = "RED"
    GREEN = "GREEN"
    REFACTOR = "REFACTOR"
    SYNC = "SYNC"
    DEBUG = "DEBUG"
    PLANNING = "PLANNING"


class HookEvent(Enum):
    """Hook event types from Claude Code"""
    SESSION_START = "SessionStart"
    SESSION_END = "SessionEnd"
    USER_PROMPT_SUBMIT = "UserPromptSubmit"
    PRE_TOOL_USE = "PreToolUse"
    POST_TOOL_USE = "PostToolUse"
    SUBAGENT_START = "SubagentStart"
    SUBAGENT_STOP = "SubagentStop"


class HookPriority(Enum):
    """Hook execution priority levels"""
    CRITICAL = 1  # System-critical hooks (security, validation)
    HIGH = 2      # High-impact hooks (performance optimization)
    NORMAL = 3    # Standard hooks (logging, cleanup)
    LOW = 4       # Optional hooks (analytics, metrics)


@dataclass
class HookMetadata:
    """Metadata for a hook execution"""
    hook_path: str
    event_type: HookEvent
    priority: HookPriority
    estimated_execution_time_ms: float = 0.0
    last_execution_time: Optional[datetime] = None
    success_rate: float = 1.0
    phase_relevance: Dict[Phase, float] = field(default_factory=dict)
    token_cost_estimate: int = 0
    dependencies: Set[str] = field(default_factory=set)
    parallel_safe: bool = True


@dataclass
class HookExecutionResult:
    """Result of hook execution"""
    hook_path: str
    success: bool
    execution_time_ms: float
    token_usage: int
    output: Any
    error_message: Optional[str] = None
    metadata: Dict[str, Any] = field(default_factory=dict)


@dataclass
class HookPerformanceMetrics:
    """Performance metrics for hook system"""
    total_executions: int = 0
    successful_executions: int = 0
    average_execution_time_ms: float = 0.0
    total_token_usage: int = 0
    cache_hits: int = 0
    cache_misses: int = 0
    phase_distribution: Dict[Phase, int] = field(default_factory=dict)
    event_type_distribution: Dict[HookEvent, int] = field(default_factory=dict)


class JITEnhancedHookManager:
    """
    Enhanced Hook Manager with JIT Context Loading System integration

    Provides intelligent hook execution with phase-aware optimization,
    token budget management, and performance monitoring.
    """

    def __init__(
        self,
        hooks_directory: Optional[Path] = None,
        cache_directory: Optional[Path] = None,
        max_concurrent_hooks: int = 5,
        enable_performance_monitoring: bool = True
    ):
        """Initialize JIT-Enhanced Hook Manager

        Args:
            hooks_directory: Directory containing hook files
            cache_directory: Directory for hook cache and performance data
            max_concurrent_hooks: Maximum number of hooks to execute concurrently
            enable_performance_monitoring: Enable detailed performance tracking
        """
        self.hooks_directory = hooks_directory or Path.cwd() / ".claude" / "hooks"
        self.cache_directory = cache_directory or Path.cwd() / ".moai" / "cache" / "hooks"
        self.max_concurrent_hooks = max_concurrent_hooks
        self.enable_performance_monitoring = enable_performance_monitoring

        # Initialize JIT Context Loading System
        self.jit_loader = JITContextLoader()

        # Initialize caches and metadata storage
        self._initialize_caches()

        # Performance tracking
        self.metrics = HookPerformanceMetrics()
        self._performance_lock = threading.Lock()

        # Hook registry with metadata
        self._hook_registry: Dict[str, HookMetadata] = {}
        self._hooks_by_event: Dict[HookEvent, List[str]] = {}

        # Initialize hook registry
        self._discover_hooks()

    def _initialize_caches(self) -> None:
        """Initialize cache directories and data structures"""
        self.cache_directory.mkdir(parents=True, exist_ok=True)

        # Initialize hook result cache
        self._result_cache = ContextCache(max_size=100, max_memory_mb=50)

        # Initialize metadata cache
        self._metadata_cache: Dict[str, Dict[str, Any]] = {}

        # Performance log file
        self._performance_log_path = self.cache_directory / "performance.jsonl"

    def _discover_hooks(self) -> None:
        """Discover and register all available hooks"""
        if not self.hooks_directory.exists():
            return

        for hook_file in self.hooks_directory.rglob("*.py"):
            relative_path = hook_file.relative_to(self.hooks_directory)
            # Skip dunder modules and shared helpers under lib/. The path
            # parts must be checked for lib/, since Path.name never contains
            # a directory separator.
            if hook_file.name.startswith("__") or "lib" in relative_path.parts:
                continue

            hook_path_str = str(relative_path)

            # Extract event type from filename
            event_type = self._extract_event_type_from_filename(hook_file.name)
            if event_type:
                self._register_hook(hook_path_str, event_type)

    def _extract_event_type_from_filename(self, filename: str) -> Optional[HookEvent]:
        """Extract hook event type from filename pattern"""
        filename_lower = filename.lower()

        if "session_start" in filename_lower:
            return HookEvent.SESSION_START
        elif "session_end" in filename_lower:
            return HookEvent.SESSION_END
        elif "user_prompt" in filename_lower or "prompt_submit" in filename_lower:
            return HookEvent.USER_PROMPT_SUBMIT
        elif "pre_tool" in filename_lower or "pretool" in filename_lower:
            return HookEvent.PRE_TOOL_USE
        elif "post_tool" in filename_lower or "posttool" in filename_lower:
            return HookEvent.POST_TOOL_USE
        elif "subagent_start" in filename_lower:
            return HookEvent.SUBAGENT_START
        elif "subagent_stop" in filename_lower:
            return HookEvent.SUBAGENT_STOP
        else:
            return None
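
    # For example, a hook named "session_start__notify.py" (hypothetical)
    # maps to SESSION_START and "pre_tool_use_guard.py" to PRE_TOOL_USE;
    # files matching no pattern return None and are skipped by
    # _discover_hooks.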

    def _register_hook(self, hook_path: str, event_type: HookEvent) -> None:
        """Register a hook with metadata"""
        # Generate metadata based on hook characteristics
        metadata = HookMetadata(
            hook_path=hook_path,
            event_type=event_type,
            priority=self._determine_hook_priority(hook_path, event_type),
            estimated_execution_time_ms=self._estimate_execution_time(hook_path),
            phase_relevance=self._determine_phase_relevance(hook_path, event_type),
            token_cost_estimate=self._estimate_token_cost(hook_path),
            parallel_safe=self._is_parallel_safe(hook_path)
        )

        self._hook_registry[hook_path] = metadata
        self._hooks_by_event.setdefault(event_type, []).append(hook_path)

    def _determine_hook_priority(self, hook_path: str, event_type: HookEvent) -> HookPriority:
        """Determine hook priority based on its characteristics"""
        filename = hook_path.lower()

        # Security and validation hooks are critical
        if any(keyword in filename for keyword in ["security", "validation", "health_check"]):
            return HookPriority.CRITICAL

        # Performance optimization hooks are high priority
        if any(keyword in filename for keyword in ["performance", "optimizer", "jit"]):
            return HookPriority.HIGH

        # Cleanup and logging hooks are normal priority
        if any(keyword in filename for keyword in ["cleanup", "log", "tracker"]):
            return HookPriority.NORMAL

        # Analytics and metrics are low priority
        if any(keyword in filename for keyword in ["analytics", "metrics", "stats"]):
            return HookPriority.LOW

        # Default priority based on event type: pre-execution validation is
        # important, everything else falls back to NORMAL
        if event_type == HookEvent.PRE_TOOL_USE:
            return HookPriority.HIGH
        return HookPriority.NORMAL
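
    # For example (hypothetical filenames): "security_gate.py" -> CRITICAL,
    # "jit_warmup.py" -> HIGH, "session_cleanup.py" -> NORMAL, and
    # "usage_analytics.py" -> LOW; the checks run top-down, so a name that
    # matches several keyword lists takes the most critical match.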

    def _estimate_execution_time(self, hook_path: str) -> float:
        """Estimate hook execution time based on historical data and characteristics"""
        # Check cache for historical execution time
        cache_key = f"exec_time:{hook_path}"
        if cache_key in self._metadata_cache:
            cached_time = self._metadata_cache[cache_key].get("avg_time_ms")
            if cached_time:
                return cached_time

        # Estimate based on hook characteristics
        filename = hook_path.lower()

        # Hooks with git operations tend to be slower
        if "git" in filename:
            return 200.0  # 200ms estimate for git operations

        # Hooks with network operations are slower
        if any(keyword in filename for keyword in ["fetch", "api", "network"]):
            return 500.0  # 500ms estimate for network operations

        # Hooks with file I/O are moderate
        if any(keyword in filename for keyword in ["read", "write", "parse"]):
            return 50.0  # 50ms estimate for file I/O

        # Simple hooks are fast
        return 10.0  # 10ms estimate for simple operations
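
    # The keyword-based figures above are only priors: once a hook has run,
    # _update_hook_metadata stores a measured average under the same
    # "exec_time:" cache key, and that measurement takes precedence here.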

    def _determine_phase_relevance(self, hook_path: str, event_type: HookEvent) -> Dict[Phase, float]:
        """Determine hook relevance to different development phases"""
        filename = hook_path.lower()

        # Default relevance for all phases
        default_relevance = 0.5

        # Keywords that mark a hook as fully relevant (1.0) to a phase
        phase_keywords = {
            Phase.SPEC: ["spec", "plan", "design", "requirement"],
            Phase.RED: ["test", "red", "tdd", "assert"],
            Phase.GREEN: ["implement", "code", "green", "build"],
            Phase.REFACTOR: ["refactor", "optimize", "improve", "clean"],
            Phase.SYNC: ["sync", "doc", "document", "deploy"],
            Phase.DEBUG: ["debug", "error", "troubleshoot", "log"],
            Phase.PLANNING: ["plan", "analysis", "strategy"],
        }

        return {
            phase: 1.0 if any(keyword in filename for keyword in keywords) else default_relevance
            for phase, keywords in phase_keywords.items()
        }

    def _estimate_token_cost(self, hook_path: str) -> int:
        """Estimate token cost for hook execution"""
        # Base token cost for any hook
        base_cost = 100

        # Additional cost based on hook characteristics
        filename = hook_path.lower()

        if any(keyword in filename for keyword in ["analysis", "report", "generate"]):
            base_cost += 500  # Higher cost for analysis/generation
        elif any(keyword in filename for keyword in ["log", "simple", "basic"]):
            base_cost += 50  # Lower cost for simple operations

        return base_cost

    def _is_parallel_safe(self, hook_path: str) -> bool:
        """Determine if hook can be executed in parallel"""
        filename = hook_path.lower()

        # Hooks that modify shared state are not parallel safe
        if any(keyword in filename for keyword in ["write", "modify", "update", "delete"]):
            return False

        # Hooks with external dependencies might not be parallel safe
        if any(keyword in filename for keyword in ["database", "network", "api"]):
            return False

        # Most hooks are parallel safe by default
        return True

    async def execute_hooks(
        self,
        event_type: HookEvent,
        context: Dict[str, Any],
        user_input: Optional[str] = None,
        phase: Optional[Phase] = None,
        max_total_execution_time_ms: float = 1000.0
    ) -> List[HookExecutionResult]:
        """Execute hooks for a specific event with JIT optimization

        Args:
            event_type: Type of hook event
            context: Execution context data
            user_input: User input for phase detection
            phase: Current development phase (if known)
            max_total_execution_time_ms: Maximum total execution time for all hooks

        Returns:
            List of hook execution results
        """
        start_time = time.time()

        # Detect phase if not provided
        if phase is None and user_input:
            try:
                phase = self.jit_loader.phase_detector.detect_phase(user_input)
            except AttributeError:
                # Fallback if the JIT loader has no phase detector
                phase = Phase.SPEC

        # Get relevant hooks for this event
        hook_paths = self._hooks_by_event.get(event_type, [])

        # Filter and prioritize hooks based on phase and performance
        prioritized_hooks = self._prioritize_hooks(hook_paths, phase)

        # Load optimized context using JIT system
        optimized_context = await self._load_optimized_context(
            event_type, context, phase, prioritized_hooks
        )

        # Execute hooks with optimization
        results = await self._execute_hooks_optimized(
            prioritized_hooks, optimized_context, max_total_execution_time_ms
        )

        # Update performance metrics
        if self.enable_performance_monitoring:
            self._update_performance_metrics(event_type, phase, results, start_time)

        return results

    def _prioritize_hooks(
        self,
        hook_paths: List[str],
        phase: Optional[Phase]
    ) -> List[Tuple[str, float]]:
        """Prioritize hooks based on phase relevance and performance characteristics

        Args:
            hook_paths: List of hook file paths
            phase: Current development phase

        Returns:
            List of (hook_path, priority_score) tuples sorted by priority
        """
        hook_priorities = []

        for hook_path in hook_paths:
            metadata = self._hook_registry.get(hook_path)
            if not metadata:
                continue

            # Calculate priority score
            priority_score = 0.0

            # Base priority (lower number = higher priority)
            priority_score += metadata.priority.value * 10

            # Phase relevance bonus: higher relevance lowers the score,
            # which raises the priority
            if phase and phase in metadata.phase_relevance:
                relevance = metadata.phase_relevance[phase]
                priority_score -= relevance * 5

            # Performance penalty (slower hooks get lower priority)
            priority_score += metadata.estimated_execution_time_ms / 100

            # Reliability penalty for hooks that fail often
            if metadata.success_rate < 0.9:
                priority_score += 5

            hook_priorities.append((hook_path, priority_score))

        # Sort by priority score (lower is better)
        hook_priorities.sort(key=lambda x: x[1])

        return hook_priorities
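
    # Worked example with hypothetical numbers: a HIGH hook (2 * 10 = 20)
    # fully relevant to the current phase (-1.0 * 5) at ~50ms (+0.5) scores
    # 15.5, while a NORMAL hook (3 * 10 = 30) with the default 0.5 relevance
    # (-2.5) at ~10ms (+0.1) scores 27.6, so the HIGH hook runs first.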

    async def _load_optimized_context(
        self,
        event_type: HookEvent,
        context: Dict[str, Any],
        phase: Optional[Phase],
        prioritized_hooks: List[Tuple[str, float]]
    ) -> Dict[str, Any]:
        """Load optimized context using JIT system for hook execution

        Args:
            event_type: Hook event type
            context: Original context
            phase: Current development phase
            prioritized_hooks: List of prioritized hooks

        Returns:
            Optimized context with relevant information
        """
        # Create synthetic user input for context loading
        synthetic_input = f"Hook execution for {event_type.value}"
        if phase:
            synthetic_input += f" during {phase.value} phase"

        # Load context using JIT system
        try:
            jit_context, _context_metrics = await self.jit_loader.load_context(
                user_input=synthetic_input,
                context=context
            )
        except (TypeError, AttributeError):
            # Fallback to basic context if the JIT loader interface differs
            jit_context = context.copy()

        # Add hook-specific context
        optimized_context = jit_context.copy()
        optimized_context.update({
            "hook_event_type": event_type.value,
            "hook_phase": phase.value if phase else None,
            "hook_execution_mode": "optimized",
            "prioritized_hooks": [hook_path for hook_path, _ in prioritized_hooks[:5]]  # Top 5 hooks
        })

        return optimized_context

    async def _execute_hooks_optimized(
        self,
        prioritized_hooks: List[Tuple[str, float]],
        context: Dict[str, Any],
        max_total_execution_time_ms: float
    ) -> List[HookExecutionResult]:
        """Execute hooks with optimization and time management

        Args:
            prioritized_hooks: List of (hook_path, priority_score) tuples
            context: Optimized execution context
            max_total_execution_time_ms: Maximum total execution time

        Returns:
            List of hook execution results
        """
        results = []
        remaining_time = max_total_execution_time_ms

        # Separate hooks into parallel-safe and sequential
        parallel_hooks = []
        sequential_hooks = []

        for hook_path, _ in prioritized_hooks:
            metadata = self._hook_registry.get(hook_path)
            if metadata and metadata.parallel_safe:
                parallel_hooks.append(hook_path)
            else:
                sequential_hooks.append(hook_path)

        # Execute parallel hooks first (faster)
        if parallel_hooks and remaining_time > 0:
            parallel_results = await self._execute_hooks_parallel(
                parallel_hooks, context, remaining_time
            )
            results.extend(parallel_results)

            # Update remaining time
            total_parallel_time = sum(r.execution_time_ms for r in parallel_results)
            remaining_time -= total_parallel_time

        # Execute sequential hooks with remaining time
        if sequential_hooks and remaining_time > 0:
            sequential_results = await self._execute_hooks_sequential(
                sequential_hooks, context, remaining_time
            )
            results.extend(sequential_results)

        return results
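
    # Note that the budget is debited by the *sum* of the parallel hooks'
    # execution times rather than their wall-clock span, so the time left
    # for the sequential hooks is a conservative estimate.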

    async def _execute_hooks_parallel(
        self,
        hook_paths: List[str],
        context: Dict[str, Any],
        max_total_time_ms: float
    ) -> List[HookExecutionResult]:
        """Execute hooks in parallel with time management"""
        results = []

        # Create semaphore to limit concurrent executions
        semaphore = asyncio.Semaphore(self.max_concurrent_hooks)

        async def execute_single_hook(hook_path: str) -> HookExecutionResult:
            async with semaphore:
                try:
                    return await self._execute_single_hook(hook_path, context)
                except Exception as e:
                    return HookExecutionResult(
                        hook_path=hook_path,
                        success=False,
                        execution_time_ms=0.0,
                        token_usage=0,
                        output=None,
                        error_message=str(e)
                    )

        # Execute hooks with timeout
        tasks = [execute_single_hook(hook_path) for hook_path in hook_paths]

        try:
            # Wait for all hooks with total timeout
            completed_results = await asyncio.wait_for(
                asyncio.gather(*tasks, return_exceptions=True),
                timeout=max_total_time_ms / 1000.0
            )

            for result in completed_results:
                if isinstance(result, HookExecutionResult):
                    results.append(result)
                elif isinstance(result, Exception):
                    # Defensive: the inner try/except should already have
                    # converted failures into results
                    results.append(HookExecutionResult(
                        hook_path="unknown",
                        success=False,
                        execution_time_ms=0.0,
                        token_usage=0,
                        output=None,
                        error_message=str(result)
                    ))

        except asyncio.TimeoutError:
            # Some hooks didn't complete in time
            pass

        return results
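
    # Caveat: asyncio.wait_for cancels the whole gather() on timeout, so if
    # the batch exceeds its budget even the hooks that finished in time are
    # discarded and this method returns an empty list.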

    async def _execute_hooks_sequential(
        self,
        hook_paths: List[str],
        context: Dict[str, Any],
        max_total_time_ms: float
    ) -> List[HookExecutionResult]:
        """Execute hooks sequentially with time management"""
        results = []
        remaining_time = max_total_time_ms

        for hook_path in hook_paths:
            if remaining_time <= 0:
                break

            try:
                result = await self._execute_single_hook(hook_path, context)
                results.append(result)

                # Update remaining time
                remaining_time -= result.execution_time_ms

            except Exception as e:
                results.append(HookExecutionResult(
                    hook_path=hook_path,
                    success=False,
                    execution_time_ms=0.0,
                    token_usage=0,
                    output=None,
                    error_message=str(e)
                ))

        return results

    async def _execute_single_hook(
        self,
        hook_path: str,
        context: Dict[str, Any]
    ) -> HookExecutionResult:
        """Execute a single hook and return result

        Args:
            hook_path: Path to hook file
            context: Execution context

        Returns:
            Hook execution result
        """
        start_time = time.time()
        full_hook_path = self.hooks_directory / hook_path

        try:
            # Check cache for recent results
            cache_key = f"hook_result:{hook_path}:{hash(str(context))}"
            cached_result = self._result_cache.get(cache_key)
            if cached_result:
                return cached_result

            # Prepare hook execution
            metadata = self._hook_registry.get(hook_path)
            if not metadata:
                raise ValueError(f"Hook metadata not found for {hook_path}")

            # Execute hook in subprocess for isolation
            result = await self._execute_hook_subprocess(
                full_hook_path, context, metadata
            )

            # Cache successful results
            if result.success:
                self._result_cache.put(cache_key, result)

            # Update metadata
            self._update_hook_metadata(hook_path, result)

            return result

        except Exception as e:
            execution_time = (time.time() - start_time) * 1000

            return HookExecutionResult(
                hook_path=hook_path,
                success=False,
                execution_time_ms=execution_time,
                token_usage=0,
                output=None,
                error_message=str(e)
            )
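
    # The cache key hashes str(context), so two contexts only share a cached
    # result if they stringify identically; and since Python salts string
    # hashing per process, these keys are stable only within a single run.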

    async def _execute_hook_subprocess(
        self,
        hook_path: Path,
        context: Dict[str, Any],
        metadata: HookMetadata
    ) -> HookExecutionResult:
        """Execute hook in isolated subprocess

        Args:
            hook_path: Full path to hook file
            context: Execution context
            metadata: Hook metadata

        Returns:
            Hook execution result
        """
        start_time = time.time()

        try:
            # Prepare input for hook
            hook_input = json.dumps(context)

            # Execute hook with timeout (at least one second)
            timeout_seconds = max(1.0, metadata.estimated_execution_time_ms / 1000.0)

            process = await asyncio.create_subprocess_exec(
                "uv", "run", str(hook_path),
                stdin=asyncio.subprocess.PIPE,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE,
                cwd=Path.cwd()
            )

            try:
                stdout, stderr = await asyncio.wait_for(
                    process.communicate(input=hook_input.encode()),
                    timeout=timeout_seconds
                )
            except asyncio.TimeoutError:
                process.kill()
                await process.wait()
                raise TimeoutError(f"Hook execution timed out after {timeout_seconds}s")

            execution_time_ms = (time.time() - start_time) * 1000
            success = process.returncode == 0

            # Parse output as JSON, falling back to raw text
            output = None
            if stdout:
                try:
                    output = json.loads(stdout.decode())
                except json.JSONDecodeError:
                    output = stdout.decode()

            error_message = None
            if stderr:
                error_message = stderr.decode().strip()
            elif process.returncode != 0:
                error_message = f"Hook exited with code {process.returncode}"

            return HookExecutionResult(
                hook_path=str(hook_path.relative_to(self.hooks_directory)),
                success=success,
                execution_time_ms=execution_time_ms,
                token_usage=metadata.token_cost_estimate,
                output=output,
                error_message=error_message
            )

        except Exception as e:
            execution_time_ms = (time.time() - start_time) * 1000

            return HookExecutionResult(
                hook_path=str(hook_path.relative_to(self.hooks_directory)),
                success=False,
                execution_time_ms=execution_time_ms,
                token_usage=metadata.token_cost_estimate,
                output=None,
                error_message=str(e)
            )

    def _update_hook_metadata(self, hook_path: str, result: HookExecutionResult) -> None:
        """Update hook metadata based on execution result"""
        metadata = self._hook_registry.get(hook_path)
        if not metadata:
            return

        # Update the running average execution time
        cache_key = f"exec_time:{hook_path}"
        if cache_key not in self._metadata_cache:
            self._metadata_cache[cache_key] = {"count": 0, "total_time": 0.0}

        cache_entry = self._metadata_cache[cache_key]
        cache_entry["count"] += 1
        cache_entry["total_time"] += result.execution_time_ms
        cache_entry["avg_time_ms"] = cache_entry["total_time"] / cache_entry["count"]

        # Update success rate as an exponential moving average:
        # 80% previous history, 20% latest outcome
        metadata.success_rate = (metadata.success_rate * 0.8) + (1.0 if result.success else 0.0) * 0.2
        metadata.last_execution_time = datetime.now()
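
    # With the 0.8/0.2 weighting, one failure drops a perfect success_rate
    # from 1.0 to 0.8 (already penalized by the 0.9 cutoff in
    # _prioritize_hooks), and a second consecutive failure drops it to 0.64,
    # below the 0.8 bar that get_hook_recommendations flags as unreliable.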

    def _update_performance_metrics(
        self,
        event_type: HookEvent,
        phase: Optional[Phase],
        results: List[HookExecutionResult],
        start_time: float
    ) -> None:
        """Update performance metrics"""
        with self._performance_lock:
            self.metrics.total_executions += len(results)
            self.metrics.successful_executions += sum(1 for r in results if r.success)

            # Smooth the per-batch mean into the running average; guard
            # against empty batches to avoid division by zero
            if results:
                total_execution_time = sum(r.execution_time_ms for r in results)
                self.metrics.average_execution_time_ms = (
                    (self.metrics.average_execution_time_ms * 0.9) +
                    (total_execution_time / len(results) * 0.1)
                )

            self.metrics.total_token_usage += sum(r.token_usage for r in results)

            if phase:
                self.metrics.phase_distribution[phase] = (
                    self.metrics.phase_distribution.get(phase, 0) + 1
                )

            self.metrics.event_type_distribution[event_type] = (
                self.metrics.event_type_distribution.get(event_type, 0) + 1
            )

            # Log performance data
            self._log_performance_data(event_type, phase, results, start_time)

    def _log_performance_data(
        self,
        event_type: HookEvent,
        phase: Optional[Phase],
        results: List[HookExecutionResult],
        start_time: float
    ) -> None:
        """Log performance data to file"""
        log_entry = {
            "timestamp": datetime.now().isoformat(),
            "event_type": event_type.value,
            "phase": phase.value if phase else None,
            "total_hooks": len(results),
            "successful_hooks": sum(1 for r in results if r.success),
            "total_execution_time_ms": sum(r.execution_time_ms for r in results),
            "total_token_usage": sum(r.token_usage for r in results),
            "system_time_ms": (time.time() - start_time) * 1000,
            "results": [
                {
                    "hook_path": r.hook_path,
                    "success": r.success,
                    "execution_time_ms": r.execution_time_ms,
                    "token_usage": r.token_usage,
                    "error_message": r.error_message
                }
                for r in results
            ]
        }

        try:
            with open(self._performance_log_path, "a") as f:
                f.write(json.dumps(log_entry) + "\n")
        except Exception:
            pass  # Silently fail on logging

    def get_performance_metrics(self) -> HookPerformanceMetrics:
        """Get current performance metrics"""
        with self._performance_lock:
            return HookPerformanceMetrics(
                total_executions=self.metrics.total_executions,
                successful_executions=self.metrics.successful_executions,
                average_execution_time_ms=self.metrics.average_execution_time_ms,
                total_token_usage=self.metrics.total_token_usage,
                cache_hits=self._result_cache.get_stats().get("hits", 0),
                cache_misses=self._result_cache.get_stats().get("misses", 0),
                phase_distribution=self.metrics.phase_distribution.copy(),
                event_type_distribution=self.metrics.event_type_distribution.copy()
            )

    def get_hook_recommendations(
        self,
        event_type: Optional[HookEvent] = None,
        phase: Optional[Phase] = None
    ) -> Dict[str, Any]:
        """Get recommendations for hook optimization

        Args:
            event_type: Specific event type to analyze
            phase: Specific phase to analyze

        Returns:
            Dictionary with optimization recommendations
        """
        recommendations = {
            "slow_hooks": [],
            "unreliable_hooks": [],
            "phase_mismatched_hooks": [],
            "optimization_suggestions": []
        }

        # Analyze hook performance
        for hook_path, metadata in self._hook_registry.items():
            if event_type and metadata.event_type != event_type:
                continue

            # Check for slow hooks
            if metadata.estimated_execution_time_ms > 200:
                recommendations["slow_hooks"].append({
                    "hook_path": hook_path,
                    "estimated_time_ms": metadata.estimated_execution_time_ms,
                    "suggestion": "Consider optimizing or making this hook parallel-safe"
                })

            # Check for unreliable hooks
            if metadata.success_rate < 0.8:
                recommendations["unreliable_hooks"].append({
                    "hook_path": hook_path,
                    "success_rate": metadata.success_rate,
                    "suggestion": "Review error handling and improve reliability"
                })

            # Check for phase mismatch
            if phase:
                relevance = metadata.phase_relevance.get(phase, 0.0)
                if relevance < 0.3:
                    recommendations["phase_mismatched_hooks"].append({
                        "hook_path": hook_path,
                        "phase": phase.value,
                        "relevance": relevance,
                        "suggestion": "This hook may not be relevant for the current phase"
                    })

        # Generate optimization suggestions
        if recommendations["slow_hooks"]:
            recommendations["optimization_suggestions"].append(
                "Consider implementing caching for frequently executed slow hooks"
            )

        if recommendations["unreliable_hooks"]:
            recommendations["optimization_suggestions"].append(
                "Add retry logic and better error handling for unreliable hooks"
            )

        if recommendations["phase_mismatched_hooks"]:
            recommendations["optimization_suggestions"].append(
                "Use phase-based hook filtering to skip irrelevant hooks"
            )

        return recommendations

    async def cleanup(self) -> None:
        """Cleanup resources and save state"""
        # Save performance metrics
        metrics_file = self.cache_directory / "metrics.json"
        try:
            metrics_data = {
                "timestamp": datetime.now().isoformat(),
                "metrics": {
                    **self.get_performance_metrics().__dict__,
                    # Enum keys are not JSON-serializable, so store the
                    # distributions keyed by the enum values instead
                    "phase_distribution": {
                        p.value: n for p, n in self.metrics.phase_distribution.items()
                    },
                    "event_type_distribution": {
                        e.value: n for e, n in self.metrics.event_type_distribution.items()
                    },
                },
                "hook_metadata": {
                    hook_path: {
                        "estimated_execution_time_ms": metadata.estimated_execution_time_ms,
                        "success_rate": metadata.success_rate,
                        "last_execution_time": (
                            metadata.last_execution_time.isoformat()
                            if metadata.last_execution_time else None
                        )
                    }
                    for hook_path, metadata in self._hook_registry.items()
                }
            }

            with open(metrics_file, "w") as f:
                json.dump(metrics_data, f, indent=2)
        except Exception:
            pass

        # Clear caches
        self._result_cache.clear()
        self._metadata_cache.clear()


# Global instance for easy access
_jit_hook_manager: Optional[JITEnhancedHookManager] = None


def get_jit_hook_manager() -> JITEnhancedHookManager:
    """Get or create global JIT hook manager instance"""
    global _jit_hook_manager
    if _jit_hook_manager is None:
        _jit_hook_manager = JITEnhancedHookManager()
    return _jit_hook_manager
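
# Note: this lazy singleton is not lock-protected; two threads racing through
# the None check could briefly construct two managers, which mostly costs
# duplicated hook discovery work.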


# Convenience functions for common hook operations
async def execute_session_start_hooks(
    context: Dict[str, Any],
    user_input: Optional[str] = None
) -> List[HookExecutionResult]:
    """Execute SessionStart hooks with JIT optimization"""
    manager = get_jit_hook_manager()
    return await manager.execute_hooks(
        HookEvent.SESSION_START,
        context,
        user_input=user_input
    )


async def execute_pre_tool_hooks(
    context: Dict[str, Any],
    user_input: Optional[str] = None
) -> List[HookExecutionResult]:
    """Execute PreToolUse hooks with JIT optimization"""
    manager = get_jit_hook_manager()
    return await manager.execute_hooks(
        HookEvent.PRE_TOOL_USE,
        context,
        user_input=user_input
    )


async def execute_session_end_hooks(
    context: Dict[str, Any],
    user_input: Optional[str] = None
) -> List[HookExecutionResult]:
    """Execute SessionEnd hooks with JIT optimization"""
    manager = get_jit_hook_manager()
    return await manager.execute_hooks(
        HookEvent.SESSION_END,
        context,
        user_input=user_input
    )


def get_hook_performance_metrics() -> HookPerformanceMetrics:
    """Get current hook performance metrics"""
    manager = get_jit_hook_manager()
    return manager.get_performance_metrics()


def get_hook_optimization_recommendations(
    event_type: Optional[HookEvent] = None,
    phase: Optional[Phase] = None
) -> Dict[str, Any]:
    """Get hook optimization recommendations"""
    manager = get_jit_hook_manager()
    return manager.get_hook_recommendations(event_type, phase)
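
# Minimal usage sketch (hypothetical context values; assumes hooks exist
# under .claude/hooks):
#
#   results = await execute_pre_tool_hooks(
#       {"tool": "Bash", "command": "pytest"},
#       user_input="Run the failing test suite",
#   )
#   failures = [r for r in results if not r.success]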


if __name__ == "__main__":
    # Example usage and testing
    async def test_jit_hook_manager():
        manager = JITEnhancedHookManager()

        # Test hook execution
        context = {"test": True, "user": "test_user"}
        results = await manager.execute_hooks(
            HookEvent.SESSION_START,
            context,
            user_input="Testing JIT enhanced hook system"
        )

        print(f"Executed {len(results)} hooks")
        for result in results:
            print(f"  {result.hook_path}: {'✓' if result.success else '✗'} ({result.execution_time_ms:.1f}ms)")

        # Show metrics
        metrics = manager.get_performance_metrics()
        print("\nPerformance Metrics:")
        print(f"  Total executions: {metrics.total_executions}")
        print(f"  Success rate: {metrics.successful_executions}/{metrics.total_executions}")
        print(f"  Avg execution time: {metrics.average_execution_time_ms:.1f}ms")

        # Cleanup
        await manager.cleanup()

    # Run test
    asyncio.run(test_jit_hook_manager())