Coverage for .claude/hooks/moai/lib/state_tracking.py: 0.00%

420 statements  

« prev     ^ index     » next       coverage.py v7.11.3, created at 2025-11-19 08:00 +0900

1#!/usr/bin/env python3 

2"""State tracking utilities for hook execution and deduplication 

3 

4Provides centralized state management for hook execution tracking, 

5command deduplication, and duplicate prevention. 

6""" 

7 

8import atexit 

9import json 

10import threading 

11import time 

12import uuid 

13import weakref 

14from datetime import datetime 

15from typing import Any, Dict, Optional 

16 

17from lib import ( 

18 ExecutionResult, 

19 HookConfiguration, 

20 configure_logging, 

21 get_logger, 

22 get_performance_metrics, 

23 record_cache_hit, 

24 record_cache_miss, 

25 record_execution_metrics, 

26) 

27 

28 

class SingletonMeta(type):
    """Metaclass implementing a thread-safe Singleton with cleanup support.

    Instances are kept in a per-class registry shared by every class using
    this metaclass.  Creation uses double-checked locking so the common
    (already-created) path avoids taking the lock.
    """

    _instances = {}
    _lock = threading.Lock()

    def __call__(cls, *args, **kwargs):
        # Fast path: instance already exists, no locking required.
        if cls in cls._instances:
            return cls._instances[cls]
        with cls._lock:
            # Re-check under the lock in case another thread won the race.
            if cls not in cls._instances:
                cls._instances[cls] = super().__call__(*args, **kwargs)
        return cls._instances[cls]

    def cleanup_all_instances(cls):
        """Run cleanup() on every registered singleton, then clear the registry.

        Cleanup errors are deliberately swallowed (best-effort shutdown).
        """
        with cls._lock:
            for inst in cls._instances.values():
                cleanup = getattr(inst, 'cleanup', None)
                if cleanup is not None:
                    try:
                        cleanup()
                    except Exception:
                        pass  # best-effort: ignore cleanup failures
            cls._instances.clear()

53 

54 

55class HookStateManager(metaclass=SingletonMeta): 

56 """Centralized state management for hook execution tracking and deduplication 

57 

58 Handles: 

59 - Hook execution counting and tracking 

60 - Phase-based deduplication for SessionStart 

61 - Command deduplication within time windows 

62 - Thread-safe state operations 

63 - Persistent state storage 

64 - Performance monitoring and metrics collection 

65 - Configurable deduplication parameters 

66 """ 

67 

68 def __init__(self, cwd: str, config: Optional[HookConfiguration] = None): 

69 """Initialize state manager for given working directory 

70 

71 Args: 

72 cwd: Current working directory path 

73 config: Optional configuration object, defaults to environment-based config 

74 """ 

75 self.cwd = cwd 

76 self.config = config or HookConfiguration.from_env() 

77 self.logger = get_logger() 

78 

79 # Configure logging based on config 

80 configure_logging( 

81 debug_mode=self.config.debug_mode, 

82 verbose=self.config.enable_verbose_logging 

83 ) 

84 

85 # Initialize state directory with fallback logic 

86 self.state_dir = self.config.get_state_dir(cwd) 

87 self.logger.debug(f"Using state directory: {self.state_dir}") 

88 

89 # Thread safety with configurable timeout 

90 self._lock = threading.RLock() 

91 self._lock_timeout = self.config.lock_timeout_seconds 

92 

93 # Thread lifecycle management 

94 self._cleanup_event = threading.Event() 

95 self._cleanup_thread = None 

96 self._threads = [] # Track all created threads for proper cleanup 

97 

98 # State files 

99 self.hook_state_file = self.state_dir / "hook_execution_state.json" 

100 self.command_state_file = self.state_dir / "command_execution_state.json" 

101 self.performance_metrics_file = self.state_dir / "performance_metrics.json" 

102 

103 # In-memory cache for performance 

104 self._hook_state_cache: Optional[Dict[str, Any]] = None 

105 self._command_state_cache: Optional[Dict[str, Any]] = None 

106 self._cache_timestamp = 0 

107 

108 # Performance tracking 

109 self._performance_metrics = get_performance_metrics() 

110 

111 # Start cache cleanup thread if enabled 

112 if self.config.enable_caching and self.config.cache_cleanup_interval > 0: 

113 self._start_cache_cleanup_thread() 

114 

115 # Register cleanup on garbage collection 

116 weakref.finalize(self, self._cleanup_on_finalize) 

117 

    def _cleanup_on_finalize(self):
        """Best-effort cleanup hook for garbage-collection-time finalization.

        Delegates to cleanup(); errors are swallowed because a finalizer
        must never raise.
        """
        try:
            self.cleanup()
        except Exception:
            pass  # Silently ignore cleanup errors during finalization

124 

125 def _start_cache_cleanup_thread(self): 

126 """Start background thread for cache cleanup""" 

127 def cleanup_task(): 

128 while not self._cleanup_event.is_set(): 

129 # Use event.wait() instead of time.sleep() for immediate response to stop signal 

130 if self._cleanup_event.wait(timeout=self.config.cache_cleanup_interval): 

131 break # Stop signal received 

132 try: 

133 self._cleanup_expired_cache_entries() 

134 except Exception as e: 

135 self.logger.warning(f"Cache cleanup task failed: {e}") 

136 

137 cleanup_thread = threading.Thread(target=cleanup_task, daemon=True) 

138 cleanup_thread.start() 

139 self._threads.append(cleanup_thread) 

140 self.logger.debug("Started cache cleanup background thread") 

141 

142 def _cleanup_expired_cache_entries(self): 

143 """Clean up expired cache entries""" 

144 current_time = time.time() 

145 

146 # Clean hook state cache 

147 if (self._hook_state_cache and 

148 current_time - self._cache_timestamp > self.config.state_cache_ttl): 

149 self._hook_state_cache = None 

150 self._cache_timestamp = 0 

151 self.logger.debug("Cleaned expired hook state cache") 

152 

153 # Clean command state cache 

154 if (self._command_state_cache and 

155 current_time - self._cache_timestamp > self.config.state_cache_ttl): 

156 self._command_state_cache = None 

157 self._cache_timestamp = 0 

158 self.logger.debug("Cleaned expired command state cache") 

159 

160 def _load_hook_state(self) -> Dict[str, Any]: 

161 """Load hook execution state with caching and error handling 

162 

163 Returns: 

164 Dictionary containing hook execution state 

165 """ 

166 current_time = time.time() 

167 

168 # Use cache if recent (within configured TTL) 

169 if (self._hook_state_cache and 

170 current_time - self._cache_timestamp < self.config.state_cache_ttl): 

171 if self.config.log_state_changes: 

172 self.logger.debug("Hook state cache hit") 

173 record_cache_hit() 

174 return self._hook_state_cache 

175 

176 try: 

177 if self.hook_state_file.exists(): 

178 with open(self.hook_state_file, "r", encoding=self.config.state_file_encoding) as f: 

179 state = json.load(f) 

180 

181 self._hook_state_cache = state 

182 self._cache_timestamp = current_time 

183 self._performance_metrics.record_state_read() 

184 if self.config.log_state_changes: 

185 self.logger.debug(f"Loaded hook state from {self.hook_state_file}") 

186 record_cache_hit() 

187 return state 

188 except (IOError, json.JSONDecodeError, Exception) as e: 

189 self.logger.warning(f"Failed to load hook state: {e}") 

190 self._performance_metrics.record_io_error() 

191 record_cache_miss() 

192 

193 # Default state structure 

194 default_state = {} 

195 self._hook_state_cache = default_state 

196 self._cache_timestamp = current_time 

197 record_cache_miss() 

198 return default_state 

199 

200 def _save_hook_state(self, state: Dict[str, Any]) -> bool: 

201 """Save hook execution state with error handling 

202 

203 Args: 

204 state: Hook state to save 

205 

206 Returns: 

207 bool: True if save was successful, False otherwise 

208 """ 

209 if not self.config.enable_state_persistence: 

210 return False 

211 

212 try: 

213 # Create backup if enabled 

214 if self.config.backup_on_write and self.hook_state_file.exists(): 

215 backup_file = self.hook_state_file.with_suffix('.json.backup') 

216 import shutil 

217 shutil.copy2(self.hook_state_file, backup_file) 

218 

219 with open(self.hook_state_file, "w", encoding=self.config.state_file_encoding) as f: 

220 json.dump(state, f, indent=self.config.state_file_indent) 

221 

222 self._hook_state_cache = state 

223 self._cache_timestamp = time.time() 

224 self._performance_metrics.record_state_write() 

225 

226 if self.config.log_state_changes: 

227 self.logger.debug(f"Saved hook state to {self.hook_state_file}") 

228 return True 

229 

230 except (IOError, OSError, Exception) as e: 

231 self.logger.error(f"Failed to save hook state: {e}") 

232 self._performance_metrics.record_io_error() 

233 return False 

234 

235 def _load_command_state(self) -> Dict[str, Any]: 

236 """Load command execution state with caching and error handling 

237 

238 Returns: 

239 Dictionary containing command execution state 

240 """ 

241 current_time = time.time() 

242 

243 # Use cache if recent (within configured TTL) 

244 if (self._command_state_cache and 

245 current_time - self._cache_timestamp < self.config.state_cache_ttl): 

246 if self.config.log_state_changes: 

247 self.logger.debug("Command state cache hit") 

248 record_cache_hit() 

249 return self._command_state_cache 

250 

251 try: 

252 if self.command_state_file.exists(): 

253 with open(self.command_state_file, "r", encoding=self.config.state_file_encoding) as f: 

254 state = json.load(f) 

255 

256 self._command_state_cache = state 

257 self._cache_timestamp = current_time 

258 self._performance_metrics.record_state_read() 

259 if self.config.log_state_changes: 

260 self.logger.debug(f"Loaded command state from {self.command_state_file}") 

261 record_cache_hit() 

262 return state 

263 except (IOError, json.JSONDecodeError, Exception) as e: 

264 self.logger.warning(f"Failed to load command state: {e}") 

265 self._performance_metrics.record_io_error() 

266 record_cache_miss() 

267 

268 # Default state structure 

269 default_state = { 

270 "last_command": None, 

271 "last_timestamp": None, 

272 "is_running": False, 

273 "execution_count": 0, 

274 "duplicate_count": 0 

275 } 

276 self._command_state_cache = default_state 

277 self._cache_timestamp = current_time 

278 record_cache_miss() 

279 return default_state 

280 

281 def _save_command_state(self, state: Dict[str, Any]) -> bool: 

282 """Save command execution state with error handling 

283 

284 Args: 

285 state: Command state to save 

286 

287 Returns: 

288 bool: True if save was successful, False otherwise 

289 """ 

290 if not self.config.enable_state_persistence: 

291 return False 

292 

293 try: 

294 # Create backup if enabled 

295 if self.config.backup_on_write and self.command_state_file.exists(): 

296 backup_file = self.command_state_file.with_suffix('.json.backup') 

297 import shutil 

298 shutil.copy2(self.command_state_file, backup_file) 

299 

300 with open(self.command_state_file, "w", encoding=self.config.state_file_encoding) as f: 

301 json.dump(state, f, indent=self.config.state_file_indent) 

302 

303 self._command_state_cache = state 

304 self._cache_timestamp = time.time() 

305 self._performance_metrics.record_state_write() 

306 

307 if self.config.log_state_changes: 

308 self.logger.debug(f"Saved command state to {self.command_state_file}") 

309 return True 

310 

311 except (IOError, OSError, Exception) as e: 

312 self.logger.error(f"Failed to save command state: {e}") 

313 self._performance_metrics.record_io_error() 

314 return False 

315 

316 def track_hook_execution(self, hook_name: str, phase: str = None) -> ExecutionResult: 

317 """Track hook execution and return execution information 

318 

319 Args: 

320 hook_name: Name of the hook being executed 

321 phase: Optional phase for phase-based deduplication 

322 

323 Returns: 

324 ExecutionResult with detailed execution information including: 

325 - executed: Whether the hook was actually executed 

326 - duplicate: Whether this was a duplicate execution 

327 - execution_id: Unique identifier for this execution 

328 - execution_count: Total execution count 

329 - performance metrics and error information 

330 """ 

331 start_time = time.time() 

332 

333 try: 

334 with self._lock: 

335 # Acquire lock with timeout 

336 if not self._lock.acquire(timeout=self._lock_timeout): 

337 self.logger.warning("Failed to acquire lock for hook tracking") 

338 self._performance_metrics.record_concurrent_access_error() 

339 return ExecutionResult( 

340 executed=True, # Allow execution to continue despite lock issue 

341 duplicate=False, 

342 execution_id=str(uuid.uuid4()), 

343 timestamp=start_time, 

344 error="Failed to acquire lock for hook tracking" 

345 ) 

346 

347 state = self._load_hook_state() 

348 current_time = time.time() 

349 execution_id = str(uuid.uuid4()) 

350 

351 # Initialize hook state if not exists 

352 if hook_name not in state: 

353 state[hook_name] = { 

354 "count": 0, 

355 "last_execution": 0, 

356 "last_phase": None, 

357 "executions": [] 

358 } 

359 

360 hook_state = state[hook_name] 

361 

362 # Check for deduplication 

363 is_duplicate = False 

364 deduplication_reason = None 

365 

366 # Phase-based deduplication for SessionStart 

367 if hook_name == "SessionStart" and phase: 

368 # Phase transitions are allowed (clear->compact or compact->clear) 

369 if (phase == hook_state.get("last_phase") and 

370 current_time - hook_state["last_execution"] < self.config.hook_dedupe_window): 

371 # Same phase within time window - deduplicate 

372 is_duplicate = True 

373 deduplication_reason = f"same phase within {self.config.hook_dedupe_window}s window" 

374 else: 

375 # Different phase or time window expired - execute 

376 pass 

377 else: 

378 # Regular deduplication based on time window 

379 if (current_time - hook_state["last_execution"] < self.config.hook_dedupe_window): 

380 is_duplicate = True 

381 deduplication_reason = f"within {self.config.hook_dedupe_window}s deduplication window" 

382 

383 # Update state only if not duplicate 

384 if not is_duplicate: 

385 hook_state["count"] += 1 

386 hook_state["last_execution"] = current_time 

387 hook_state["last_phase"] = phase 

388 hook_state["executions"].append({ 

389 "timestamp": current_time, 

390 "phase": phase, 

391 "execution_id": execution_id 

392 }) 

393 

394 # Keep only recent executions (cleanup) 

395 recent_executions = [ 

396 e for e in hook_state["executions"] 

397 if current_time - e["timestamp"] < self.config.max_state_file_age_hours * 3600 

398 ] 

399 if len(recent_executions) != len(hook_state["executions"]): 

400 hook_state["executions"] = recent_executions 

401 

402 # Save state 

403 save_success = self._save_hook_state(state) 

404 if not save_success: 

405 self.logger.warning("Failed to save hook state") 

406 

407 # Create execution result 

408 execution_time_ms = (time.time() - start_time) * 1000 

409 execution_count = hook_state["count"] 

410 duplicate_count = state.get("duplicate_count", 0) 

411 

412 result = ExecutionResult( 

413 executed=not is_duplicate, 

414 duplicate=is_duplicate, 

415 execution_id=execution_id, 

416 timestamp=current_time, 

417 hook_name=hook_name, 

418 phase=phase, 

419 reason=deduplication_reason, 

420 execution_time_ms=execution_time_ms, 

421 execution_count=execution_count, 

422 duplicate_count=duplicate_count, 

423 state_operations_count=2, # Load + Save 

424 cache_hit=bool(self._hook_state_cache), 

425 warning=None if save_success else "Failed to save state but continuing execution" 

426 ) 

427 

428 # Record performance metrics 

429 record_execution_metrics( 

430 execution_time_ms, 

431 success=True, 

432 is_duplicate=is_duplicate 

433 ) 

434 

435 if self.config.log_state_changes: 

436 self.logger.info(f"Hook execution tracked: {hook_name} (executed: {not is_duplicate}, duplicate: {is_duplicate})") 

437 

438 return result 

439 

440 except Exception as e: 

441 execution_time_ms = (time.time() - start_time) * 1000 

442 

443 # Record failure metrics 

444 record_execution_metrics( 

445 execution_time_ms, 

446 success=False, 

447 is_duplicate=False 

448 ) 

449 

450 self.logger.error(f"Error in hook execution tracking: {e}") 

451 self._performance_metrics.record_other_error() 

452 

453 return ExecutionResult( 

454 executed=True, # Allow execution to continue despite error 

455 duplicate=False, 

456 execution_id=str(uuid.uuid4()), 

457 timestamp=start_time, 

458 hook_name=hook_name, 

459 phase=phase, 

460 error=str(e), 

461 execution_time_ms=execution_time_ms, 

462 warning="Execution tracking failed but continuing execution" 

463 ) 

464 

465 finally: 

466 # Release lock if acquired 

467 if self._lock.locked(): 

468 self._lock.release() 

469 

470 def deduplicate_command(self, command: str) -> ExecutionResult: 

471 """Check and deduplicate command execution within time window 

472 

473 Args: 

474 command: Command string to check for deduplication 

475 

476 Returns: 

477 ExecutionResult with detailed deduplication information including: 

478 - executed: Whether the command should execute 

479 - duplicate: Whether this was a duplicate 

480 - reason: Reason for deduplication decision 

481 - execution_count: Total execution count 

482 - performance metrics and error information 

483 """ 

484 start_time = time.time() 

485 

486 try: 

487 with self._lock: 

488 # Acquire lock with timeout 

489 if not self._lock.acquire(timeout=self._lock_timeout): 

490 self.logger.warning("Failed to acquire lock for command deduplication") 

491 self._performance_metrics.record_concurrent_access_error() 

492 return ExecutionResult( 

493 executed=True, # Allow execution to continue despite lock issue 

494 duplicate=False, 

495 execution_id=str(uuid.uuid4()), 

496 timestamp=start_time, 

497 command=command, 

498 error="Failed to acquire lock for command deduplication" 

499 ) 

500 

501 state = self._load_command_state() 

502 current_time = time.time() 

503 execution_id = str(uuid.uuid4()) 

504 

505 # Check if command is an Alfred command (only deduplicate these) 

506 if not command or not command.startswith("/alfred:"): 

507 result = ExecutionResult( 

508 executed=True, 

509 duplicate=False, 

510 execution_id=execution_id, 

511 timestamp=current_time, 

512 command=command, 

513 reason="non-alfred command", 

514 execution_count=state["execution_count"], 

515 execution_time_ms=(time.time() - start_time) * 1000, 

516 state_operations_count=1 # Load 

517 ) 

518 

519 # Record performance metrics 

520 record_execution_metrics( 

521 result.execution_time_ms, 

522 success=True, 

523 is_duplicate=False 

524 ) 

525 

526 if self.config.log_state_changes: 

527 self.logger.info(f"Non-Alfred command: {command}") 

528 

529 return result 

530 

531 # Check for duplicate within time window 

532 last_cmd = state.get("last_command") 

533 last_timestamp = state.get("last_timestamp") 

534 

535 is_duplicate = False 

536 deduplication_reason = None 

537 

538 if (last_cmd and last_timestamp and 

539 command == last_cmd and 

540 current_time - last_timestamp < self.config.command_dedupe_window): 

541 

542 # Duplicate detected 

543 is_duplicate = True 

544 deduplication_reason = f"within {self.config.command_dedupe_window}s deduplication window" 

545 state["duplicate_count"] += 1 

546 state["is_running"] = True # Mark as running to prevent further duplicates 

547 state["duplicate_timestamp"] = datetime.fromtimestamp(current_time).isoformat() 

548 

549 # Save state 

550 save_success = self._save_command_state(state) 

551 if not save_success: 

552 self.logger.warning("Failed to save command state for duplicate detection") 

553 

554 result = ExecutionResult( 

555 executed=True, # Allow execution but mark as duplicate 

556 duplicate=True, 

557 execution_id=execution_id, 

558 timestamp=current_time, 

559 command=command, 

560 phase=None, 

561 reason=deduplication_reason, 

562 execution_count=state["execution_count"], 

563 duplicate_count=state["duplicate_count"], 

564 execution_time_ms=(time.time() - start_time) * 1000, 

565 state_operations_count=2, # Load + Save 

566 cache_hit=bool(self._command_state_cache), 

567 warning=None if save_success else "Failed to save duplicate state" 

568 ) 

569 

570 else: 

571 # Not a duplicate - update state and execute 

572 state["last_command"] = command 

573 state["last_timestamp"] = current_time 

574 state["is_running"] = True 

575 state["execution_count"] += 1 

576 

577 # Save state 

578 save_success = self._save_command_state(state) 

579 if not save_success: 

580 self.logger.warning("Failed to save command state for normal execution") 

581 

582 result = ExecutionResult( 

583 executed=True, 

584 duplicate=False, 

585 execution_id=execution_id, 

586 timestamp=current_time, 

587 command=command, 

588 reason="normal execution", 

589 execution_count=state["execution_count"], 

590 execution_time_ms=(time.time() - start_time) * 1000, 

591 state_operations_count=2, # Load + Save 

592 cache_hit=bool(self._command_state_cache), 

593 warning=None if save_success else "Failed to save command state" 

594 ) 

595 

596 # Record performance metrics 

597 record_execution_metrics( 

598 result.execution_time_ms, 

599 success=True, 

600 is_duplicate=is_duplicate 

601 ) 

602 

603 if self.config.log_state_changes: 

604 self.logger.info(f"Command deduplication: {command} (executed: {not is_duplicate}, duplicate: {is_duplicate})") 

605 

606 return result 

607 

608 except Exception as e: 

609 execution_time_ms = (time.time() - start_time) * 1000 

610 

611 # Record failure metrics 

612 record_execution_metrics( 

613 execution_time_ms, 

614 success=False, 

615 is_duplicate=False 

616 ) 

617 

618 self.logger.error(f"Error in command deduplication: {e}") 

619 self._performance_metrics.record_other_error() 

620 

621 return ExecutionResult( 

622 executed=True, # Allow execution to continue despite error 

623 duplicate=False, 

624 execution_id=str(uuid.uuid4()), 

625 timestamp=start_time, 

626 command=command, 

627 error=str(e), 

628 execution_time_ms=execution_time_ms, 

629 warning="Command deduplication failed but continuing execution" 

630 ) 

631 

632 finally: 

633 # Release lock if acquired 

634 if self._lock.locked(): 

635 self._lock.release() 

636 

637 def mark_command_complete(self, command: Optional[str] = None) -> None: 

638 """Mark command execution as complete 

639 

640 Args: 

641 command: Optional command that completed 

642 """ 

643 try: 

644 with self._lock: 

645 state = self._load_command_state() 

646 state["is_running"] = False 

647 state["last_timestamp"] = time.time() 

648 if command: 

649 state["last_command"] = command 

650 

651 self._save_command_state(state) 

652 

653 if self.config.log_state_changes: 

654 self.logger.info(f"Command marked as complete: {command or 'unknown'}") 

655 

656 except Exception as e: 

657 self.logger.error(f"Failed to mark command as complete: {e}") 

658 self._performance_metrics.record_other_error() 

659 

660 def get_hook_execution_count(self, hook_name: str) -> int: 

661 """Get total execution count for a hook""" 

662 state = self._load_hook_state() 

663 return state.get(hook_name, {}).get("count", 0) 

664 

665 def get_command_execution_count(self) -> int: 

666 """Get total command execution count""" 

667 state = self._load_command_state() 

668 return state.get("execution_count", 0) 

669 

670 def cleanup_old_states(self, max_age_hours: int = None) -> None: 

671 """Clean up old state entries to prevent state file bloat 

672 

673 Args: 

674 max_age_hours: Maximum age for state entries in hours, defaults to config setting 

675 """ 

676 if not self.config.enable_state_persistence: 

677 return 

678 

679 max_age = max_age_hours or self.config.max_state_file_age_hours 

680 current_time = time.time() 

681 max_age_seconds = max_age * 3600 

682 

683 try: 

684 with self._lock: 

685 # Clean up hook state 

686 hook_state = self._load_hook_state() 

687 

688 # Clean up old hook executions 

689 for hook_name in list(hook_state.keys()): 

690 hook_data = hook_state[hook_name] 

691 if "executions" in hook_data: 

692 recent_executions = [ 

693 e for e in hook_data["executions"] 

694 if current_time - e["timestamp"] < max_age_seconds 

695 ] 

696 if len(recent_executions) != len(hook_data["executions"]): 

697 hook_data["executions"] = recent_executions 

698 if self.config.log_state_changes: 

699 self.logger.debug(f"Cleaned up {len(recent_executions)} executions for {hook_name}") 

700 

701 # Remove hooks with no recent executions 

702 if (hook_data.get("last_execution", 0) < current_time - max_age_seconds): 

703 del hook_state[hook_name] 

704 if self.config.log_state_changes: 

705 self.logger.debug(f"Removed old hook state: {hook_name}") 

706 

707 self._save_hook_state(hook_state) 

708 

709 # Clean up command state 

710 command_state = self._load_command_state() 

711 if (command_state.get("last_timestamp", 0) < current_time - max_age_seconds): 

712 # Reset command state if too old 

713 command_state.update({ 

714 "last_command": None, 

715 "last_timestamp": None, 

716 "is_running": False, 

717 "execution_count": 0, 

718 "duplicate_count": 0 

719 }) 

720 self._save_command_state(command_state) 

721 if self.config.log_state_changes: 

722 self.logger.debug("Reset old command state") 

723 

724 except Exception as e: 

725 self.logger.error(f"Failed to clean up old states: {e}") 

726 self._performance_metrics.record_other_error() 

727 

    def get_performance_summary(self) -> Dict[str, Any]:
        """Get performance summary for the state manager.

        Delegates to the metrics object obtained from
        get_performance_metrics() in __init__.

        Returns:
            Dictionary with performance metrics summary
        """
        return self._performance_metrics.get_summary()

735 

    def reset_performance_metrics(self) -> None:
        """Reset performance metrics for this state manager.

        NOTE(review): this only re-fetches the object returned by
        get_performance_metrics(); whether that actually zeroes any counters
        depends on lib internals not visible here — confirm before relying
        on this as a true reset.
        """
        self._performance_metrics = get_performance_metrics()
        if self.config.log_state_changes:
            self.logger.info("Performance metrics reset")

743 

744 def cleanup(self, timeout: float = 5.0) -> None: 

745 """Cleanup resources and stop all threads 

746 

747 Args: 

748 timeout: Maximum time to wait for threads to finish (default: 5.0 seconds) 

749 """ 

750 if self._cleanup_event.is_set(): 

751 return # Already cleaned up 

752 

753 # Signal all threads to stop 

754 self._cleanup_event.set() 

755 

756 # Wait for all threads to finish 

757 for thread in self._threads: 

758 if thread.is_alive(): 

759 thread.join(timeout=timeout) 

760 if thread.is_alive(): 

761 self.logger.warning(f"Thread {thread.name} did not finish within timeout") 

762 

763 # Clear thread list 

764 self._threads.clear() 

765 

766 # Clear caches 

767 self._hook_state_cache = None 

768 self._command_state_cache = None 

769 self._cache_timestamp = 0 

770 

771 if self.config.log_state_changes: 

772 self.logger.debug("HookStateManager cleanup completed") 

773 

    def stop(self) -> None:
        """Alias for cleanup() - stop the state manager and its threads."""
        self.cleanup()

777 

    def __enter__(self):
        """Context manager entry: returns this manager unchanged."""
        return self

781 

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Context manager exit - ensures cleanup.

        Returns False so any in-flight exception propagates to the caller.
        """
        self.cleanup()
        return False  # Don't suppress exceptions

786 

787 def __del__(self): 

788 """Destructor - ensures cleanup on garbage collection""" 

789 try: 

790 if not self._cleanup_event.is_set(): 

791 self.cleanup(timeout=1.0) # Quick cleanup during garbage collection 

792 except Exception: 

793 pass # Silently ignore cleanup errors during destruction 

794 

795 

# Global state manager registry: one entry per working directory.
# All access is serialized through _state_manager_lock.
_state_managers: Dict[str, HookStateManager] = {}
_state_manager_lock = threading.RLock()

799 

800 

def get_state_manager(cwd: str, config: Optional[HookConfiguration] = None) -> HookStateManager:
    """Get or create the state manager for a given working directory.

    Repeated calls with the same cwd return the cached instance.

    NOTE(review): HookStateManager uses SingletonMeta, which keys on the
    class — constructing it for a second cwd returns the first instance.
    This registry therefore only truly separates cwds if the metaclass is
    changed; confirm intended behavior.

    Args:
        cwd: Current working directory path
        config: Optional configuration object (used only on first creation)

    Returns:
        HookStateManager instance for the given directory
    """
    with _state_manager_lock:
        # IMPROVEMENT: key the registry directly by cwd.  The original
        # stored synthetic "cwd_<n>" keys that were never looked up and
        # did an O(n) scan over instance.cwd attributes on every call;
        # its post-failure cleanup branch was dead code (the instance was
        # only registered after a successful constructor).
        manager = _state_managers.get(cwd)
        if manager is not None:
            return manager

        manager = HookStateManager(cwd, config)
        _state_managers[cwd] = manager
        return manager

836 

837 

def track_hook_execution(hook_name: str, cwd: str, phase: str = None, config: Optional[HookConfiguration] = None) -> ExecutionResult:
    """Convenience function to track hook execution.

    Looks up (or creates) the per-cwd HookStateManager and delegates to its
    track_hook_execution method.

    Args:
        hook_name: Name of the hook being executed
        cwd: Current working directory
        phase: Optional phase for phase-based deduplication
        config: Optional configuration object

    Returns:
        ExecutionResult with execution information
    """
    manager = get_state_manager(cwd, config)
    return manager.track_hook_execution(hook_name, phase)

852 

853 

def deduplicate_command(command: str, cwd: str, config: Optional[HookConfiguration] = None) -> ExecutionResult:
    """Convenience function to deduplicate a command.

    Looks up (or creates) the per-cwd HookStateManager and delegates to its
    deduplicate_command method.

    Args:
        command: Command string to check for deduplication
        cwd: Current working directory
        config: Optional configuration object

    Returns:
        ExecutionResult with deduplication information
    """
    manager = get_state_manager(cwd, config)
    return manager.deduplicate_command(command)

867 

868 

def mark_command_complete(command: Optional[str] = None, cwd: Optional[str] = None, config: Optional[HookConfiguration] = None) -> None:
    """Convenience wrapper: mark a command complete on the cwd's manager.

    Args:
        command: Optional command that completed
        cwd: Current working directory (falls back to ".")
        config: Optional configuration object
    """
    manager = get_state_manager(cwd or ".", config)
    manager.mark_command_complete(command)

881 

882 

def cleanup_old_states(max_age_hours: Optional[int] = None, cwd: Optional[str] = None, config: Optional[HookConfiguration] = None) -> None:
    """Convenience wrapper: prune old state entries for the cwd's manager.

    Args:
        max_age_hours: Maximum age for state entries in hours
        cwd: Current working directory (falls back to ".")
        config: Optional configuration object
    """
    manager = get_state_manager(cwd or ".", config)
    manager.cleanup_old_states(max_age_hours)

895 

896 

def get_performance_summary(cwd: Optional[str] = None, config: Optional[HookConfiguration] = None) -> Dict[str, Any]:
    """Convenience wrapper: performance summary for the cwd's manager.

    Args:
        cwd: Current working directory (falls back to ".")
        config: Optional configuration object

    Returns:
        Dictionary with performance metrics summary
    """
    manager = get_state_manager(cwd or ".", config)
    return manager.get_performance_summary()

911 

912 

def cleanup_all_state_managers(timeout: float = 5.0) -> None:
    """Stop every registered state manager's threads and empty the registry.

    Args:
        timeout: Maximum time to wait for threads to finish (default: 5.0 seconds)
    """
    with _state_manager_lock:
        for manager in list(_state_managers.values()):
            try:
                manager.cleanup(timeout)
            except Exception as e:
                # One failing manager must not block cleanup of the rest.
                try:
                    logger = get_logger()
                    logger.error(f"Error during state manager cleanup: {e}")
                except Exception:
                    pass  # logging itself is best-effort here

        _state_managers.clear()

933 

934 

def force_cleanup_all_singletons() -> None:
    """Force cleanup of all singleton instances via the metaclass helper.

    Errors are deliberately swallowed: this is registered with atexit and
    must never raise during interpreter shutdown.
    """
    try:
        HookStateManager.cleanup_all_instances()
    except Exception:
        pass  # Silently ignore cleanup errors

941 

942 

# Module-level cleanup on interpreter exit: stop the per-cwd managers first,
# then force-clean any remaining singleton instances (runs in reverse
# registration order, so the singleton sweep executes first).
atexit.register(cleanup_all_state_managers)
atexit.register(force_cleanup_all_singletons)

945atexit.register(force_cleanup_all_singletons)