Coverage for src / moai_adk / core / hooks / post_tool_auto_spec_completion.py: 13.28%

241 statements  

« prev     ^ index     » next       coverage.py v7.12.0, created at 2025-11-20 20:52 +0900

1"""PostToolUse Hook for Automated SPEC Completion System.""" 

2 

3import hashlib 

4import logging 

5import os 

6import re 

7import time 

8from typing import Any, Dict, List 

9 

10 

11# SpecGenerator: Placeholder for spec generation functionality 

# SpecGenerator: Placeholder for spec generation functionality
class SpecGenerator:
    """Placeholder SpecGenerator used by the auto-spec completion hook.

    Note: the hook's ``execute()`` path calls ``analyze()`` on this object,
    which was previously missing (every file then failed with
    AttributeError). A minimal, best-effort ``analyze`` is provided here in
    addition to the original ``generate_spec``.
    """

    def __init__(self):
        # Human-readable identifier for logging/diagnostics.
        self.name = "SpecGenerator"

    def analyze(self, file_path: str) -> Dict[str, Any]:
        """Best-effort analysis of *file_path*.

        Returns a dict consumed by ``calculate_completion_confidence`` and
        the spec template generators. Reading the file is best-effort:
        unreadable or missing files yield an empty-content analysis rather
        than raising.
        """
        try:
            with open(file_path, "r", encoding="utf-8", errors="replace") as f:
                content = f.read()
        except OSError:
            content = ""
        return {
            "description": f"Auto-generated analysis for {file_path}",
            # Neutral midpoint scores; a real analyzer would compute these.
            "structure_score": 0.5,
            "domain_accuracy": 0.5,
            "documentation_level": 0.5,
            "content_length": len(content),
        }

    def generate_spec(self, file_path: str, content: str) -> str:
        """Generate a basic SPEC document."""
        return f"SPEC document for {file_path}\n\nContent analysis:\n{content[:200]}..."

21 

22 

23# BaseHook: Simplified base hook class for auto-spec completion 

# BaseHook: Simplified base hook class for auto-spec completion
class BaseHook:
    """Minimal hook base class carrying identifying metadata only."""

    def __init__(self):
        # Description and identity reported to the hook framework.
        self.description = "PostToolUse Hook for Automated SPEC Completion System"
        self.name = "PostToolAutoSpecCompletion"

30 

31 

# Module-level logger; handler and level configuration is left to the host
# application that loads this hook.
logger = logging.getLogger(__name__)

34 

35 

class PostToolAutoSpecCompletion(BaseHook):
    """PostToolUse hook that auto-generates SPEC documents.

    After Write/Edit/MultiEdit tool executions it inspects the touched code
    files and, when confidence is high enough, emits complete EARS-format
    SPEC documents.
    """

    def __init__(self):
        super().__init__()
        # Files already handled this session, to avoid duplicate specs.
        self.processed_files = set()
        self.spec_generator = SpecGenerator()
        self.auto_config = self._get_auto_spec_config()

51 

52 def _get_auto_spec_config(self) -> Dict[str, Any]: 

53 """Get auto-spec completion configuration.""" 

54 try: 

55 from moai_adk.core.config.config_manager import ConfigManager 

56 

57 config = ConfigManager() 

58 return config.get_value( 

59 "auto_spec_completion", 

60 { 

61 "enabled": True, 

62 "min_confidence": 0.7, 

63 "auto_open_editor": True, 

64 "supported_languages": ["python", "javascript", "typescript", "go"], 

65 "excluded_patterns": ["test_", "spec_", "__tests__"], 

66 }, 

67 ) 

68 except ImportError: 

69 return { 

70 "enabled": True, 

71 "min_confidence": 0.7, 

72 "auto_open_editor": True, 

73 "supported_languages": ["python", "javascript", "typescript", "go"], 

74 "excluded_patterns": ["test_", "spec_", "__tests__"], 

75 } 

76 

77 def should_trigger_spec_completion( 

78 self, tool_name: str, tool_args: Dict[str, Any] 

79 ) -> bool: 

80 """ 

81 Determine if spec completion should be triggered. 

82 

83 Args: 

84 tool_name: Name of the tool that was executed 

85 tool_args: Arguments passed to the tool 

86 

87 Returns: 

88 True if spec completion should be triggered 

89 """ 

90 # Check if auto-spec completion is enabled 

91 if not self.auto_config.get("enabled", True): 

92 logger.debug("Auto-spec completion is disabled") 

93 return False 

94 

95 # Only trigger for Write/Edit/MultiEdit tools 

96 if tool_name not in ["Write", "Edit", "MultiEdit"]: 

97 logger.debug(f"Tool {tool_name} does not trigger spec completion") 

98 return False 

99 

100 # Extract file paths from tool arguments 

101 file_paths = self._extract_file_paths(tool_args) 

102 

103 if not file_paths: 

104 logger.debug("No file paths found in tool arguments") 

105 return False 

106 

107 # Check if any file is a supported language 

108 supported_files = [] 

109 for file_path in file_paths: 

110 if self._is_supported_file(file_path): 

111 supported_files.append(file_path) 

112 else: 

113 logger.debug( 

114 f"File {file_path} is not supported for auto-spec completion" 

115 ) 

116 

117 if not supported_files: 

118 logger.debug("No supported files found") 

119 return False 

120 

121 # Check for excluded patterns 

122 excluded_files = [] 

123 for file_path in supported_files: 

124 if self._is_excluded_file(file_path): 

125 excluded_files.append(file_path) 

126 

127 # Filter out excluded files 

128 target_files = [f for f in supported_files if f not in excluded_files] 

129 

130 if not target_files: 

131 logger.debug("All files are excluded from auto-spec completion") 

132 return False 

133 

134 return True 

135 

136 def _extract_file_paths(self, tool_args: Dict[str, Any]) -> List[str]: 

137 """Extract file paths from tool arguments.""" 

138 file_paths = [] 

139 

140 # Handle Write tool 

141 if "file_path" in tool_args: 

142 file_paths.append(tool_args["file_path"]) 

143 

144 # Handle Edit tool 

145 if "file_path" in tool_args: 

146 file_paths.append(tool_args["file_path"]) 

147 

148 # Handle MultiEdit tool 

149 if "edits" in tool_args: 

150 for edit in tool_args["edits"]: 

151 if "file_path" in edit: 

152 file_paths.append(edit["file_path"]) 

153 

154 # Remove duplicates and resolve relative paths 

155 unique_paths = [] 

156 for path in file_paths: 

157 if path not in unique_paths: 

158 abs_path = os.path.abspath(path) 

159 unique_paths.append(abs_path) 

160 

161 return unique_paths 

162 

163 def _is_supported_file(self, file_path: str) -> bool: 

164 """Check if file is supported for auto-spec completion.""" 

165 # Get file extension 

166 file_ext = os.path.splitext(file_path)[1].lower() 

167 

168 # Map extensions to languages 

169 supported_extensions = { 

170 ".py": "python", 

171 ".js": "javascript", 

172 ".jsx": "javascript", 

173 ".ts": "typescript", 

174 ".tsx": "typescript", 

175 ".go": "go", 

176 } 

177 

178 if file_ext not in supported_extensions: 

179 return False 

180 

181 # Check if language is supported 

182 language = supported_extensions[file_ext] 

183 supported_languages = self.auto_config.get("supported_languages", []) 

184 return language in supported_languages 

185 

186 def _is_excluded_file(self, file_path: str) -> bool: 

187 """Check if file should be excluded from auto-spec completion.""" 

188 file_name = os.path.basename(file_path) 

189 file_dir = os.path.basename(os.path.dirname(file_path)) 

190 

191 excluded_patterns = self.auto_config.get("excluded_patterns", []) 

192 

193 for pattern in excluded_patterns: 

194 # Check filename patterns 

195 if re.search(pattern, file_name): 

196 return True 

197 # Check directory patterns 

198 if re.search(pattern, file_dir): 

199 return True 

200 

201 return False 

202 

203 def detect_code_changes( 

204 self, tool_name: str, tool_args: Dict[str, Any], result: Any 

205 ) -> List[str]: 

206 """ 

207 Detect code changes from tool execution. 

208 

209 Args: 

210 tool_name: Name of the tool that was executed 

211 tool_args: Arguments passed to the tool 

212 result: Result from tool execution 

213 

214 Returns: 

215 List of affected file paths 

216 """ 

217 file_paths = [] 

218 

219 # Write tool creates new files 

220 if tool_name == "Write": 

221 if "file_path" in tool_args: 

222 file_paths.append(tool_args["file_path"]) 

223 

224 # Edit tool modifies existing files 

225 elif tool_name == "Edit": 

226 if "file_path" in tool_args: 

227 file_paths.append(tool_args["file_path"]) 

228 

229 # MultiEdit tool can modify multiple files 

230 elif tool_name == "MultiEdit": 

231 if "edits" in tool_args: 

232 for edit in tool_args["edits"]: 

233 if "file_path" in edit: 

234 file_paths.append(edit["file_path"]) 

235 

236 # Convert to absolute paths 

237 abs_paths = [os.path.abspath(path) for path in file_paths] 

238 

239 # Filter out already processed files 

240 new_paths = [path for path in abs_paths if path not in self.processed_files] 

241 

242 # Add to processed files 

243 self.processed_files.update(new_paths) 

244 

245 return new_paths 

246 

247 def calculate_completion_confidence(self, analysis: Dict[str, Any]) -> float: 

248 """ 

249 Calculate confidence score for SPEC completion. 

250 

251 Args: 

252 analysis: Code analysis result 

253 

254 Returns: 

255 Confidence score between 0.0 and 1.0 

256 """ 

257 # Default confidence if analysis is incomplete 

258 if not analysis: 

259 return 0.5 

260 

261 structure_score = analysis.get("structure_score", 0.5) 

262 domain_accuracy = analysis.get("domain_accuracy", 0.5) 

263 documentation_level = analysis.get("documentation_level", 0.5) 

264 

265 # Weighted calculation 

266 # Structure clarity: 30% 

267 # Domain accuracy: 40% 

268 # Documentation level: 30% 

269 confidence = ( 

270 structure_score * 0.3 + domain_accuracy * 0.4 + documentation_level * 0.3 

271 ) 

272 

273 return min(max(confidence, 0.0), 1.0) 

274 

275 def generate_complete_spec( 

276 self, analysis: Dict[str, Any], file_path: str 

277 ) -> Dict[str, str]: 

278 """ 

279 Generate complete SPEC documents in EARS format. 

280 

281 Args: 

282 analysis: Code analysis result 

283 file_path: Path to the analyzed file 

284 

285 Returns: 

286 Dictionary containing spec.md, plan.md, and acceptance.md 

287 """ 

288 spec_id = self._generate_spec_id(file_path) 

289 file_name = os.path.basename(file_path) 

290 

291 # Generate basic spec content 

292 spec_md = self._generate_spec_content(analysis, spec_id, file_name) 

293 plan_md = self._generate_plan_content(analysis, spec_id, file_name) 

294 acceptance_md = self._generate_acceptance_content(analysis, spec_id, file_name) 

295 

296 return { 

297 "spec_id": spec_id, 

298 "spec_md": spec_md, 

299 "plan_md": plan_md, 

300 "acceptance_md": acceptance_md, 

301 } 

302 

303 def _generate_spec_id(self, file_path: str) -> str: 

304 """Generate unique SPEC ID from file path.""" 

305 # Extract meaningful name from file path 

306 file_name = os.path.basename(file_path) 

307 name_parts = file_name.split("_") 

308 

309 # Convert to uppercase and join 

310 meaningful_name = "".join(part.upper() for part in name_parts if part) 

311 

312 # Add hash to ensure uniqueness 

313 file_hash = hashlib.md5(file_path.encode()).hexdigest()[:4] 

314 

315 return f"{meaningful_name}-{file_hash}" 

316 

    def _generate_spec_content(
        self, analysis: Dict[str, Any], spec_id: str, file_name: str
    ) -> str:
        """Render the main spec.md markdown for an auto-generated SPEC.

        Args:
            analysis: Code analysis result; optional keys ('description',
                'language', 'state_requirements', 'event_requirements',
                'specifications') override the generic defaults used below.
            spec_id: Identifier produced by ``_generate_spec_id``.
            file_name: Basename of the analyzed file.

        Returns:
            The complete spec.md content as a single string.
        """
        # NOTE(review): the frontmatter is JSON-style quoted keys between
        # '---' markers, closed by a single '}' ('}}' in the f-string) —
        # presumably the downstream parser expects exactly this shape;
        # confirm before changing.
        template = f"""---
 "id": "SPEC-{spec_id}",
 "title": "Auto-generated SPEC for {file_name}",
 "title_en": "Auto-generated SPEC for {file_name}",
 "version": "1.0.0",
 "status": "pending",
 "created": "{time.strftime('%Y-%m-%d')}",
 "author": "@alfred-auto",
 "reviewer": "",
 "category": "FEATURE",
 "priority": "MEDIUM",
 "tags": ["auto-generated", "{spec_id}"],
 "language": "en",
 "estimated_complexity": "auto"
}}
---

## Auto-generated SPEC for {file_name}

### Overview

{analysis.get('description', 'This spec was auto-generated based on code analysis.')}

### Environment

- **Project**: MoAI-ADK Auto-generated SPEC
- **Language**: {analysis.get('language', 'Python')}
- **File**: {file_name}
- **Generation Method**: Automatic analysis-based
- **Status**: Review required

### Assumptions

1. Code structure is clearly defined
2. Domain-specific terminology is expected to be used
3. Standard development practices are assumed to be followed
4. Generated SPEC will be finalized after user review

### Requirements

#### Ubiquitous Requirements

- **REQ-001**: System must perform the functionality of {file_name}
- **REQ-002**: Generated functionality must be stable
- **REQ-003**: Code must be written in a maintainable form
- **REQ-004**: Tests must satisfy functional requirements
- **REQ-005**: Code must comply with project coding standards

#### State-driven Requirements

{analysis.get('state_requirements', '- **REQ-006**: System must transition from initial state to target state')}

#### Event-driven Requirements

{analysis.get('event_requirements', '- **REQ-007**: System must respond when user input occurs')}

### Specifications

{analysis.get('specifications', '- **SPEC-001**: System must implement requirements')}

### Traceability


### Edit Guide

**User Review Recommendations:**
1. ✅ Verify technical clarity
2. ✅ Specify requirements
3. ✅ Review domain-specific terminology
4. ✅ Define state and event requirements
5. ✅ Detail specifications

**Quality Improvement Suggestions:**
- Add domain-specific terminology
- Specify user cases
- Define performance requirements
- Add security requirements
"""
        return template

400 

    def _generate_plan_content(
        self, analysis: Dict[str, Any], spec_id: str, file_name: str
    ) -> str:
        """Render the plan.md markdown for an auto-generated SPEC.

        Args:
            analysis: Code analysis result; optional keys ('architecture',
                'main_component', 'service_component', 'data_component',
                'component_4', 'existing_modules', 'new_modules',
                'performance_target') override the generic defaults below.
            spec_id: Identifier produced by ``_generate_spec_id``.
            file_name: Basename of the analyzed file.

        Returns:
            The complete plan.md content as a single string.
        """
        # The template body is fixed boilerplate; only the frontmatter ids,
        # the date and a handful of analysis-derived lines vary per file.
        return f"""---
 "id": "PLAN-{spec_id}",
 "spec_id": "SPEC-{spec_id}",
 "title": "Auto-generated Implementation Plan for {file_name}",
 "version": "1.0.0",
 "status": "pending",
 "created": "{time.strftime('%Y-%m-%d')}",
 "author": "@alfred-auto"
}}
---

## Auto-generated Implementation Plan for {file_name}

### Implementation Phases

#### Phase 1: Basic Structure Review (Priority: High)

- [ ] Complete code structure analysis
- [ ] Identify core functionality
- [ ] Verify dependencies
- [ ] Set up test environment

#### Phase 2: Requirements Specification (Priority: Medium)

- [ ] Specify ubiquitous requirements
- [ ] Define state-driven requirements
- [ ] Review event-driven requirements
- [ ] Set performance requirements

#### Phase 3: Implementation Planning (Priority: Medium)

- [ ] Design module architecture
- [ ] Define interfaces
- [ ] Design data structures
- [ ] Plan error handling

#### Phase 4: Test Strategy Development (Priority: High)

- [ ] Plan unit tests
- [ ] Plan integration tests
- [ ] User story-based testing
- [ ] Implement test automation

### Technical Approach

#### Architecture Design

```
{analysis.get('architecture', 'User Input → Validation → Business Logic → Data Processing → Output')}

[Core Components] → [External Services] → [Data Layer]
```

#### Core Components

1. **{analysis.get('main_component', 'Main Class')}**: Handle primary business logic
2. **{analysis.get('service_component', 'Service Layer')}**: Integrate external services
3. **{analysis.get('data_component', 'Data Layer')}**: Process and store data
4. **{analysis.get('component_4', 'Validation Layer')}**: Validate input and check validity

#### Dependency Management

**Utilize Existing Modules:**
- {analysis.get('existing_modules', 'Utilize standard libraries')}

**Add New Modules:**
- {analysis.get('new_modules', 'Add as needed')}

### Success Criteria

#### Functional Criteria

- ✅ All requirements implemented
- ✅ Test coverage 85% or higher
- ✅ Performance targets met
- ✅ User requirements satisfied

#### Performance Criteria

- ✅ Response time {analysis.get('performance_target', 'within 1 second')}
- ✅ Memory usage optimized
- ✅ Parallel processing supported
- ✅ Scalability verified

#### Quality Criteria

- ✅ Code quality verification passed
- ✅ Security scanning passed
- ✅ Documentation completeness verified
- ✅ Maintainability validated

### Next Steps

1. **Immediate**: Basic structure review (1-2 days)
2. **Weekly Goal**: Requirements specification (3-5 days)
3. **2-Week Goal**: Implementation completion (7-14 days)
4. **Release Preparation**: Testing and validation (14-16 days)
"""

    def _generate_acceptance_content(
        self, analysis: Dict[str, Any], spec_id: str, file_name: str
    ) -> str:
        """Render the acceptance.md markdown for an auto-generated SPEC.

        Args:
            analysis: Code analysis result; the optional keys referenced
                below (must_have_*, should_have_*, response_time,
                concurrent_users, memory_usage, cpu_usage, security_req_*,
                user_scenario_*) override the generic placeholder text.
            spec_id: Identifier produced by ``_generate_spec_id``.
            file_name: Basename of the analyzed file.

        Returns:
            The complete acceptance.md content as a single string.
        """
        # Fixed checklist boilerplate; analysis keys customize a subset of
        # the bullet items.
        return f"""---
 "id": "ACCEPT-{spec_id}",
 "spec_id": "SPEC-{spec_id}",
 "title": "Auto-generated Acceptance Criteria for {file_name}",
 "version": "1.0.0",
 "status": "pending",
 "created": "{time.strftime('%Y-%m-%d')}",
 "author": "@alfred-auto"
}}
---

## Auto-generated Acceptance Criteria for {file_name}

### Acceptance Criteria

#### Basic Functionality

**Must-have:**
- [ ] {analysis.get('must_have_1', 'System must operate normally')}
- [ ] {analysis.get('must_have_2', 'User interface must display correctly')}
- [ ] {analysis.get('must_have_3', 'Data processing logic must function properly')}

**Should-have:**
- [ ] {analysis.get('should_have_1', 'User experience must be smooth')}
- [ ] {analysis.get('should_have_2', 'Performance targets must be met')}

#### Performance Testing

**Performance Requirements:**
- [ ] Response time: {analysis.get('response_time', 'within 1 second')}
- [ ] Concurrent users: support {analysis.get('concurrent_users', '100 users')} or more
- [ ] Memory usage: {analysis.get('memory_usage', '100MB or less')}
- [ ] CPU utilization: {analysis.get('cpu_usage', '50% or less')}

**Load Testing:**
- [ ] Functional load testing passed
- [ ] Long-term stability testing passed
- [ ] Recovery testing passed

#### Security Testing

**Security Requirements:**
- [ ] {analysis.get('security_req_1', 'Authentication and authorization verification passed')}
- [ ] {analysis.get('security_req_2', 'Input validation passed')}
- [ ] {analysis.get('security_req_3', 'SQL injection protection passed')}

**Vulnerability Testing:**
- [ ] OWASP Top 10 inspection passed
- [ ] Security scanning passed
- [ ] Permission settings verification passed

#### Compatibility Testing

**Browser Compatibility:**
- [ ] Chrome latest version
- [ ] Firefox latest version
- [ ] Safari latest version
- [ ] Edge latest version

**Device Compatibility:**
- [ ] Desktop (1920x1080)
- [ ] Tablet (768x1024)
- [ ] Mobile (375x667)

#### User Acceptance Testing

**User Scenarios:**
- [ ] {analysis.get('user_scenario_1', 'General user scenario testing passed')}
- [ ] {analysis.get('user_scenario_2', 'Administrator scenario testing passed')}
- [ ] {analysis.get('user_scenario_3', 'Error handling scenario testing passed')}

**User Feedback:**
- [ ] User satisfaction 80% or higher
- [ ] Feature usability evaluation
- [ ] Design and UI/UX verification

### Validation Process

#### Phase 1: Unit Tests

- [ ] Developer testing completed
- [ ] Code review passed
- [ ] Automated testing passed
- [ ] Code coverage 85% or higher

#### Phase 2: Integration Tests

- [ ] Inter-module integration testing
- [ ] API integration testing
- [ ] Database integration testing
- [ ] External service integration testing

#### Phase 3: System Tests

- [ ] Full system functionality testing
- [ ] Performance testing
- [ ] Security testing
- [ ] Stability testing

#### Phase 4: User Tests

- [ ] Internal user testing
- [ ] Actual user testing
- [ ] Feedback collection and incorporation
- [ ] Final acceptance approval

### Validation Templates

#### Functionality Validation Template

| Function ID | Function Name | Expected Result | Actual Result | Status | Notes |
|-------------|---------------|-----------------|---------------|--------|-------|
| FUNC-001 | Function 1 | Success | Testing | In Progress | Description |
| FUNC-002 | Function 2 | Success | Success | Passed | Description |
| FUNC-003 | Function 3 | Success | Failed | Failed | Description |

#### Performance Validation Template

| Test Item | Target | Measured | Status | Notes |
|-----------|--------|----------|--------|-------|
| Response time | 1s | 0.8s | Passed | Description |
| Memory usage | 100MB | 85MB | Passed | Description |
| CPU utilization | 50% | 45% | Passed | Description |

### Completion Criteria

#### Pass Criteria

- ✅ All required functionality validation passed
- ✅ Performance requirements met
- ✅ Security testing passed
- ✅ User acceptance passed
- ✅ Documentation validation completed

#### Reporting

- [ ] Validation report created
- [ ] Identified issues documented
- [ ] Improvements defined
- [ ] Acceptance approval document prepared

**Validation Team:**
- Developer: @developer
- QA: @qa_engineer
- Product Owner: @product_owner
- Final Approver: @stakeholder
"""

655 

656 def validate_generated_spec(self, spec_content: Dict[str, str]) -> Dict[str, Any]: 

657 """ 

658 Validate quality of generated spec. 

659 

660 Args: 

661 spec_content: Dictionary with spec.md, plan.md, acceptance_md 

662 

663 Returns: 

664 Validation result with quality metrics 

665 """ 

666 quality_score = 0.0 

667 suggestions = [] 

668 

669 # Check EARS format compliance 

670 ears_compliance = self._check_ears_compliance(spec_content) 

671 quality_score += ears_compliance * 0.4 

672 

673 # Check completeness 

674 completeness = self._check_completeness(spec_content) 

675 quality_score += completeness * 0.3 

676 

677 # Check content quality 

678 content_quality = self._check_content_quality(spec_content) 

679 quality_score += content_quality * 0.3 

680 

681 # Generate suggestions 

682 if ears_compliance < 0.9: 

683 suggestions.append("Improvement needed to fully comply with EARS format.") 

684 

685 if completeness < 0.8: 

686 suggestions.append( 

687 "Requirements and specifications need to be more detailed." 

688 ) 

689 

690 if content_quality < 0.7: 

691 suggestions.append( 

692 "Domain-specific terminology and technical content need to be added." 

693 ) 

694 

695 return { 

696 "quality_score": min(max(quality_score, 0.0), 1.0), 

697 "ears_compliance": ears_compliance, 

698 "completeness": completeness, 

699 "content_quality": content_quality, 

700 "suggestions": suggestions, 

701 } 

702 

703 def _check_ears_compliance(self, spec_content: Dict[str, str]) -> float: 

704 """Check EARS format compliance.""" 

705 spec_md = spec_content.get("spec_md", "") 

706 

707 required_sections = [ 

708 "Overview", 

709 "Environment", 

710 "Assumptions", 

711 "Requirements", 

712 "Specifications", 

713 ] 

714 

715 found_sections = 0 

716 for section in required_sections: 

717 if section in spec_md: 

718 found_sections += 1 

719 

720 return found_sections / len(required_sections) 

721 

722 def _check_completeness(self, spec_content: Dict[str, str]) -> float: 

723 """Check content completeness.""" 

724 spec_md = spec_content.get("spec_md", "") 

725 plan_md = spec_content.get("plan_md", "") 

726 acceptance_md = spec_content.get("acceptance_md", "") 

727 

728 # Check minimum content length 

729 total_length = len(spec_md) + len(plan_md) + len(acceptance_md) 

730 length_score = min(total_length / 2000, 1.0) # 2000 chars as baseline 

731 

732 # Check for content diversity 

733 has_requirements = "Requirements" in spec_md 

734 has_planning = "Implementation Plan" in plan_md 

735 has_acceptance = "Acceptance" in acceptance_md 

736 

737 diversity_score = 0.0 

738 if has_requirements: 

739 diversity_score += 0.3 

740 if has_planning: 

741 diversity_score += 0.3 

742 if has_acceptance: 

743 diversity_score += 0.4 

744 

745 return (length_score + diversity_score) / 2 

746 

747 def _check_content_quality(self, spec_content: Dict[str, str]) -> float: 

748 """Check content quality.""" 

749 spec_md = spec_content.get("spec_md", "") 

750 

751 # Check for technical terms 

752 technical_indicators = [ 

753 "API", 

754 "data", 

755 "interface", 

756 "module", 

757 "component", 

758 "architecture", 

759 ] 

760 technical_score = sum( 

761 1 for term in technical_indicators if term in spec_md 

762 ) / len(technical_indicators) 

763 

764 # Check for specificity 

765 has_requirements = re.search(r"REQ-\d+", spec_md) 

766 has_specifications = re.search(r"SPEC-\d+", spec_md) 

767 

768 specificity_score = 0.0 

769 if has_requirements: 

770 specificity_score += 0.5 

771 if has_specifications: 

772 specificity_score += 0.5 

773 

774 return (technical_score + specificity_score) / 2 

775 

776 def create_spec_files( 

777 self, spec_id: str, content: Dict[str, str], base_dir: str = ".moai/specs" 

778 ) -> bool: 

779 """ 

780 Create SPEC files in the correct directory structure. 

781 

782 Args: 

783 spec_id: SPEC identifier 

784 content: Dictionary with spec_md, plan_md, acceptance_md 

785 base_dir: Base directory for specs 

786 

787 Returns: 

788 True if files were created successfully 

789 """ 

790 try: 

791 # Create spec directory 

792 spec_dir = os.path.join(base_dir, f"SPEC-{spec_id}") 

793 os.makedirs(spec_dir, exist_ok=True) 

794 

795 # Create files 

796 files_to_create = [ 

797 ("spec.md", content.get("spec_md", "")), 

798 ("plan.md", content.get("plan_md", "")), 

799 ("acceptance.md", content.get("acceptance_md", "")), 

800 ] 

801 

802 for filename, content_text in files_to_create: 

803 file_path = os.path.join(spec_dir, filename) 

804 with open(file_path, "w", encoding="utf-8") as f: 

805 f.write(content_text) 

806 

807 logger.info(f"Created spec file: {file_path}") 

808 

809 return True 

810 

811 except Exception as e: 

812 logger.error(f"Failed to create spec files: {e}") 

813 return False 

814 

    def execute(
        self, tool_name: str, tool_args: Dict[str, Any], result: Any = None
    ) -> Dict[str, Any]:
        """
        Execute the auto-spec completion hook.

        Pipeline: trigger check -> change detection -> per-file analysis,
        confidence gating, spec generation, validation and file creation.

        Args:
            tool_name: Name of the tool that was executed
            tool_args: Arguments passed to the tool
            result: Result from tool execution

        Returns:
            Execution result dict with at least "success" and
            "execution_time"; after full processing it also carries
            "generated_specs", "failed_files" and "message".
        """
        start_time = time.time()

        try:
            # Cheap gate first: enabled flag, tool type, supported files.
            if not self.should_trigger_spec_completion(tool_name, tool_args):
                return {
                    "success": False,
                    "message": "Auto-spec completion not triggered",
                    "execution_time": time.time() - start_time,
                }

            # Detect code changes (also de-dupes against processed_files).
            changed_files = self.detect_code_changes(tool_name, tool_args, result)

            if not changed_files:
                return {
                    "success": False,
                    "message": "No code changes detected",
                    "execution_time": time.time() - start_time,
                }

            # Process each changed file independently; one failure must not
            # abort the remaining files.
            results = []
            for file_path in changed_files:
                try:
                    # NOTE(review): relies on the generator exposing
                    # analyze(); the placeholder SpecGenerator may not
                    # implement it, in which case the AttributeError is
                    # caught below and the file lands in failed_files —
                    # confirm the generator's interface.
                    analysis = self.spec_generator.analyze(file_path)

                    # Gate on configured confidence threshold; low-confidence
                    # files are silently skipped (not reported as failures).
                    confidence = self.calculate_completion_confidence(analysis)

                    min_confidence = self.auto_config.get("min_confidence", 0.7)
                    if confidence < min_confidence:
                        logger.info(
                            f"Confidence {confidence} below threshold {min_confidence}"
                        )
                        continue

                    # Generate the complete spec document set.
                    spec_content = self.generate_complete_spec(analysis, file_path)

                    # Validate quality (score is reported, not enforced).
                    validation = self.validate_generated_spec(spec_content)

                    # Write spec.md/plan.md/acceptance.md to disk.
                    spec_id = spec_content["spec_id"]
                    created = self.create_spec_files(spec_id, spec_content)

                    results.append(
                        {
                            "file_path": file_path,
                            "spec_id": spec_id,
                            "confidence": confidence,
                            "quality_score": validation["quality_score"],
                            "created": created,
                        }
                    )

                    logger.info(f"Auto-generated SPEC for {file_path}: {spec_id}")

                except Exception as e:
                    # Per-file failures are recorded and processing continues.
                    logger.error(f"Error processing {file_path}: {e}")
                    results.append({"file_path": file_path, "error": str(e)})

            # Summarize: entries without created=True (including error
            # entries) count as failures.
            successful_creations = [r for r in results if r.get("created", False)]
            failed_creations = [r for r in results if not r.get("created", False)]

            execution_result = {
                "success": len(successful_creations) > 0,
                "generated_specs": successful_creations,
                "failed_files": failed_creations,
                "execution_time": time.time() - start_time,
            }

            # Human-readable summary for hook output.
            if successful_creations:
                execution_result["message"] = (
                    f"Auto-generated {len(successful_creations)} SPEC(s)"
                )
            elif failed_creations:
                execution_result["message"] = (
                    "Auto-spec completion attempted but no specs created"
                )
            else:
                execution_result["message"] = "No files required auto-spec completion"

            return execution_result

        except Exception as e:
            # Hook boundary: never propagate; report the error to the caller.
            logger.error(f"Error in auto-spec completion: {e}")
            return {
                "success": False,
                "error": str(e),
                "execution_time": time.time() - start_time,
            }