Coverage for src/moai_adk/core/hooks/post_tool_auto_spec_completion.py: 13.28%
241 statements
« prev ^ index » next — coverage.py v7.12.0, created at 2025-11-20 20:52 +0900
1"""PostToolUse Hook for Automated SPEC Completion System."""
3import hashlib
4import logging
5import os
6import re
7import time
8from typing import Any, Dict, List
# SpecGenerator: Placeholder for spec generation functionality
class SpecGenerator:
    """Placeholder SpecGenerator class for auto-spec completion.

    Provides the minimal interface PostToolAutoSpecCompletion uses:
    ``analyze`` (called from ``execute``) and ``generate_spec``.
    """

    def __init__(self):
        # Identifier used only for debugging/logging.
        self.name = "SpecGenerator"

    def analyze(self, file_path: str) -> Dict[str, Any]:
        """Return a neutral placeholder analysis for *file_path*.

        Fix: PostToolAutoSpecCompletion.execute() calls
        ``self.spec_generator.analyze(file_path)``, but this placeholder
        originally lacked the method, so every processed file raised
        AttributeError and was reported as failed. The neutral 0.5 scores
        are the three keys calculate_completion_confidence() reads, and
        weight to an overall confidence of 0.5.
        """
        return {
            "structure_score": 0.5,
            "domain_accuracy": 0.5,
            "documentation_level": 0.5,
        }

    def generate_spec(self, file_path: str, content: str) -> str:
        """Generate a basic SPEC document (first 200 chars of *content*)."""
        return f"SPEC document for {file_path}\n\nContent analysis:\n{content[:200]}..."
# BaseHook: Simplified base hook class for auto-spec completion
class BaseHook:
    """Minimal base class for the auto-spec completion hook."""

    def __init__(self):
        # Identity metadata read by whoever registers/inspects the hook.
        self.name = "PostToolAutoSpecCompletion"
        self.description = (
            "PostToolUse Hook for Automated SPEC Completion System"
        )
# Configure logging: one module-level logger named after this module, so the
# host application's logging configuration can filter this hook's records.
logger = logging.getLogger(__name__)
class PostToolAutoSpecCompletion(BaseHook):
    """
    PostToolUse Hook for automated SPEC completion.

    This hook detects code file changes after Write/Edit/MultiEdit tools
    and automatically generates complete SPEC documents in EARS format
    (spec.md, plan.md, acceptance.md under .moai/specs/SPEC-<id>/).
    """

    def __init__(self):
        super().__init__()
        self.spec_generator = SpecGenerator()
        self.auto_config = self._get_auto_spec_config()

        # Track processed files (absolute paths) so the same file is not
        # spec'd twice within this hook instance's lifetime.
        self.processed_files = set()

    def _get_auto_spec_config(self) -> Dict[str, Any]:
        """Get auto-spec completion configuration.

        Uses the project ConfigManager when importable; otherwise falls back
        to built-in defaults. The fallback dict is intentionally identical to
        the default passed to get_value, so both paths behave the same.
        """
        try:
            from moai_adk.core.config.config_manager import ConfigManager

            config = ConfigManager()
            return config.get_value(
                "auto_spec_completion",
                {
                    "enabled": True,
                    "min_confidence": 0.7,
                    "auto_open_editor": True,
                    "supported_languages": ["python", "javascript", "typescript", "go"],
                    "excluded_patterns": ["test_", "spec_", "__tests__"],
                },
            )
        except ImportError:
            # Standalone fallback — keep in sync with the defaults above.
            return {
                "enabled": True,
                "min_confidence": 0.7,
                "auto_open_editor": True,
                "supported_languages": ["python", "javascript", "typescript", "go"],
                "excluded_patterns": ["test_", "spec_", "__tests__"],
            }

    def should_trigger_spec_completion(
        self, tool_name: str, tool_args: Dict[str, Any]
    ) -> bool:
        """
        Determine if spec completion should be triggered.

        Args:
            tool_name: Name of the tool that was executed
            tool_args: Arguments passed to the tool

        Returns:
            True if spec completion should be triggered
        """
        # Check if auto-spec completion is enabled
        if not self.auto_config.get("enabled", True):
            logger.debug("Auto-spec completion is disabled")
            return False

        # Only trigger for Write/Edit/MultiEdit tools
        if tool_name not in ["Write", "Edit", "MultiEdit"]:
            logger.debug(f"Tool {tool_name} does not trigger spec completion")
            return False

        # Extract file paths from tool arguments
        file_paths = self._extract_file_paths(tool_args)

        if not file_paths:
            logger.debug("No file paths found in tool arguments")
            return False

        # Keep only files whose extension maps to a supported language;
        # log each rejected file for debuggability.
        supported_files = []
        for file_path in file_paths:
            if self._is_supported_file(file_path):
                supported_files.append(file_path)
            else:
                logger.debug(
                    f"File {file_path} is not supported for auto-spec completion"
                )

        if not supported_files:
            logger.debug("No supported files found")
            return False

        # Filter out files matching excluded patterns (tests, specs, ...).
        target_files = [
            f for f in supported_files if not self._is_excluded_file(f)
        ]

        if not target_files:
            logger.debug("All files are excluded from auto-spec completion")
            return False

        return True

    def _extract_file_paths(self, tool_args: Dict[str, Any]) -> List[str]:
        """Extract file paths from tool arguments.

        Handles the single-file form used by Write/Edit ("file_path") and
        the multi-file form used by MultiEdit ("edits" list of dicts).

        Returns:
            De-duplicated absolute paths, in first-seen order.
        """
        file_paths = []

        # Write and Edit both pass a single "file_path" argument.
        # Fix: the original checked the same key twice (once per tool) and
        # appended the path twice; the later de-dup compared the RAW path
        # against a list of ABSOLUTE paths, so a relative path slipped
        # through twice. One check here, de-dup on the resolved path below.
        if "file_path" in tool_args:
            file_paths.append(tool_args["file_path"])

        # MultiEdit passes a list of edits, each with its own file_path.
        if "edits" in tool_args:
            for edit in tool_args["edits"]:
                if "file_path" in edit:
                    file_paths.append(edit["file_path"])

        # Resolve to absolute paths and remove duplicates, preserving order.
        unique_paths = []
        for path in file_paths:
            abs_path = os.path.abspath(path)
            if abs_path not in unique_paths:
                unique_paths.append(abs_path)

        return unique_paths

    def _is_supported_file(self, file_path: str) -> bool:
        """Check if file is supported for auto-spec completion."""
        # Get file extension (lowercased so .PY matches .py)
        file_ext = os.path.splitext(file_path)[1].lower()

        # Map extensions to languages
        supported_extensions = {
            ".py": "python",
            ".js": "javascript",
            ".jsx": "javascript",
            ".ts": "typescript",
            ".tsx": "typescript",
            ".go": "go",
        }

        if file_ext not in supported_extensions:
            return False

        # The language must also be enabled in configuration.
        language = supported_extensions[file_ext]
        supported_languages = self.auto_config.get("supported_languages", [])
        return language in supported_languages

    def _is_excluded_file(self, file_path: str) -> bool:
        """Check if file should be excluded from auto-spec completion.

        Patterns are applied with re.search against both the filename and
        the immediate parent directory name (e.g. "__tests__").
        """
        file_name = os.path.basename(file_path)
        file_dir = os.path.basename(os.path.dirname(file_path))

        excluded_patterns = self.auto_config.get("excluded_patterns", [])

        for pattern in excluded_patterns:
            # Check filename patterns
            if re.search(pattern, file_name):
                return True
            # Check directory patterns
            if re.search(pattern, file_dir):
                return True

        return False

    def detect_code_changes(
        self, tool_name: str, tool_args: Dict[str, Any], result: Any
    ) -> List[str]:
        """
        Detect code changes from tool execution.

        Args:
            tool_name: Name of the tool that was executed
            tool_args: Arguments passed to the tool
            result: Result from tool execution (currently unused; kept for
                hook-interface compatibility)

        Returns:
            List of affected absolute file paths not yet processed by this
            instance. Side effect: marks the returned paths as processed.
        """
        file_paths = []

        # Write tool creates new files
        if tool_name == "Write":
            if "file_path" in tool_args:
                file_paths.append(tool_args["file_path"])

        # Edit tool modifies existing files
        elif tool_name == "Edit":
            if "file_path" in tool_args:
                file_paths.append(tool_args["file_path"])

        # MultiEdit tool can modify multiple files
        elif tool_name == "MultiEdit":
            if "edits" in tool_args:
                for edit in tool_args["edits"]:
                    if "file_path" in edit:
                        file_paths.append(edit["file_path"])

        # Convert to absolute paths
        abs_paths = [os.path.abspath(path) for path in file_paths]

        # Filter out already processed files
        new_paths = [path for path in abs_paths if path not in self.processed_files]

        # Remember them so repeated tool calls don't re-generate specs.
        self.processed_files.update(new_paths)

        return new_paths

    def calculate_completion_confidence(self, analysis: Dict[str, Any]) -> float:
        """
        Calculate confidence score for SPEC completion.

        Args:
            analysis: Code analysis result; missing keys default to 0.5

        Returns:
            Confidence score clamped to [0.0, 1.0]
        """
        # Default confidence if analysis is missing entirely
        if not analysis:
            return 0.5

        structure_score = analysis.get("structure_score", 0.5)
        domain_accuracy = analysis.get("domain_accuracy", 0.5)
        documentation_level = analysis.get("documentation_level", 0.5)

        # Weighted calculation:
        #   Structure clarity: 30%, Domain accuracy: 40%, Documentation: 30%
        confidence = (
            structure_score * 0.3 + domain_accuracy * 0.4 + documentation_level * 0.3
        )

        # Clamp in case the analysis supplied out-of-range scores.
        return min(max(confidence, 0.0), 1.0)

    def generate_complete_spec(
        self, analysis: Dict[str, Any], file_path: str
    ) -> Dict[str, str]:
        """
        Generate complete SPEC documents in EARS format.

        Args:
            analysis: Code analysis result
            file_path: Path to the analyzed file

        Returns:
            Dictionary with keys "spec_id", "spec_md", "plan_md",
            "acceptance_md"
        """
        spec_id = self._generate_spec_id(file_path)
        file_name = os.path.basename(file_path)

        # Generate the three companion documents from the same analysis.
        spec_md = self._generate_spec_content(analysis, spec_id, file_name)
        plan_md = self._generate_plan_content(analysis, spec_id, file_name)
        acceptance_md = self._generate_acceptance_content(analysis, spec_id, file_name)

        return {
            "spec_id": spec_id,
            "spec_md": spec_md,
            "plan_md": plan_md,
            "acceptance_md": acceptance_md,
        }

    def _generate_spec_id(self, file_path: str) -> str:
        """Generate unique SPEC ID from file path.

        Example: "my_module.py" -> "MYMODULE-<4-char hash>".
        """
        # Extract meaningful name from the file path, WITHOUT the extension.
        # Fix: previously the extension leaked into the ID, producing IDs
        # (and on-disk directory names) like "MYMODULE.PY-ab12".
        base_name = os.path.splitext(os.path.basename(file_path))[0]
        name_parts = base_name.split("_")

        # Convert to uppercase and join
        meaningful_name = "".join(part.upper() for part in name_parts if part)

        # Short hash of the full path ensures uniqueness across same-named
        # files in different directories (md5 as a fingerprint only — not
        # security-sensitive).
        file_hash = hashlib.md5(file_path.encode()).hexdigest()[:4]

        return f"{meaningful_name}-{file_hash}"

    def _generate_spec_content(
        self, analysis: Dict[str, Any], spec_id: str, file_name: str
    ) -> str:
        """Generate main spec.md content (EARS sections + frontmatter)."""
        template = f"""---
  "id": "SPEC-{spec_id}",
  "title": "Auto-generated SPEC for {file_name}",
  "title_en": "Auto-generated SPEC for {file_name}",
  "version": "1.0.0",
  "status": "pending",
  "created": "{time.strftime('%Y-%m-%d')}",
  "author": "@alfred-auto",
  "reviewer": "",
  "category": "FEATURE",
  "priority": "MEDIUM",
  "tags": ["auto-generated", "{spec_id}"],
  "language": "en",
  "estimated_complexity": "auto"
}}
---

## Auto-generated SPEC for {file_name}

### Overview

{analysis.get('description', 'This spec was auto-generated based on code analysis.')}

### Environment

- **Project**: MoAI-ADK Auto-generated SPEC
- **Language**: {analysis.get('language', 'Python')}
- **File**: {file_name}
- **Generation Method**: Automatic analysis-based
- **Status**: Review required

### Assumptions

1. Code structure is clearly defined
2. Domain-specific terminology is expected to be used
3. Standard development practices are assumed to be followed
4. Generated SPEC will be finalized after user review

### Requirements

#### Ubiquitous Requirements

- **REQ-001**: System must perform the functionality of {file_name}
- **REQ-002**: Generated functionality must be stable
- **REQ-003**: Code must be written in a maintainable form
- **REQ-004**: Tests must satisfy functional requirements
- **REQ-005**: Code must comply with project coding standards

#### State-driven Requirements

{analysis.get('state_requirements', '- **REQ-006**: System must transition from initial state to target state')}

#### Event-driven Requirements

{analysis.get('event_requirements', '- **REQ-007**: System must respond when user input occurs')}

### Specifications

{analysis.get('specifications', '- **SPEC-001**: System must implement requirements')}

### Traceability


### Edit Guide

**User Review Recommendations:**
1. ✅ Verify technical clarity
2. ✅ Specify requirements
3. ✅ Review domain-specific terminology
4. ✅ Define state and event requirements
5. ✅ Detail specifications

**Quality Improvement Suggestions:**
- Add domain-specific terminology
- Specify user cases
- Define performance requirements
- Add security requirements
"""
        return template

    def _generate_plan_content(
        self, analysis: Dict[str, Any], spec_id: str, file_name: str
    ) -> str:
        """Generate plan.md content (implementation phases + approach)."""
        return f"""---
  "id": "PLAN-{spec_id}",
  "spec_id": "SPEC-{spec_id}",
  "title": "Auto-generated Implementation Plan for {file_name}",
  "version": "1.0.0",
  "status": "pending",
  "created": "{time.strftime('%Y-%m-%d')}",
  "author": "@alfred-auto"
}}
---

## Auto-generated Implementation Plan for {file_name}

### Implementation Phases

#### Phase 1: Basic Structure Review (Priority: High)

- [ ] Complete code structure analysis
- [ ] Identify core functionality
- [ ] Verify dependencies
- [ ] Set up test environment

#### Phase 2: Requirements Specification (Priority: Medium)

- [ ] Specify ubiquitous requirements
- [ ] Define state-driven requirements
- [ ] Review event-driven requirements
- [ ] Set performance requirements

#### Phase 3: Implementation Planning (Priority: Medium)

- [ ] Design module architecture
- [ ] Define interfaces
- [ ] Design data structures
- [ ] Plan error handling

#### Phase 4: Test Strategy Development (Priority: High)

- [ ] Plan unit tests
- [ ] Plan integration tests
- [ ] User story-based testing
- [ ] Implement test automation

### Technical Approach

#### Architecture Design

```
{analysis.get('architecture', 'User Input → Validation → Business Logic → Data Processing → Output')}
        ↓
[Core Components] → [External Services] → [Data Layer]
```

#### Core Components

1. **{analysis.get('main_component', 'Main Class')}**: Handle primary business logic
2. **{analysis.get('service_component', 'Service Layer')}**: Integrate external services
3. **{analysis.get('data_component', 'Data Layer')}**: Process and store data
4. **{analysis.get('component_4', 'Validation Layer')}**: Validate input and check validity

#### Dependency Management

**Utilize Existing Modules:**
- {analysis.get('existing_modules', 'Utilize standard libraries')}

**Add New Modules:**
- {analysis.get('new_modules', 'Add as needed')}

### Success Criteria

#### Functional Criteria

- ✅ All requirements implemented
- ✅ Test coverage 85% or higher
- ✅ Performance targets met
- ✅ User requirements satisfied

#### Performance Criteria

- ✅ Response time {analysis.get('performance_target', 'within 1 second')}
- ✅ Memory usage optimized
- ✅ Parallel processing supported
- ✅ Scalability verified

#### Quality Criteria

- ✅ Code quality verification passed
- ✅ Security scanning passed
- ✅ Documentation completeness verified
- ✅ Maintainability validated

### Next Steps

1. **Immediate**: Basic structure review (1-2 days)
2. **Weekly Goal**: Requirements specification (3-5 days)
3. **2-Week Goal**: Implementation completion (7-14 days)
4. **Release Preparation**: Testing and validation (14-16 days)
"""

    def _generate_acceptance_content(
        self, analysis: Dict[str, Any], spec_id: str, file_name: str
    ) -> str:
        """Generate acceptance.md content (criteria + validation process)."""
        return f"""---
  "id": "ACCEPT-{spec_id}",
  "spec_id": "SPEC-{spec_id}",
  "title": "Auto-generated Acceptance Criteria for {file_name}",
  "version": "1.0.0",
  "status": "pending",
  "created": "{time.strftime('%Y-%m-%d')}",
  "author": "@alfred-auto"
}}
---

## Auto-generated Acceptance Criteria for {file_name}

### Acceptance Criteria

#### Basic Functionality

**Must-have:**
- [ ] {analysis.get('must_have_1', 'System must operate normally')}
- [ ] {analysis.get('must_have_2', 'User interface must display correctly')}
- [ ] {analysis.get('must_have_3', 'Data processing logic must function properly')}

**Should-have:**
- [ ] {analysis.get('should_have_1', 'User experience must be smooth')}
- [ ] {analysis.get('should_have_2', 'Performance targets must be met')}

#### Performance Testing

**Performance Requirements:**
- [ ] Response time: {analysis.get('response_time', 'within 1 second')}
- [ ] Concurrent users: support {analysis.get('concurrent_users', '100 users')} or more
- [ ] Memory usage: {analysis.get('memory_usage', '100MB or less')}
- [ ] CPU utilization: {analysis.get('cpu_usage', '50% or less')}

**Load Testing:**
- [ ] Functional load testing passed
- [ ] Long-term stability testing passed
- [ ] Recovery testing passed

#### Security Testing

**Security Requirements:**
- [ ] {analysis.get('security_req_1', 'Authentication and authorization verification passed')}
- [ ] {analysis.get('security_req_2', 'Input validation passed')}
- [ ] {analysis.get('security_req_3', 'SQL injection protection passed')}

**Vulnerability Testing:**
- [ ] OWASP Top 10 inspection passed
- [ ] Security scanning passed
- [ ] Permission settings verification passed

#### Compatibility Testing

**Browser Compatibility:**
- [ ] Chrome latest version
- [ ] Firefox latest version
- [ ] Safari latest version
- [ ] Edge latest version

**Device Compatibility:**
- [ ] Desktop (1920x1080)
- [ ] Tablet (768x1024)
- [ ] Mobile (375x667)

#### User Acceptance Testing

**User Scenarios:**
- [ ] {analysis.get('user_scenario_1', 'General user scenario testing passed')}
- [ ] {analysis.get('user_scenario_2', 'Administrator scenario testing passed')}
- [ ] {analysis.get('user_scenario_3', 'Error handling scenario testing passed')}

**User Feedback:**
- [ ] User satisfaction 80% or higher
- [ ] Feature usability evaluation
- [ ] Design and UI/UX verification

### Validation Process

#### Phase 1: Unit Tests

- [ ] Developer testing completed
- [ ] Code review passed
- [ ] Automated testing passed
- [ ] Code coverage 85% or higher

#### Phase 2: Integration Tests

- [ ] Inter-module integration testing
- [ ] API integration testing
- [ ] Database integration testing
- [ ] External service integration testing

#### Phase 3: System Tests

- [ ] Full system functionality testing
- [ ] Performance testing
- [ ] Security testing
- [ ] Stability testing

#### Phase 4: User Tests

- [ ] Internal user testing
- [ ] Actual user testing
- [ ] Feedback collection and incorporation
- [ ] Final acceptance approval

### Validation Templates

#### Functionality Validation Template

| Function ID | Function Name | Expected Result | Actual Result | Status | Notes |
|-------------|---------------|-----------------|---------------|--------|-------|
| FUNC-001 | Function 1 | Success | Testing | In Progress | Description |
| FUNC-002 | Function 2 | Success | Success | Passed | Description |
| FUNC-003 | Function 3 | Success | Failed | Failed | Description |

#### Performance Validation Template

| Test Item | Target | Measured | Status | Notes |
|-----------|--------|----------|--------|-------|
| Response time | 1s | 0.8s | Passed | Description |
| Memory usage | 100MB | 85MB | Passed | Description |
| CPU utilization | 50% | 45% | Passed | Description |

### Completion Criteria

#### Pass Criteria

- ✅ All required functionality validation passed
- ✅ Performance requirements met
- ✅ Security testing passed
- ✅ User acceptance passed
- ✅ Documentation validation completed

#### Reporting

- [ ] Validation report created
- [ ] Identified issues documented
- [ ] Improvements defined
- [ ] Acceptance approval document prepared

**Validation Team:**
- Developer: @developer
- QA: @qa_engineer
- Product Owner: @product_owner
- Final Approver: @stakeholder
"""

    def validate_generated_spec(self, spec_content: Dict[str, str]) -> Dict[str, Any]:
        """
        Validate quality of generated spec.

        Args:
            spec_content: Dictionary with "spec_md", "plan_md" and
                "acceptance_md" keys (as produced by generate_complete_spec)

        Returns:
            Validation result with quality metrics and suggestions
        """
        quality_score = 0.0
        suggestions = []

        # Weighted quality score:
        #   EARS format compliance 40%, completeness 30%, content quality 30%
        ears_compliance = self._check_ears_compliance(spec_content)
        quality_score += ears_compliance * 0.4

        completeness = self._check_completeness(spec_content)
        quality_score += completeness * 0.3

        content_quality = self._check_content_quality(spec_content)
        quality_score += content_quality * 0.3

        # Generate suggestions for each metric below its threshold.
        if ears_compliance < 0.9:
            suggestions.append("Improvement needed to fully comply with EARS format.")

        if completeness < 0.8:
            suggestions.append(
                "Requirements and specifications need to be more detailed."
            )

        if content_quality < 0.7:
            suggestions.append(
                "Domain-specific terminology and technical content need to be added."
            )

        return {
            "quality_score": min(max(quality_score, 0.0), 1.0),
            "ears_compliance": ears_compliance,
            "completeness": completeness,
            "content_quality": content_quality,
            "suggestions": suggestions,
        }

    def _check_ears_compliance(self, spec_content: Dict[str, str]) -> float:
        """Check EARS format compliance (fraction of required sections found)."""
        spec_md = spec_content.get("spec_md", "")

        required_sections = [
            "Overview",
            "Environment",
            "Assumptions",
            "Requirements",
            "Specifications",
        ]

        found_sections = 0
        for section in required_sections:
            if section in spec_md:
                found_sections += 1

        return found_sections / len(required_sections)

    def _check_completeness(self, spec_content: Dict[str, str]) -> float:
        """Check content completeness (length baseline + section diversity)."""
        spec_md = spec_content.get("spec_md", "")
        plan_md = spec_content.get("plan_md", "")
        acceptance_md = spec_content.get("acceptance_md", "")

        # Length score: 2000 combined characters counts as "complete".
        total_length = len(spec_md) + len(plan_md) + len(acceptance_md)
        length_score = min(total_length / 2000, 1.0)

        # Diversity score: each document must carry its expected keyword.
        has_requirements = "Requirements" in spec_md
        has_planning = "Implementation Plan" in plan_md
        has_acceptance = "Acceptance" in acceptance_md

        diversity_score = 0.0
        if has_requirements:
            diversity_score += 0.3
        if has_planning:
            diversity_score += 0.3
        if has_acceptance:
            diversity_score += 0.4

        return (length_score + diversity_score) / 2

    def _check_content_quality(self, spec_content: Dict[str, str]) -> float:
        """Check content quality (technical-term density + REQ/SPEC ids)."""
        spec_md = spec_content.get("spec_md", "")

        # Fraction of indicator terms present (case-sensitive substring match).
        technical_indicators = [
            "API",
            "data",
            "interface",
            "module",
            "component",
            "architecture",
        ]
        technical_score = sum(
            1 for term in technical_indicators if term in spec_md
        ) / len(technical_indicators)

        # Specificity: presence of numbered requirement/specification ids.
        has_requirements = re.search(r"REQ-\d+", spec_md)
        has_specifications = re.search(r"SPEC-\d+", spec_md)

        specificity_score = 0.0
        if has_requirements:
            specificity_score += 0.5
        if has_specifications:
            specificity_score += 0.5

        return (technical_score + specificity_score) / 2

    def create_spec_files(
        self, spec_id: str, content: Dict[str, str], base_dir: str = ".moai/specs"
    ) -> bool:
        """
        Create SPEC files in the correct directory structure.

        Args:
            spec_id: SPEC identifier
            content: Dictionary with spec_md, plan_md, acceptance_md
            base_dir: Base directory for specs

        Returns:
            True if files were created successfully, False on any OS error
            (best-effort: failures are logged, not raised)
        """
        try:
            # Create the spec directory, tolerating an existing one.
            spec_dir = os.path.join(base_dir, f"SPEC-{spec_id}")
            os.makedirs(spec_dir, exist_ok=True)

            # Write the three companion documents.
            files_to_create = [
                ("spec.md", content.get("spec_md", "")),
                ("plan.md", content.get("plan_md", "")),
                ("acceptance.md", content.get("acceptance_md", "")),
            ]

            for filename, content_text in files_to_create:
                file_path = os.path.join(spec_dir, filename)
                with open(file_path, "w", encoding="utf-8") as f:
                    f.write(content_text)

                logger.info(f"Created spec file: {file_path}")

            return True

        except Exception as e:
            # Deliberate best-effort: spec creation must never break the tool
            # call that triggered the hook.
            logger.error(f"Failed to create spec files: {e}")
            return False

    def execute(
        self, tool_name: str, tool_args: Dict[str, Any], result: Any = None
    ) -> Dict[str, Any]:
        """
        Execute the auto-spec completion hook.

        Args:
            tool_name: Name of the tool that was executed
            tool_args: Arguments passed to the tool
            result: Result from tool execution

        Returns:
            Execution result dict with "success", "execution_time" and,
            when triggered, "generated_specs"/"failed_files"/"message"
        """
        start_time = time.time()

        try:
            # Check if we should trigger spec completion
            if not self.should_trigger_spec_completion(tool_name, tool_args):
                return {
                    "success": False,
                    "message": "Auto-spec completion not triggered",
                    "execution_time": time.time() - start_time,
                }

            # Detect code changes (also marks the files as processed)
            changed_files = self.detect_code_changes(tool_name, tool_args, result)

            if not changed_files:
                return {
                    "success": False,
                    "message": "No code changes detected",
                    "execution_time": time.time() - start_time,
                }

            # Process each changed file independently: one failing file must
            # not prevent spec generation for the others.
            results = []
            for file_path in changed_files:
                try:
                    # Analyze the code file
                    analysis = self.spec_generator.analyze(file_path)

                    # Calculate confidence
                    confidence = self.calculate_completion_confidence(analysis)

                    # Skip low-confidence analyses rather than emit bad specs.
                    min_confidence = self.auto_config.get("min_confidence", 0.7)
                    if confidence < min_confidence:
                        logger.info(
                            f"Confidence {confidence} below threshold {min_confidence}"
                        )
                        continue

                    # Generate complete spec
                    spec_content = self.generate_complete_spec(analysis, file_path)

                    # Validate quality
                    validation = self.validate_generated_spec(spec_content)

                    # Create spec files
                    spec_id = spec_content["spec_id"]
                    created = self.create_spec_files(spec_id, spec_content)

                    results.append(
                        {
                            "file_path": file_path,
                            "spec_id": spec_id,
                            "confidence": confidence,
                            "quality_score": validation["quality_score"],
                            "created": created,
                        }
                    )

                    logger.info(f"Auto-generated SPEC for {file_path}: {spec_id}")

                except Exception as e:
                    logger.error(f"Error processing {file_path}: {e}")
                    results.append({"file_path": file_path, "error": str(e)})

            # Generate summary
            successful_creations = [r for r in results if r.get("created", False)]
            failed_creations = [r for r in results if not r.get("created", False)]

            execution_result = {
                "success": len(successful_creations) > 0,
                "generated_specs": successful_creations,
                "failed_files": failed_creations,
                "execution_time": time.time() - start_time,
            }

            # Add notification message
            if successful_creations:
                execution_result["message"] = (
                    f"Auto-generated {len(successful_creations)} SPEC(s)"
                )
            elif failed_creations:
                execution_result["message"] = (
                    "Auto-spec completion attempted but no specs created"
                )
            else:
                execution_result["message"] = "No files required auto-spec completion"

            return execution_result

        except Exception as e:
            # Top-level guard: the hook must never propagate into the tool
            # pipeline; report the failure in the result instead.
            logger.error(f"Error in auto-spec completion: {e}")
            return {
                "success": False,
                "error": str(e),
                "execution_time": time.time() - start_time,
            }