Coverage for src/moai_adk/core/integration/integration_tester.py: 31.67% (60 statements)
« prev ^ index » next — coverage.py v7.12.0, created at 2025-11-20 20:52 +0900
"""
Integration Tester - Main Interface

Provides a high-level interface for comprehensive integration testing
of MoAI-ADK components. This module serves as the main entry point
for integration testing functionality.
"""
import json
from typing import Any, Callable, Dict, List, Optional, Union

from .engine import TestEngine
from .models import IntegrationTestResult, TestComponent, TestSuite
from .utils import ComponentDiscovery, TestEnvironment, TestResultAnalyzer
class IntegrationTester:
    """
    Comprehensive integration tester for MoAI-ADK components.

    High-level facade over :class:`TestEngine` that runs individual tests,
    whole test suites, and concurrent batches. Every produced
    ``IntegrationTestResult`` is accumulated in ``self.test_results`` until
    :meth:`clear_results` is called, and can be analyzed
    (:meth:`get_success_rate`, :meth:`get_test_stats`) or exported
    (:meth:`export_results`).
    """

    def __init__(self, test_timeout: float = 30.0, max_workers: int = 4) -> None:
        """
        Initialize the integration tester.

        Args:
            test_timeout: Maximum time (in seconds) allowed for each test.
            max_workers: Maximum number of concurrent workers.
        """
        self.engine = TestEngine(test_timeout, max_workers)
        # Accumulates results from every run_* call until clear_results().
        self.test_results: List[IntegrationTestResult] = []
        self.discovery = ComponentDiscovery()
        self.analyzer = TestResultAnalyzer()

    def add_test_result(self, result: IntegrationTestResult) -> None:
        """
        Add a test result to the results list.

        Args:
            result: Test result to add.
        """
        self.test_results.append(result)

    def clear_results(self) -> None:
        """Clear all accumulated test results."""
        self.test_results.clear()

    def get_success_rate(self) -> float:
        """
        Get the success rate of all recorded tests.

        Returns:
            Success rate as a percentage (0-100), as computed by the analyzer.
        """
        return self.analyzer.calculate_success_rate(self.test_results)

    def get_test_stats(self) -> Dict[str, Any]:
        """
        Get comprehensive test statistics.

        Returns:
            Dictionary with test statistics produced by the analyzer.
        """
        return self.analyzer.get_execution_stats(self.test_results)

    def run_test(
        self,
        test_func: Callable,
        # Annotation fix: defaults are None, so the types must be Optional.
        test_name: Optional[str] = None,
        components: Optional[List[str]] = None,
    ) -> IntegrationTestResult:
        """
        Run a single integration test and record its result.

        Args:
            test_func: Test function to execute.
            test_name: Optional test name.
            components: List of components being tested.

        Returns:
            IntegrationTestResult: Test execution result.
        """
        result = self.engine.execute_test(test_func, test_name, components)
        self.add_test_result(result)
        return result

    async def run_test_async(
        self,
        test_func: Callable,
        test_name: Optional[str] = None,
        components: Optional[List[str]] = None,
    ) -> IntegrationTestResult:
        """
        Run a single integration test asynchronously and record its result.

        Args:
            test_func: Test function to execute.
            test_name: Optional test name.
            components: List of components being tested.

        Returns:
            IntegrationTestResult: Test execution result.
        """
        result = await self.engine.execute_test_async(test_func, test_name, components)
        self.add_test_result(result)
        return result

    def run_test_suite(self, test_suite: TestSuite) -> List[IntegrationTestResult]:
        """
        Run a complete test suite.

        NOTE: simplified implementation — every case currently runs a
        trivially-passing placeholder. A real implementation would map test
        case names to actual test callables.

        Args:
            test_suite: Test suite to run.

        Returns:
            List of test results, one per case in ``test_suite.test_cases``.
        """

        def placeholder_test() -> bool:
            """Placeholder test function; always passes."""
            return True

        results: List[IntegrationTestResult] = []
        for test_case_name in test_suite.test_cases:
            result = self.run_test(
                placeholder_test,
                test_case_name,
                [c.name for c in test_suite.components],
            )
            results.append(result)

        return results

    def run_concurrent_tests(
        self, tests: List[tuple], timeout: Optional[float] = None
    ) -> List[IntegrationTestResult]:
        """
        Run multiple tests concurrently.

        Args:
            tests: List of (test_func, test_name, components) tuples.
            timeout: Optional timeout for the entire batch.

        Returns:
            List of test results (also appended to ``self.test_results``).
        """
        results = self.engine.run_concurrent_tests(tests, timeout)
        self.test_results.extend(results)
        return results

    async def run_concurrent_tests_async(
        self, tests: List[tuple], timeout: Optional[float] = None
    ) -> List[IntegrationTestResult]:
        """
        Run multiple tests concurrently, asynchronously.

        Args:
            tests: List of (test_func, test_name, components) tuples.
            timeout: Optional timeout for the entire batch.

        Returns:
            List of test results (also appended to ``self.test_results``).
        """
        results = await self.engine.run_concurrent_tests_async(tests, timeout)
        self.test_results.extend(results)
        return results

    def discover_components(self, base_path: str) -> List[TestComponent]:
        """
        Discover testable components in the given path.

        Args:
            base_path: Base path to search for components.

        Returns:
            List of discovered components.
        """
        return self.discovery.discover_components(base_path)

    def create_test_environment(
        self, temp_dir: Optional[str] = None
    ) -> TestEnvironment:
        """
        Create a test environment for integration testing.

        Args:
            temp_dir: Optional temporary directory path.

        Returns:
            TestEnvironment instance.
        """
        return TestEnvironment(temp_dir)

    def export_results(
        self, format: str = "dict"
    ) -> Union[List[Dict[str, Any]], Dict[str, Any], str]:
        """
        Export test results in the specified format.

        Args:
            format: Export format — "dict", "json", or "summary".
                (The parameter name shadows the builtin but is kept for
                API compatibility.)

        Returns:
            List of per-result dicts ("dict"), a JSON string ("json"),
            or a stats/failures summary dict ("summary").

        Raises:
            ValueError: If an unsupported format is requested.
        """
        if format == "dict":
            return [vars(result) for result in self.test_results]
        elif format == "json":
            # BUGFIX: "json" was documented but previously unimplemented —
            # it fell through to the ValueError branch. default=str
            # deliberately stringifies non-JSON-native values (e.g.
            # timestamps/durations) instead of raising TypeError.
            return json.dumps(
                [vars(result) for result in self.test_results], default=str
            )
        elif format == "summary":
            return {
                "stats": self.get_test_stats(),
                "failed_tests": self.analyzer.get_failed_tests(self.test_results),
            }
        else:
            raise ValueError(f"Unsupported format: {format}")

    def validate_test_environment(self) -> List[str]:
        """
        Validate the test environment based on accumulated results.

        Returns:
            List of validation warning messages (empty when all checks pass).
        """
        warnings = []

        # Check if we have any test results at all.
        if not self.test_results:
            warnings.append("No test results found")

        # Flag a success rate below the 80% threshold.
        success_rate = self.get_success_rate()
        if success_rate < 80.0:
            warnings.append(f"Low success rate: {success_rate:.1f}%")

        return warnings