# NOTE(review): the lines below are residue from the HuggingFace Spaces UI
# ("Spaces: Sleeping") that leaked into this file during a copy/paste.
# Kept as a comment so the module remains importable.
| """ | |
| Basic test script to verify imports and structure before deployment | |
| """ | |
| import sys | |
| import logging | |
| logging.basicConfig(level=logging.INFO) | |
| logger = logging.getLogger(__name__) | |
def test_imports():
    """Verify that every critical project module imports cleanly.

    The imported names are intentionally unused — this test only checks
    importability of each module before deployment.

    Returns:
        bool: True when all imports succeed, False otherwise (the failure
        is logged with a full traceback).
    """
    # NOTE(review): the status glyphs in the log strings were mojibaked to
    # "β" in the original; restored here as "✓"/"✗" — confirm intended symbols.
    try:
        logger.info("Testing imports...")

        # Config
        from config import AppConfig
        logger.info("✓ config.AppConfig")

        # Core
        from core.llama_model import LlamaCppModel
        logger.info("✓ core.llama_model.LlamaCppModel")
        from core.agent import ReasoningAgent
        logger.info("✓ core.agent.ReasoningAgent")

        # Tools
        from tools.base import BaseAgentTool
        logger.info("✓ tools.base.BaseAgentTool")
        from tools.search import WebSearchTool, HFSearchTool
        logger.info("✓ tools.search.WebSearchTool")
        logger.info("✓ tools.search.HFSearchTool")
        from tools.compute import PythonEvalTool
        logger.info("✓ tools.compute.PythonEvalTool")
        from tools.workflow_runner import WorkflowRunnerTool
        logger.info("✓ tools.workflow_runner.WorkflowRunnerTool")
        from tools import create_tool_catalog
        logger.info("✓ tools.create_tool_catalog")

        # Workflows
        from workflows.schema import WorkflowTask, WorkflowDefinition
        logger.info("✓ workflows.schema.WorkflowTask")
        logger.info("✓ workflows.schema.WorkflowDefinition")
        from workflows.executor import WorkflowExecutor
        logger.info("✓ workflows.executor.WorkflowExecutor")
        from workflows.persistence import WorkflowStore
        logger.info("✓ workflows.persistence.WorkflowStore")

        logger.info("\n✓ All imports successful!")
        return True
    except Exception as e:
        logger.error(f"\n✗ Import failed: {e}", exc_info=True)
        return False
def test_configuration():
    """Load AppConfig from the environment and log the key settings.

    Returns:
        bool: True when the configuration loads and all expected fields are
        readable, False otherwise (the failure is logged with a traceback).
    """
    # NOTE(review): "β" glyphs were mojibake in the original; restored as
    # "✓"/"✗" — confirm intended symbols.
    try:
        logger.info("\nTesting configuration...")
        from config import AppConfig

        config = AppConfig.from_env()
        logger.info(f"✓ Model repo: {config.model.repo_id}")
        logger.info(f"✓ Model file: {config.model.filename}")
        logger.info(f"✓ Draft repo: {config.model.draft_repo_id}")
        logger.info(f"✓ Speculative decoding: {config.model.use_speculative_decoding}")
        logger.info(f"✓ Context size: {config.model.n_ctx}")
        logger.info(f"✓ Threads: {config.model.n_threads}")
        logger.info(f"✓ Max parallel: {config.workflow.max_parallel}")
        logger.info(f"✓ Use memory: {config.agent.use_memory}")

        logger.info("\n✓ Configuration valid!")
        return True
    except Exception as e:
        logger.error(f"\n✗ Configuration failed: {e}", exc_info=True)
        return False
def test_tool_creation():
    """Instantiate each tool directly (without a workflow executor).

    Returns:
        bool: True when every tool constructs and exposes a ``name``,
        False otherwise (the failure is logged with a traceback).
    """
    # NOTE(review): "β" glyphs were mojibake in the original; restored as
    # "✓"/"✗" — confirm intended symbols.
    try:
        logger.info("\nTesting tool creation...")
        from tools.search import WebSearchTool, HFSearchTool
        from tools.compute import PythonEvalTool

        # Create search tools
        web_search = WebSearchTool()
        logger.info(f"✓ WebSearchTool created: {web_search.name}")
        hf_search = HFSearchTool()
        logger.info(f"✓ HFSearchTool created: {hf_search.name}")

        # Create compute tool
        python_eval = PythonEvalTool()
        logger.info(f"✓ PythonEvalTool created: {python_eval.name}")

        logger.info("\n✓ Tool creation successful!")
        return True
    except Exception as e:
        logger.error(f"\n✗ Tool creation failed: {e}", exc_info=True)
        return False
def test_workflow_schema():
    """Exercise workflow schema construction and cycle-detection validation.

    Builds a two-task linear workflow, then deliberately constructs a
    two-task dependency cycle and asserts that WorkflowDefinition rejects
    it with ValueError.

    Returns:
        bool: True when construction succeeds and the cycle is rejected,
        False otherwise (failures are logged).
    """
    # NOTE(review): "β" glyphs were mojibake in the original; restored as
    # "✓"/"✗" — confirm intended symbols.
    try:
        logger.info("\nTesting workflow schema...")
        from workflows.schema import WorkflowTask, WorkflowDefinition

        # Create a simple two-task workflow: task2 depends on task1.
        task1 = WorkflowTask(
            id="task1",
            tool="python_eval",
            args={"expression": "2 + 2"},
            depends_on=[]
        )
        task2 = WorkflowTask(
            id="task2",
            tool="python_eval",
            args={"expression": "10 * 5"},
            depends_on=["task1"]
        )
        workflow = WorkflowDefinition(
            name="test_workflow",
            description="Simple test workflow",
            tasks=[task1, task2],
            final_task="task2",
            max_parallel=2
        )
        logger.info(f"✓ Created workflow: {workflow.name}")
        logger.info(f"✓ Tasks: {len(workflow.tasks)}")
        logger.info(f"✓ Final task: {workflow.final_task}")

        # Negative test: validation must reject a dependency cycle.
        try:
            task_cycle1 = WorkflowTask(
                id="cycle1",
                tool="python_eval",
                args={"expression": "1"},
                depends_on=["cycle2"]
            )
            task_cycle2 = WorkflowTask(
                id="cycle2",
                tool="python_eval",
                args={"expression": "2"},
                depends_on=["cycle1"]
            )
            WorkflowDefinition(
                name="cycle_test",
                tasks=[task_cycle1, task_cycle2],
                final_task="cycle1"
            )
            logger.error("✗ Cycle detection failed (should have raised ValueError)")
            return False
        except ValueError:
            logger.info("✓ Cycle detection works")

        logger.info("\n✓ Workflow schema validation successful!")
        return True
    except Exception as e:
        logger.error(f"\n✗ Workflow schema failed: {e}", exc_info=True)
        return False
def main():
    """Run all structure tests and summarize the results.

    Returns:
        int: Process exit code — 0 when every test passed, 1 otherwise.
    """
    # NOTE(review): "β"/"π" glyphs were mojibake in the original; restored
    # as "✓"/"✗"/"🎉" — confirm intended symbols.
    logger.info("=" * 60)
    logger.info("General Reasoning Agent - Basic Structure Test")
    logger.info("=" * 60)

    tests = [
        ("Imports", test_imports),
        ("Configuration", test_configuration),
        ("Tool Creation", test_tool_creation),
        ("Workflow Schema", test_workflow_schema),
    ]

    results = []
    for name, test_func in tests:
        logger.info(f"\n{'=' * 60}")
        logger.info(f"Running: {name}")
        logger.info('=' * 60)
        result = test_func()
        results.append((name, result))

    # Summary
    logger.info("\n" + "=" * 60)
    logger.info("TEST SUMMARY")
    logger.info("=" * 60)
    for name, result in results:
        status = "✓ PASS" if result else "✗ FAIL"
        logger.info(f"{status}: {name}")

    all_passed = all(result for _, result in results)
    logger.info("=" * 60)
    if all_passed:
        logger.info("🎉 ALL TESTS PASSED - Ready for deployment!")
        logger.info("\nNext steps:")
        logger.info("1. Push to HuggingFace Spaces")
        logger.info("2. Wait for model download (~30-60s)")
        logger.info("3. Test MCP endpoint at /gradio_api/mcp/")
        logger.info("4. Configure Claude Code MCP client")
        return 0
    else:
        logger.error("✗ SOME TESTS FAILED - Fix issues before deployment")
        return 1
| if __name__ == "__main__": | |
| sys.exit(main()) | |