Spaces:
Sleeping
Sleeping
File size: 7,077 Bytes
4454066 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 |
"""
Basic test script to verify imports and structure before deployment
"""
import sys
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def test_imports():
    """Verify that every critical project module can be imported.

    Imports are performed inside the function (not at module top level)
    so a single broken module is reported here instead of preventing the
    whole script from loading.

    Returns:
        bool: True if all imports succeeded, False otherwise.
    """
    try:
        logger.info("Testing imports...")

        # Config
        from config import AppConfig  # noqa: F401 — import itself is the test
        logger.info("✓ config.AppConfig")

        # Core
        from core.llama_model import LlamaCppModel  # noqa: F401
        logger.info("✓ core.llama_model.LlamaCppModel")
        from core.agent import ReasoningAgent  # noqa: F401
        logger.info("✓ core.agent.ReasoningAgent")

        # Tools
        from tools.base import BaseAgentTool  # noqa: F401
        logger.info("✓ tools.base.BaseAgentTool")
        from tools.search import WebSearchTool, HFSearchTool  # noqa: F401
        logger.info("✓ tools.search.WebSearchTool")
        logger.info("✓ tools.search.HFSearchTool")
        from tools.compute import PythonEvalTool  # noqa: F401
        logger.info("✓ tools.compute.PythonEvalTool")
        from tools.workflow_runner import WorkflowRunnerTool  # noqa: F401
        logger.info("✓ tools.workflow_runner.WorkflowRunnerTool")
        from tools import create_tool_catalog  # noqa: F401
        logger.info("✓ tools.create_tool_catalog")

        # Workflows
        from workflows.schema import WorkflowTask, WorkflowDefinition  # noqa: F401
        logger.info("✓ workflows.schema.WorkflowTask")
        logger.info("✓ workflows.schema.WorkflowDefinition")
        from workflows.executor import WorkflowExecutor  # noqa: F401
        logger.info("✓ workflows.executor.WorkflowExecutor")
        from workflows.persistence import WorkflowStore  # noqa: F401
        logger.info("✓ workflows.persistence.WorkflowStore")

        logger.info("\n✅ All imports successful!")
        return True
    except Exception as e:
        logger.error(f"\n❌ Import failed: {e}", exc_info=True)
        return False
def test_configuration():
    """Verify that application configuration loads from the environment.

    Loads ``AppConfig.from_env()`` and logs the key settings so a reviewer
    can eyeball the values before deployment.

    Returns:
        bool: True if the configuration loaded without error, False otherwise.
    """
    try:
        logger.info("\nTesting configuration...")
        from config import AppConfig

        config = AppConfig.from_env()
        logger.info(f"✓ Model repo: {config.model.repo_id}")
        logger.info(f"✓ Model file: {config.model.filename}")
        logger.info(f"✓ Draft repo: {config.model.draft_repo_id}")
        logger.info(f"✓ Speculative decoding: {config.model.use_speculative_decoding}")
        logger.info(f"✓ Context size: {config.model.n_ctx}")
        logger.info(f"✓ Threads: {config.model.n_threads}")
        logger.info(f"✓ Max parallel: {config.workflow.max_parallel}")
        logger.info(f"✓ Use memory: {config.agent.use_memory}")

        logger.info("\n✅ Configuration valid!")
        return True
    except Exception as e:
        logger.error(f"\n❌ Configuration failed: {e}", exc_info=True)
        return False
def test_tool_creation():
    """Verify that agent tools can be instantiated without an executor.

    Constructs each tool class directly and logs its ``name`` attribute
    to confirm basic initialization works.

    Returns:
        bool: True if every tool was created successfully, False otherwise.
    """
    try:
        logger.info("\nTesting tool creation...")
        from tools.search import WebSearchTool, HFSearchTool
        from tools.compute import PythonEvalTool

        # Create search tools
        web_search = WebSearchTool()
        logger.info(f"✓ WebSearchTool created: {web_search.name}")
        hf_search = HFSearchTool()
        logger.info(f"✓ HFSearchTool created: {hf_search.name}")

        # Create compute tool
        python_eval = PythonEvalTool()
        logger.info(f"✓ PythonEvalTool created: {python_eval.name}")

        logger.info("\n✅ Tool creation successful!")
        return True
    except Exception as e:
        logger.error(f"\n❌ Tool creation failed: {e}", exc_info=True)
        return False
def test_workflow_schema():
    """Verify workflow schema construction and cycle detection.

    Builds a valid two-task workflow, then builds a two-task dependency
    cycle and confirms ``WorkflowDefinition`` rejects it with ValueError.

    Returns:
        bool: True if construction and validation behave as expected,
        False otherwise.
    """
    try:
        logger.info("\nTesting workflow schema...")
        from workflows.schema import WorkflowTask, WorkflowDefinition

        # Create simple workflow: task2 depends on task1.
        task1 = WorkflowTask(
            id="task1",
            tool="python_eval",
            args={"expression": "2 + 2"},
            depends_on=[]
        )
        task2 = WorkflowTask(
            id="task2",
            tool="python_eval",
            args={"expression": "10 * 5"},
            depends_on=["task1"]
        )
        workflow = WorkflowDefinition(
            name="test_workflow",
            description="Simple test workflow",
            tasks=[task1, task2],
            final_task="task2",
            max_parallel=2
        )
        logger.info(f"✓ Created workflow: {workflow.name}")
        logger.info(f"✓ Tasks: {len(workflow.tasks)}")
        logger.info(f"✓ Final task: {workflow.final_task}")

        # Test validation catches cycles (cycle1 <-> cycle2).
        try:
            task_cycle1 = WorkflowTask(
                id="cycle1",
                tool="python_eval",
                args={"expression": "1"},
                depends_on=["cycle2"]
            )
            task_cycle2 = WorkflowTask(
                id="cycle2",
                tool="python_eval",
                args={"expression": "2"},
                depends_on=["cycle1"]
            )
            WorkflowDefinition(
                name="cycle_test",
                tasks=[task_cycle1, task_cycle2],
                final_task="cycle1"
            )
            # Reaching this line means validation silently accepted a cycle.
            logger.error("❌ Cycle detection failed (should have raised ValueError)")
            return False
        except ValueError:
            logger.info("✓ Cycle detection works")

        logger.info("\n✅ Workflow schema validation successful!")
        return True
    except Exception as e:
        logger.error(f"\n❌ Workflow schema failed: {e}", exc_info=True)
        return False
def main():
    """Run all structure tests and log a pass/fail summary.

    Returns:
        int: Process exit code — 0 if every test passed, 1 otherwise.
    """
    logger.info("=" * 60)
    logger.info("General Reasoning Agent - Basic Structure Test")
    logger.info("=" * 60)

    tests = [
        ("Imports", test_imports),
        ("Configuration", test_configuration),
        ("Tool Creation", test_tool_creation),
        ("Workflow Schema", test_workflow_schema),
    ]

    results = []
    for name, test_func in tests:
        logger.info(f"\n{'=' * 60}")
        logger.info(f"Running: {name}")
        logger.info('=' * 60)
        result = test_func()
        results.append((name, result))

    # Summary
    logger.info("\n" + "=" * 60)
    logger.info("TEST SUMMARY")
    logger.info("=" * 60)
    for name, result in results:
        status = "✅ PASS" if result else "❌ FAIL"
        logger.info(f"{status}: {name}")

    all_passed = all(result for _, result in results)
    logger.info("=" * 60)
    if all_passed:
        logger.info("🎉 ALL TESTS PASSED - Ready for deployment!")
        logger.info("\nNext steps:")
        logger.info("1. Push to HuggingFace Spaces")
        logger.info("2. Wait for model download (~30-60s)")
        logger.info("3. Test MCP endpoint at /gradio_api/mcp/")
        logger.info("4. Configure Claude Code MCP client")
        return 0
    else:
        logger.error("❌ SOME TESTS FAILED - Fix issues before deployment")
        return 1


if __name__ == "__main__":
    sys.exit(main())
|