"""
Step-by-step testing of all components
"""
print("="*60)
print("TESTING ALL COMPONENTS ONE BY ONE")
print("="*60)
# Test 1: Model 1 - SymbolicVerifier
# Verifies arithmetic steps symbolically: one known-good step, one known-bad.
print("\n[1/10] Testing Model 1: SymbolicVerifier")
try:
    from models.symbolic_verifier import SymbolicVerifier

    verifier = SymbolicVerifier()
    # A correct calculation should come back with a passing verdict.
    result1 = verifier.verify(["3 + 2 = 5"])
    print(f" ✓ Valid test: {result1['verdict']} ({result1['confidence']*100:.0f}% confidence)")
    # A deliberately wrong calculation should be flagged with at least one error.
    result2 = verifier.verify(["5 - 1 = 6"])
    print(f" ✓ Error test: {result2['verdict']} ({result2['confidence']*100:.0f}% confidence, {len(result2['errors'])} errors found)")
    print(" ✅ Model 1 PASSED")
except Exception as e:
    print(f" ❌ Model 1 FAILED: {e}")
# Test 2: Model 2 - LLMLogicalChecker
# Checks a reasoning step via the LLM-backed logical checker.
print("\n[2/10] Testing Model 2: LLMLogicalChecker")
try:
    from models.llm_logical_checker import LLMLogicalChecker

    checker = LLMLogicalChecker("GPT-4")
    result = checker.verify(["She buys 2 more: 3 + 2 = 5 apples"])
    print(f" ✓ Test: {result['verdict']} ({result['confidence']*100:.0f}% confidence)")
    # The result should echo back the model name it was constructed with.
    print(f" ✓ Model name: {result['model_name']}")
    print(" ✅ Model 2 PASSED")
except Exception as e:
    print(f" ❌ Model 2 FAILED: {e}")
# Test 3: Model 3 - EnsembleNeuralChecker
# Runs an ensemble of sub-models over a step containing a known error.
print("\n[3/10] Testing Model 3: EnsembleNeuralChecker")
try:
    from models.ensemble_neural_checker import EnsembleNeuralChecker

    ensemble = EnsembleNeuralChecker(["GPT-4", "Llama 2", "Gemini"])
    result = ensemble.verify(["5 - 1 = 6"])
    print(f" ✓ Test: {result['verdict']} ({result['confidence']*100:.0f}% confidence)")
    print(f" ✓ Agreement: {result['agreement']}")
    print(f" ✓ Sub-models: {result['sub_models']}")
    print(" ✅ Model 3 PASSED")
except Exception as e:
    print(f" ❌ Model 3 FAILED: {e}")
# Test 4: Consensus Mechanism
# Runs all three verifier models on the same erroneous step and combines
# their individual verdicts into one consensus result.
print("\n[4/10] Testing Consensus Mechanism")
try:
    from consensus.consensus_mechanism import compute_consensus
    from models.symbolic_verifier import SymbolicVerifier
    from models.llm_logical_checker import LLMLogicalChecker
    from models.ensemble_neural_checker import EnsembleNeuralChecker

    steps = ["5 - 1 = 6"]  # single step containing a known arithmetic error
    symbolic = SymbolicVerifier()
    llm = LLMLogicalChecker()
    ensemble = EnsembleNeuralChecker()
    r1 = symbolic.verify(steps)
    r2 = llm.verify(steps)
    r3 = ensemble.verify(steps)
    consensus = compute_consensus(r1, r2, r3)
    print(f" ✓ Final verdict: {consensus['final_verdict']}")
    print(f" ✓ Confidence: {consensus['overall_confidence']*100:.1f}%")
    print(f" ✓ Agreement: {consensus['agreement_type']}")
    print(f" ✓ Error score: {consensus['error_score']:.3f}")
    print(" ✅ Consensus Mechanism PASSED")
except Exception as e:
    print(f" ❌ Consensus Mechanism FAILED: {e}")
# Test 5: Error Classification
# Classifies a sample calculation error into category/severity/fixability.
print("\n[5/10] Testing Error Classification")
try:
    from utils.error_classifier import classify_error

    error = {
        "type": "calculation_error",
        "found": "5 - 1 = 6",
        "correct": "5 - 1 = 4",
        "operation": "-",
        "step_number": 1,
    }
    classified = classify_error(error)
    print(f" ✓ Category: {classified['category']}")
    print(f" ✓ Severity: {classified['severity']}")
    print(f" ✓ Fixable: {classified['fixable']}")
    print(f" ✓ Fixability score: {classified['fixability_score']*100:.0f}%")
    print(" ✅ Error Classification PASSED")
except Exception as e:
    print(f" ❌ Error Classification FAILED: {e}")
# Test 6: Explanation Generation
# Generates a human-readable explanation for the same sample error.
print("\n[6/10] Testing Explanation Generation")
try:
    from utils.explanation_generator import generate_explanation

    error = {
        "type": "calculation_error",
        "found": "5 - 1 = 6",
        "correct": "5 - 1 = 4",
        "operation": "-",
        "step_number": 1,
    }
    explanation = generate_explanation(error)
    print(f" ✓ Explanation generated ({len(explanation)} chars)")
    print(f" ✓ Preview: {explanation[:80]}...")
    print(" ✅ Explanation Generation PASSED")
except Exception as e:
    print(f" ❌ Explanation Generation FAILED: {e}")
# Test 7: Error Correction
# Applies an automatic fix to a step with a known fixable calculation error.
print("\n[7/10] Testing Error Correction")
try:
    from utils.error_corrector import correct_solution

    steps = ["She gives 1 away: 5 - 1 = 6 apples"]
    errors = [{
        "type": "calculation_error",
        "found": "5 - 1 = 6",
        "correct": "5 - 1 = 4",
        "operation": "-",
        "step_number": 1,
        "fixable": True,
    }]
    correction = correct_solution(steps, errors)
    print(f" ✓ Fixed: {correction['fixed_count']}/{correction['total_fixable']} errors")
    print(f" ✓ Success rate: {correction['success_rate']*100:.0f}%")
    # Show the before/after of the first correction, if any were logged.
    if correction['correction_log']:
        print(f" ✓ Original: {correction['correction_log'][0]['original']}")
        print(f" ✓ Corrected: {correction['correction_log'][0]['corrected']}")
    print(" ✅ Error Correction PASSED")
except Exception as e:
    print(f" ❌ Error Correction FAILED: {e}")
# Test 8: Full Integration
# End-to-end run of the parallel verification engine on a multi-step
# solution that contains one deliberate arithmetic error.
print("\n[8/10] Testing Full Integration (Parallel Execution)")
try:
    from core.verification_engine import run_verification_parallel

    problem = "Janet has 3 apples. She buys 2 more. She gives 1 away. How many?"
    steps = [
        "Janet starts with 3 apples",
        "She buys 2 more: 3 + 2 = 5 apples",
        "She gives 1 away: 5 - 1 = 6 apples",  # ERROR: should be 4
    ]
    result = run_verification_parallel(
        problem=problem,
        steps=steps,
        model_name="GPT-4",
        model_list=["GPT-4", "Llama 2", "Gemini"],
    )
    print(f" ✓ Processing time: {result['processing_time']:.2f}s")
    print(f" ✓ Final verdict: {result['consensus']['final_verdict']}")
    print(f" ✓ Confidence: {result['consensus']['overall_confidence']*100:.1f}%")
    print(f" ✓ Errors found: {len(result['classified_errors'])}")
    print(f" ✓ All 3 models executed: {len(result['model_results']) == 3}")
    print(" ✅ Full Integration PASSED")
except Exception as e:
    print(f" ❌ Full Integration FAILED: {e}")
    # Full traceback helps debug integration failures that span modules.
    import traceback
    traceback.print_exc()
# Test 9: Check Dependencies
# Check each third-party dependency independently so one missing package
# does not hide the status of the others (the original single try-block
# aborted on the first failed import).
print("\n[9/10] Checking Dependencies")
import importlib

missing = []
for dep in ("streamlit", "sympy", "pytest"):
    try:
        importlib.import_module(dep)
        print(f" ✓ {dep} installed")
    except ImportError as e:
        missing.append(dep)
        print(f" ⚠️ Missing dependency: {e}")
if missing:
    print("   Run: pip install -r requirements.txt")
else:
    print(" ✅ All Dependencies Available")
# Test 10: Import Check for App
# Mirror the imports app.py performs at startup to confirm it can launch.
print("\n[10/10] Testing App Imports")
try:
    import streamlit as st
    import time
    from typing import List, Dict, Any
    from core import run_verification_parallel

    print(" ✓ All app imports successful")
    print(" ✅ App Ready to Run")
except Exception as e:
    print(f" ❌ App Import FAILED: {e}")
print("\n" + "="*60)
print("TESTING COMPLETE")
print("="*60)
print("\nNext steps:")
print("1. Run: streamlit run app.py")
print("2. Or test: python run_example.py")
print("3. Or run tests: pytest")