# debug_ai_questions.py - Debug AI Questions issue
import os
import sys
from dotenv import load_dotenv
def debug_translator_state():
    """Print translator configuration, probe OpenRouter directly, and return the translator.

    Side effects: reloads .env with override=True (so edited values beat stale
    os.environ), evicts the cached 'translator' module so a fresh import picks
    up the new environment, and prints diagnostics to stdout.

    Returns:
        The freshly constructed translator instance from get_translator().
    """
    # NOTE(review): original emoji prefixes were mojibake (e.g. "π", "β…");
    # replaced with ASCII tags. Original split-emoji f-strings were SyntaxErrors.
    print("[*] Debugging Translator State...")
    print("=" * 60)

    # Force-reload environment so changes to .env take effect this run.
    load_dotenv(override=True)

    # Drop the cached module so the re-import reads the refreshed env vars.
    if 'translator' in sys.modules:
        del sys.modules['translator']

    # Fresh import after the cache purge.
    from translator import get_translator
    translator = get_translator()

    print(f"[*] OpenRouter Model: {translator.openrouter_model}")
    # Guard against a missing key (None would break slicing); show only
    # fragments so the full secret never lands in logs.
    api_key = translator.openrouter_api_key or ""
    print(f"[*] OpenRouter API Key: {api_key[:20]}...{api_key[-10:]}")

    # Probe the OpenRouter backend directly, bypassing higher-level wrappers.
    print("\n[*] Testing OpenRouter directly...")
    try:
        result, error = translator._openrouter_complete("Say hello in Arabic")
        if result:
            print(f"[OK] OpenRouter working: {result[:50]}...")
        else:
            print(f"[FAIL] OpenRouter failed: {error}")
    except Exception as e:
        print(f"[!!] OpenRouter exception: {str(e)}")

    return translator
def debug_ai_questions_engine():
    """Freshly import the AI Questions engine, print model availability, return the engine.

    Side effects: evicts the cached 'ai_questions' module so the re-import
    sees current state, and prints a per-model availability report to stdout.

    Returns:
        The engine instance from get_ai_question_engine().
    """
    # NOTE(review): original emoji prefixes were mojibake; replaced with ASCII
    # tags. The original "Available" f-string was split mid-emoji (SyntaxError).
    print("\n[*] Debugging AI Questions Engine...")
    print("=" * 60)

    # Drop the cached module so the re-import reflects any code/env changes.
    if 'ai_questions' in sys.modules:
        del sys.modules['ai_questions']

    # Fresh import after the cache purge.
    from ai_questions import get_ai_question_engine
    engine = get_ai_question_engine()

    print(f"[*] Engine translator model: {engine.translator.openrouter_model}")

    # Report each backing model's availability as seen by the engine.
    models_status = engine.check_model_availability()
    print("\n[*] Model Status:")
    for model, status in models_status.items():
        icon = status.get('icon', '?')
        available = status.get('available', False)
        status_text = "[OK] Available" if available else "[FAIL] Unavailable"
        print(f"   {icon} {model}: {status_text}")

    return engine
def debug_question_processing():
    """Exercise the engine's internal context-prep and model-dispatch paths.

    Calls the private _prepare_question_context and _get_ai_response_with_model
    methods directly, first pinning the 'OpenRouter AI' model and then falling
    back to 'auto', printing what each attempt returns. Purely diagnostic; all
    results go to stdout.
    """
    # NOTE(review): original emoji prefixes were mojibake; two response
    # f-strings were split mid-emoji across lines (SyntaxErrors) — repaired.
    print("\n[*] Debugging Question Processing...")
    print("=" * 60)

    engine = debug_ai_questions_engine()

    print("\n[*] Testing OpenRouter AI specifically...")
    test_text = "Hello and what are you doing? This is a test and I want to test my voice."
    test_question = "Explain this text in detail"

    try:
        # Build the prompt context the same way the public flow would.
        context = engine._prepare_question_context(
            selected_text=test_text,
            question=test_question,
            session=None,  # no live session needed for this probe
            ui_language='en'
        )
        print(f"[*] Prepared context: {context[:100]}...")

        # First attempt: force the OpenRouter AI backend.
        response, error, model_used = engine._get_ai_response_with_model(
            context=context,
            ui_language='en',
            preferred_model='OpenRouter AI'
        )
        if response:
            print(f"[OK] OpenRouter AI response: {response[:100]}...")
            print(f"[*] Model used: {model_used}")
        else:
            print(f"[FAIL] OpenRouter AI failed: {error}")

            # Second attempt: let the engine pick any available model.
            print("\n[*] Trying with auto model...")
            response, error, model_used = engine._get_ai_response_with_model(
                context=context,
                ui_language='en',
                preferred_model='auto'
            )
            if response:
                print(f"[OK] Auto model response: {response[:100]}...")
                print(f"[*] Model used: {model_used}")
            else:
                print(f"[FAIL] Auto model also failed: {error}")

    except Exception as e:
        print(f"[!!] Question processing failed: {str(e)}")
        import traceback
        traceback.print_exc()
def debug_full_question_flow():
    """Run the engine's public end-to-end question flow and print the outcome.

    Unlike debug_question_processing (which pokes private helpers), this calls
    the public process_question entry point with a fixed text/question pair and
    reports the response, model used, and session id. Diagnostic output only.
    """
    # NOTE(review): original emoji prefixes were mojibake; the success
    # f-string was split mid-emoji across lines (SyntaxError) — repaired.
    print("\n[*] Debugging Full Question Flow...")
    print("=" * 60)

    engine = debug_ai_questions_engine()

    test_text = "Hello and what are you doing? This is a test and I want to test my voice."
    test_question = "Explain this text in detail"

    try:
        response, error, session_id, model_used = engine.process_question(
            selected_text=test_text,
            question=test_question,
            segment_info={"id": "test_segment"},
            ui_language='en',
            preferred_model='OpenRouter AI'
        )
        if response:
            print(f"[OK] Full flow success: {response[:100]}...")
            print(f"[*] Model used: {model_used}")
            print(f"[*] Session ID: {session_id}")
        else:
            print(f"[FAIL] Full flow failed: {error}")

    except Exception as e:
        print(f"[!!] Full flow exception: {str(e)}")
        import traceback
        traceback.print_exc()
def main():
    """Run every debug stage in sequence and print a completion banner."""
    # NOTE(review): original emoji prefixes were mojibake; replaced with ASCII.
    # Return values were bound to unused locals in the original — dropped.
    print("[*] AI Questions Debug Tool")
    print("=" * 60)

    # Each stage prints its own diagnostics; order matters (translator first).
    debug_translator_state()
    debug_ai_questions_engine()
    debug_question_processing()
    debug_full_question_flow()

    print("\n" + "=" * 60)
    print("[*] Debug Complete!")


if __name__ == "__main__":
    main()