# NOTE(review): the three lines below were HuggingFace Spaces page residue
# ("Spaces: / Sleeping / Sleeping") left by a copy/scrape — kept as comments
# so the file remains valid Python.
# debug_ai_questions.py - Debug AI Questions issue
import os
import sys

from dotenv import load_dotenv
def debug_translator_state():
    """Print the translator's configuration and probe OpenRouter with one prompt.

    Returns the freshly-constructed translator so callers can reuse it.
    """
    print("π Debugging Translator State...")
    print("=" * 60)

    # Re-read .env so edits made since the last run take effect.
    load_dotenv(override=True)

    # Drop any cached copy of the module, then import a fresh one.
    sys.modules.pop('translator', None)
    from translator import get_translator

    translator = get_translator()
    key = translator.openrouter_api_key
    print(f"π OpenRouter Model: {translator.openrouter_model}")
    # Only the ends of the key are shown, never the full secret.
    print(f"π OpenRouter API Key: {key[:20]}...{key[-10:]}")

    print("\nπ§ͺ Testing OpenRouter directly...")
    try:
        result, error = translator._openrouter_complete("Say hello in Arabic")
        if result:
            print(f"β OpenRouter working: {result[:50]}...")
        else:
            print(f"β OpenRouter failed: {error}")
    except Exception as e:
        print(f"π₯ OpenRouter exception: {e}")
    return translator
def debug_ai_questions_engine():
    """Build a fresh AI-question engine and report its model availability.

    Returns the engine instance for downstream debug stages.
    """
    print("\nπ Debugging AI Questions Engine...")
    print("=" * 60)

    # Evict any cached module so the import below is fresh.
    sys.modules.pop('ai_questions', None)
    from ai_questions import get_ai_question_engine

    engine = get_ai_question_engine()
    print(f"π§ Engine translator model: {engine.translator.openrouter_model}")

    # Query availability before printing the header, matching the original
    # side-effect order.
    availability = engine.check_model_availability()
    print("\nπ Model Status:")
    for model, status in availability.items():
        icon = status.get('icon', 'β')
        status_text = "β Available" if status.get('available', False) else "β Unavailable"
        print(f" {icon} {model}: {status_text}")
    return engine
def debug_question_processing():
    """Exercise the engine's internal context-prep and AI-response helpers.

    Tries the 'OpenRouter AI' model first and, if that fails, falls back to
    'auto' model selection. All results are printed; nothing is returned.
    """
    print("\nπ Debugging Question Processing...")
    print("=" * 60)

    engine = debug_ai_questions_engine()

    print("\nπ― Testing OpenRouter AI specifically...")
    sample_text = "Hello and what are you doing? This is a test and I want to test my voice."
    sample_question = "Explain this text in detail"

    try:
        # Build the prompt context exactly as a real request would.
        context = engine._prepare_question_context(
            selected_text=sample_text,
            question=sample_question,
            session=None,  # no live session is needed for this probe
            ui_language='en',
        )
        print(f"π Prepared context: {context[:100]}...")

        # First attempt: pin the preferred model to OpenRouter AI.
        answer, err, used = engine._get_ai_response_with_model(
            context=context,
            ui_language='en',
            preferred_model='OpenRouter AI',
        )
        if answer:
            print(f"β OpenRouter AI response: {answer[:100]}...")
            print(f"π§ Model used: {used}")
        else:
            print(f"β OpenRouter AI failed: {err}")

            # Fallback: let the engine choose a model automatically.
            print("\nπ Trying with auto model...")
            answer, err, used = engine._get_ai_response_with_model(
                context=context,
                ui_language='en',
                preferred_model='auto',
            )
            if answer:
                print(f"β Auto model response: {answer[:100]}...")
                print(f"π§ Model used: {used}")
            else:
                print(f"β Auto model also failed: {err}")
    except Exception as e:
        print(f"π₯ Question processing failed: {e}")
        import traceback
        traceback.print_exc()
def debug_full_question_flow():
    """Run one question end-to-end through the public process_question API."""
    print("\nπ Debugging Full Question Flow...")
    print("=" * 60)

    engine = debug_ai_questions_engine()

    sample_text = "Hello and what are you doing? This is a test and I want to test my voice."
    sample_question = "Explain this text in detail"
    try:
        answer, err, session_id, used = engine.process_question(
            selected_text=sample_text,
            question=sample_question,
            segment_info={"id": "test_segment"},
            ui_language='en',
            preferred_model='OpenRouter AI',
        )
        if answer:
            print(f"β Full flow success: {answer[:100]}...")
            print(f"π§ Model used: {used}")
            print(f"π Session ID: {session_id}")
        else:
            print(f"β Full flow failed: {err}")
    except Exception as e:
        print(f"π₯ Full flow exception: {e}")
        import traceback
        traceback.print_exc()
def main():
    """Run every debug stage in sequence.

    Each stage prints its own report; the objects they return are not used
    here, so the previously unused local bindings were dropped.
    """
    print("π AI Questions Debug Tool")
    print("=" * 60)

    # Stage 1: translator configuration and a direct OpenRouter probe.
    debug_translator_state()
    # Stage 2: AI-questions engine construction and model availability.
    debug_ai_questions_engine()
    # Stage 3: internal context-prep / response helpers.
    debug_question_processing()
    # Stage 4: the public end-to-end question flow.
    debug_full_question_flow()

    print("\n" + "=" * 60)
    print("π― Debug Complete!")


if __name__ == "__main__":
    main()