Spaces:
Sleeping
Sleeping
| """ | |
| Quick test to verify HuggingFace Inference API works | |
| Run this on HF Spaces to debug LLM connection issues | |
| """ | |
| import os | |
| from llm_backend import LLMBackend, LLMProvider | |
def test_hf_connection():
    """Test the HuggingFace Inference API connection.

    Checks for an API token in the environment, initializes the LLM
    backend, verifies it points at the new Inference Providers endpoint,
    and runs a minimal chat generation round-trip.

    Returns:
        bool: True if the token is present, the backend initializes, and
        a simple generation succeeds; False otherwise.
    """
    print("=" * 60)
    print("Testing HuggingFace Inference Providers API Connection")
    print("(Updated Nov 2025 - New Endpoint)")
    print("=" * 60)

    # Token may be supplied under either environment variable name.
    hf_token = os.getenv("HF_TOKEN") or os.getenv("HUGGINGFACE_API_KEY")
    if hf_token:
        # Only show a prefix so the full secret is never printed.
        print(f"β HF Token found: {hf_token[:10]}...")
    else:
        print("β No HF Token found")
        print(" Set HF_TOKEN or HUGGINGFACE_API_KEY environment variable")
        return False

    # Initialize backend
    try:
        backend = LLMBackend(provider=LLMProvider.HUGGINGFACE, api_key=hf_token)
        print("β Backend initialized")
        print(f" Provider: {backend.provider.value}")
        print(f" Model: {backend.model}")
        print(f" API URL: {backend.api_url}")
        # Verify we're using the new endpoint (post Nov-2025 migration).
        if "router.huggingface.co/hf-inference" in backend.api_url:
            print(" β Using new Inference Providers API endpoint")
        else:
            print(" β οΈ WARNING: Not using new endpoint! Update llm_backend.py")
    except Exception as e:
        print(f"β Backend initialization failed: {e}")
        return False

    # Test simple generation
    print("\nTesting simple message generation...")
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Say 'Hello, World!' and nothing else."},
    ]
    try:
        response = backend.generate(messages, max_tokens=50, temperature=0.3)
        print("β Generation successful!")
        print(f" Response: {response[:100]}")
        return True
    except Exception as e:
        print(f"β Generation failed: {e}")
        import traceback
        traceback.print_exc()
        return False
def test_survey_generation():
    """Test end-to-end survey generation through the LLM backend.

    Skips (returns False) when no API token is configured.

    Returns:
        bool: True if a small test survey is generated successfully.
    """
    print("\n" + "=" * 60)
    print("Testing Survey Generation")
    print("=" * 60)

    hf_token = os.getenv("HF_TOKEN") or os.getenv("HUGGINGFACE_API_KEY")
    if not hf_token:
        print("β No HF Token, skipping")
        return False

    try:
        # Imported lazily so the no-token skip path above does not pay
        # for (or fail on) the survey_generator import.
        from survey_generator import SurveyGenerator

        backend = LLMBackend(provider=LLMProvider.HUGGINGFACE, api_key=hf_token)
        generator = SurveyGenerator(backend)
        print("Generating a small test survey...")
        survey = generator.generate_survey(
            outline="Understand user satisfaction with a mobile app",
            survey_type="qualitative",
            num_questions=3,
            target_audience="Mobile app users"
        )
        print("β Survey generated!")
        print(f" Title: {survey.get('title', 'N/A')}")
        print(f" Questions: {len(survey.get('questions', []))}")
        return True
    except Exception as e:
        print(f"β Survey generation failed: {e}")
        import traceback
        traceback.print_exc()
        return False
if __name__ == "__main__":
    # Run the connection smoke test first; only attempt the heavier
    # survey-generation test once the basic connection is known to work.
    banner = "=" * 60
    if not test_hf_connection():
        print("\n" + banner)
        print("β CONNECTION FAILED")
        print(" Check your HF_TOKEN and network connectivity")
        print(banner)
    elif test_survey_generation():
        print("\n" + banner)
        print("β ALL TESTS PASSED!")
        print(banner)
    else:
        print("\n" + banner)
        print("β οΈ Basic connection works but survey generation failed")
        print(" This may be due to model limitations or rate limits")
        print(banner)