# ProjectEcho / test_hf_backend.py
"""
Quick test to verify HuggingFace Inference API works
Run this on HF Spaces to debug LLM connection issues
"""
import os
from llm_backend import LLMBackend, LLMProvider
def test_hf_connection():
    """Test HuggingFace Inference API connection.

    Returns True when a token is present, the backend initializes, and a
    trivial chat generation succeeds; False (with a printed diagnosis)
    otherwise.
    """
    banner = "=" * 60
    print(banner)
    print("Testing HuggingFace Inference Providers API Connection")
    print("(Updated Nov 2025 - New Endpoint)")
    print(banner)

    # The token may be supplied under either environment variable name.
    hf_token = os.getenv("HF_TOKEN") or os.getenv("HUGGINGFACE_API_KEY")
    if not hf_token:
        print("βœ— No HF Token found")
        print(" Set HF_TOKEN or HUGGINGFACE_API_KEY environment variable")
        return False
    print(f"βœ“ HF Token found: {hf_token[:10]}...")

    # Initialize the backend and report its configuration.
    try:
        backend = LLMBackend(provider=LLMProvider.HUGGINGFACE, api_key=hf_token)
        print(f"βœ“ Backend initialized")
        print(f" Provider: {backend.provider.value}")
        print(f" Model: {backend.model}")
        print(f" API URL: {backend.api_url}")
        # Warn if the backend still points at the legacy endpoint.
        on_new_endpoint = "router.huggingface.co/hf-inference" in backend.api_url
        if on_new_endpoint:
            print(f" βœ“ Using new Inference Providers API endpoint")
        else:
            print(f" ⚠️ WARNING: Not using new endpoint! Update llm_backend.py")
    except Exception as e:
        print(f"βœ— Backend initialization failed: {e}")
        return False

    # Exercise a minimal chat completion round-trip.
    print("\nTesting simple message generation...")
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Say 'Hello, World!' and nothing else."}
    ]
    try:
        response = backend.generate(messages, max_tokens=50, temperature=0.3)
        print(f"βœ“ Generation successful!")
        print(f" Response: {response[:100]}")
        return True
    except Exception as e:
        print(f"βœ— Generation failed: {e}")
        import traceback
        traceback.print_exc()
        return False
def test_survey_generation():
    """Test actual survey generation.

    Builds a SurveyGenerator on top of the HuggingFace backend and asks for
    a tiny 3-question survey; returns True on success, False otherwise.
    """
    banner = "=" * 60
    print("\n" + banner)
    print("Testing Survey Generation")
    print(banner)

    from survey_generator import SurveyGenerator

    # Skip entirely when no token is configured (either env var works).
    hf_token = os.getenv("HF_TOKEN") or os.getenv("HUGGINGFACE_API_KEY")
    if not hf_token:
        print("βœ— No HF Token, skipping")
        return False

    try:
        backend = LLMBackend(provider=LLMProvider.HUGGINGFACE, api_key=hf_token)
        generator = SurveyGenerator(backend)
        print("Generating a small test survey...")
        # Keep the request small to stay well within rate limits.
        survey = generator.generate_survey(
            outline="Understand user satisfaction with a mobile app",
            survey_type="qualitative",
            num_questions=3,
            target_audience="Mobile app users"
        )
        print(f"βœ“ Survey generated!")
        print(f" Title: {survey.get('title', 'N/A')}")
        print(f" Questions: {len(survey.get('questions', []))}")
        return True
    except Exception as e:
        print(f"βœ— Survey generation failed: {e}")
        import traceback
        traceback.print_exc()
        return False
if __name__ == "__main__":
    # Run the connection test first; only attempt survey generation if it
    # passes, then print a summary banner for the overall outcome.
    sep = "=" * 60
    if not test_hf_connection():
        print("\n" + sep)
        print("❌ CONNECTION FAILED")
        print(" Check your HF_TOKEN and network connectivity")
        print(sep)
    elif test_survey_generation():
        print("\n" + sep)
        print("βœ… ALL TESTS PASSED!")
        print(sep)
    else:
        print("\n" + sep)
        print("⚠️ Basic connection works but survey generation failed")
        print(" This may be due to model limitations or rate limits")
        print(sep)