"""

Quick test to verify HuggingFace Inference API works

Run this on HF Spaces to debug LLM connection issues

"""
import os
from llm_backend import LLMBackend, LLMProvider

def test_hf_connection():
    """Check that the HuggingFace Inference Providers API is reachable.

    Verifies, in order: an API token is present in the environment, the
    LLMBackend initializes against the HuggingFace provider, the backend
    points at the new router endpoint, and a trivial chat completion
    succeeds.

    Returns:
        bool: True when every step succeeds, False otherwise.
    """
    print("=" * 60)
    print("Testing HuggingFace Inference Providers API Connection")
    print("(Updated Nov 2025 - New Endpoint)")
    print("=" * 60)

    # The token may live under either environment variable name.
    hf_token = os.getenv("HF_TOKEN") or os.getenv("HUGGINGFACE_API_KEY")
    if hf_token:
        # Print only a short prefix so the secret never lands in logs.
        print(f"βœ“ HF Token found: {hf_token[:10]}...")
    else:
        print("βœ— No HF Token found")
        print("  Set HF_TOKEN or HUGGINGFACE_API_KEY environment variable")
        return False

    # Initialize backend
    try:
        backend = LLMBackend(provider=LLMProvider.HUGGINGFACE, api_key=hf_token)
        print("βœ“ Backend initialized")
        print(f"  Provider: {backend.provider.value}")
        print(f"  Model: {backend.model}")
        print(f"  API URL: {backend.api_url}")

        # Warn when the backend is not pointed at the router endpoint
        # (presumably the legacy endpoint no longer works — see header).
        if "router.huggingface.co/hf-inference" in backend.api_url:
            print("  βœ“ Using new Inference Providers API endpoint")
        else:
            print("  ⚠️  WARNING: Not using new endpoint! Update llm_backend.py")

    except Exception as e:
        print(f"βœ— Backend initialization failed: {e}")
        return False

    # Smoke-test a minimal two-message chat completion.
    print("\nTesting simple message generation...")
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Say 'Hello, World!' and nothing else."}
    ]

    try:
        response = backend.generate(messages, max_tokens=50, temperature=0.3)
        print("βœ“ Generation successful!")
        print(f"  Response: {response[:100]}")
        return True
    except Exception as e:
        print(f"βœ— Generation failed: {e}")
        import traceback
        traceback.print_exc()
        return False


def test_survey_generation():
    """Generate a tiny survey end-to-end through the HuggingFace backend.

    Skips (returning False) when no API token is configured; otherwise
    builds a SurveyGenerator on top of LLMBackend and requests a small
    3-question qualitative survey, reporting its title and question count.

    Returns:
        bool: True when a survey was generated, False on skip or error.
    """
    print("\n" + "="*60)
    print("Testing Survey Generation")
    print("="*60)

    hf_token = os.getenv("HF_TOKEN") or os.getenv("HUGGINGFACE_API_KEY")
    if not hf_token:
        print("βœ— No HF Token, skipping")
        return False

    # Import only after the token check so the skip path carries no
    # dependency on the survey_generator module.
    from survey_generator import SurveyGenerator

    try:
        backend = LLMBackend(provider=LLMProvider.HUGGINGFACE, api_key=hf_token)
        generator = SurveyGenerator(backend)

        print("Generating a small test survey...")
        survey = generator.generate_survey(
            outline="Understand user satisfaction with a mobile app",
            survey_type="qualitative",
            num_questions=3,
            target_audience="Mobile app users"
        )

        # Survey is expected to be dict-like; use .get so a missing key
        # reports "N/A"/0 instead of crashing the report.
        print("βœ“ Survey generated!")
        print(f"  Title: {survey.get('title', 'N/A')}")
        print(f"  Questions: {len(survey.get('questions', []))}")
        return True

    except Exception as e:
        print(f"βœ— Survey generation failed: {e}")
        import traceback
        traceback.print_exc()
        return False


if __name__ == "__main__":
    # Run tests
    connection_ok = test_hf_connection()

    if connection_ok:
        survey_ok = test_survey_generation()
        if survey_ok:
            print("\n" + "="*60)
            print("βœ… ALL TESTS PASSED!")
            print("="*60)
        else:
            print("\n" + "="*60)
            print("⚠️  Basic connection works but survey generation failed")
            print("    This may be due to model limitations or rate limits")
            print("="*60)
    else:
        print("\n" + "="*60)
        print("❌ CONNECTION FAILED")
        print("   Check your HF_TOKEN and network connectivity")
        print("="*60)