#!/usr/bin/env python3
"""
Quick deployment test script for Simple AI Assistant
Run this to verify everything works before deploying to Hugging Face Spaces
"""

import sys

def test_basic_imports():
    """Test if basic imports work"""
    print("πŸ” Testing basic imports...")
    
    try:
        import torch
        print(f"βœ… PyTorch {torch.__version__} imported successfully")
        
        # Check the PyTorch version for known security issues.
        # Compare parsed versions, not strings: "2.10.0" < "2.6.0" lexicographically.
        from packaging import version  # packaging is a transformers dependency
        if version.parse(torch.__version__) >= version.parse("2.6.0"):
            print("βœ… PyTorch version is secure (2.6.0+)")
        else:
            print(f"⚠️  PyTorch version {torch.__version__} may have security issues. Upgrade to 2.6.0+")
        
    except ImportError as e:
        print(f"❌ PyTorch import failed: {e}")
        return False
    
    try:
        import transformers
        print(f"βœ… Transformers {transformers.__version__} imported successfully")
    except ImportError as e:
        print(f"❌ Transformers import failed: {e}")
        return False
    
    try:
        import gradio
        print(f"βœ… Gradio {gradio.__version__} imported successfully")
    except ImportError as e:
        print(f"❌ Gradio import failed: {e}")
        return False
    
    try:
        import numpy
        print(f"βœ… NumPy {numpy.__version__} imported successfully")
    except ImportError as e:
        print(f"❌ NumPy import failed: {e}")
        return False
    
    # Optional: Test autoawq (for Mistral model)
    try:
        import awq
        print(f"βœ… AutoAWQ imported successfully")
    except ImportError:
        print("⚠️  AutoAWQ not available - Mistral model will fall back to DialoGPT")
    
    return True

def test_model_loading():
    """Test if we can load the Mistral-7B-AWQ model"""
    print("\nπŸ€– Testing Mistral-7B-AWQ model loading...")
    
    try:
        import torch  # needed below for device/dtype checks
        from transformers import AutoModelForCausalLM, AutoTokenizer
        
        # The AWQ-quantized Mistral model used by the app
        model_id = "TheBloke/Mistral-7B-Instruct-v0.2-AWQ"
        print(f"πŸ”„ Testing {model_id}...")
        
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        print("βœ… Tokenizer loaded successfully")
        
        # Try AWQ model loading
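        # Note: AWQ inference kernels generally assume a CUDA GPU; on a
        # CPU-only machine this first attempt is expected to fail and the
        # code falls back to the standard loading path below.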
        try:
            model = AutoModelForCausalLM.from_pretrained(
                model_id,
                device_map="auto" if torch.cuda.is_available() else "cpu",
                torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
                low_cpu_mem_usage=True,
                trust_remote_code=True,
                use_safetensors=True  # Secure loading
            )
            print("βœ… AWQ model loaded successfully with device mapping!")
        except Exception as awq_error:
            print(f"⚠️ AWQ device mapping failed: {awq_error}")
            print("πŸ”„ Trying standard loading...")
            model = AutoModelForCausalLM.from_pretrained(
                model_id,
                torch_dtype=torch.float32,  # Use float32 for compatibility
                low_cpu_mem_usage=True,
                trust_remote_code=True,
                use_safetensors=True
            )
            print("βœ… AWQ model loaded successfully with standard loading!")
        
        # Test tokenization
        test_input = "Hello, how are you?"
        tokens = tokenizer.encode(test_input)
        print(f"βœ… Tokenization test passed ({len(tokens)} tokens)")
        
        return True
        
    except Exception as e:
        print(f"❌ Mistral model loading failed: {e}")
        print("πŸ’‘ Make sure autoawq>=0.1.8 is installed")
        return False

def test_emotion_detection():
    """Test emotion detection pipeline"""
    print("\n😊 Testing emotion detection...")
    
    try:
        from transformers import pipeline
        
        # Note: this SST-2 checkpoint is a binary sentiment model
        # (POSITIVE/NEGATIVE), used here as a lightweight emotion proxy.
        # The default pipeline call returns only the top prediction, which
        # is all this test needs (return_all_scores is deprecated).
        emotion_detector = pipeline(
            "sentiment-analysis",
            model="distilbert-base-uncased-finetuned-sst-2-english"
        )
        
        # Run a few sample messages through the classifier
        test_messages = [
            "I'm so happy today!",
            "I'm feeling really sad.",
            "The weather is okay."
        ]
        
        for msg in test_messages:
            result = emotion_detector(msg)
            print(f"βœ… '{msg}' -> {result[0]['label']}")
        
        print("βœ… Emotion detection working correctly")
        return True
        
    except Exception as e:
        print(f"❌ Emotion detection failed: {e}")
        return False

def test_gradio_interface():
    """Test if Gradio can create the interface"""
    print("\n🌐 Testing Gradio interface...")
    
    try:
        import gradio as gr
        
        # Test basic interface creation
        with gr.Blocks() as demo:
            gr.Markdown("# Test Interface")
            chatbot = gr.Chatbot()
            msg = gr.Textbox()
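            
            # Minimal event wiring so the Blocks event graph is exercised
            # too (a simple echo handler stands in here; the real app wires
            # its own chat function instead)
            msg.submit(
                lambda m, h: ("", (h or []) + [(m, m)]),
                [msg, chatbot],
                [msg, chatbot],
            )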
        
        print("βœ… Gradio interface created successfully")
        print("βœ… Ready for deployment!")
        return True
        
    except Exception as e:
        print(f"❌ Gradio interface test failed: {e}")
        return False

def main():
    """Run all tests"""
    print("πŸ§ͺ Simple AI Assistant Deployment Test")
    print("=" * 50)
    
    all_passed = True
    
    # Run tests
    tests = [
        ("Basic Imports", test_basic_imports),
        ("Model Loading", test_model_loading),
        ("Emotion Detection", test_emotion_detection),
        ("Gradio Interface", test_gradio_interface)
    ]
    
    for test_name, test_func in tests:
        print(f"\nπŸ“‹ Running {test_name} test...")
        try:
            if not test_func():
                all_passed = False
        except Exception as e:
            print(f"❌ {test_name} test crashed: {e}")
            all_passed = False
    
    print("\n" + "=" * 50)
    if all_passed:
        print("πŸŽ‰ ALL TESTS PASSED! Your app is ready for deployment!")
        print("\nπŸ“‹ Deployment Instructions:")
        print("1. Upload app.py and requirements.txt to Hugging Face Spaces")
        print("2. Set Space SDK to 'gradio'")
        print("3. Set Python version to 3.10+")
        print("4. Your app should build and run successfully!")
    else:
        print("❌ Some tests failed. Please fix the issues before deploying.")
        print("\nπŸ’‘ Troubleshooting:")
        print("- Try using requirements_minimal.txt if main requirements fail")
        print("- Check Python version (needs 3.10+)")
        print("- Verify internet connection for model downloads")
        
    return all_passed

if __name__ == "__main__":
    # Fail fast with a clear message if PyTorch is not installed
    try:
        import torch
    except ImportError:
        print("❌ PyTorch not installed. Please install requirements first:")
        print("pip install -r requirements.txt")
        sys.exit(1)
    
    success = main()
    sys.exit(0 if success else 1)