#!/usr/bin/env python3
"""
Quick deployment test script for Simple AI Assistant.
Run this to verify everything works before deploying to Hugging Face Spaces.
"""

import sys


def test_basic_imports():
    """Test whether the core dependencies import correctly."""
    print("🔍 Testing basic imports...")

    try:
        import torch
        print(f"✅ PyTorch {torch.__version__} imported successfully")
        # Check the PyTorch version for the torch.load security fix.
        # Compare parsed version tuples, not raw strings: a plain string
        # comparison would wrongly rank "2.10.0" below "2.6.0".
        version_digits = tuple(
            int(part) for part in torch.__version__.split("+")[0].split(".")[:2]
        )
        if version_digits >= (2, 6):
            print("✅ PyTorch version is secure (2.6.0+)")
        else:
            print(f"⚠️ PyTorch version {torch.__version__} may have security issues. Upgrade to 2.6.0+")
    except ImportError as e:
        print(f"❌ PyTorch import failed: {e}")
        return False

    try:
        import transformers
        print(f"✅ Transformers {transformers.__version__} imported successfully")
    except ImportError as e:
        print(f"❌ Transformers import failed: {e}")
        return False

    try:
        import gradio
        print(f"✅ Gradio {gradio.__version__} imported successfully")
    except ImportError as e:
        print(f"❌ Gradio import failed: {e}")
        return False

    try:
        import numpy
        print(f"✅ NumPy {numpy.__version__} imported successfully")
    except ImportError as e:
        print(f"❌ NumPy import failed: {e}")
        return False

    # Optional: test autoawq (needed for the AWQ-quantized Mistral model)
    try:
        import awq  # noqa: F401
        print("✅ AutoAWQ imported successfully")
    except ImportError:
        print("⚠️ AutoAWQ not available - Mistral model will fall back to DialoGPT")

    return True


def test_model_loading():
    """Test whether the Mistral-7B-AWQ model can be loaded."""
    print("\n🤖 Testing Mistral-7B-AWQ model loading...")

    try:
        # Import torch here so the test works even when this module is
        # imported elsewhere (the __main__ guard import is not enough).
        import torch
        from transformers import AutoModelForCausalLM, AutoTokenizer

        model_id = "TheBloke/Mistral-7B-Instruct-v0.2-AWQ"
        print(f"🔄 Testing {model_id}...")

        tokenizer = AutoTokenizer.from_pretrained(model_id)
        print("✅ Tokenizer loaded successfully")

        # Try AWQ loading with automatic device mapping first
        try:
            model = AutoModelForCausalLM.from_pretrained(
                model_id,
                device_map="auto" if torch.cuda.is_available() else "cpu",
                torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
                low_cpu_mem_usage=True,
                trust_remote_code=True,
                use_safetensors=True,  # secure loading, avoids pickle
            )
            print("✅ AWQ model loaded successfully with device mapping!")
        except Exception as awq_error:
            print(f"⚠️ AWQ device mapping failed: {awq_error}")
            print("🔄 Trying standard loading...")
            model = AutoModelForCausalLM.from_pretrained(
                model_id,
                torch_dtype=torch.float32,  # float32 for CPU compatibility
                low_cpu_mem_usage=True,
                trust_remote_code=True,
                use_safetensors=True,
            )
            print("✅ AWQ model loaded successfully with standard loading!")

        # Smoke-test tokenization
        test_input = "Hello, how are you?"
        tokens = tokenizer.encode(test_input)
        print(f"✅ Tokenization test passed ({len(tokens)} tokens)")

        return True

    except Exception as e:
        print(f"❌ Mistral model loading failed: {e}")
        print("💡 Make sure autoawq>=0.1.8 is installed")
        return False


def test_emotion_detection():
    """Test the emotion/sentiment detection pipeline."""
    print("\n😊 Testing emotion detection...")

    try:
        from transformers import pipeline

        emotion_detector = pipeline(
            "sentiment-analysis",
            model="distilbert-base-uncased-finetuned-sst-2-english",
            top_k=None,  # return all label scores (replaces deprecated return_all_scores=True)
        )

        test_messages = [
            "I'm so happy today!",
            "I'm feeling really sad.",
            "The weather is okay.",
        ]

        for msg in test_messages:
            scores = emotion_detector(msg)[0]
            # Report the highest-scoring label; the raw score list is not
            # guaranteed to be sorted, so scores[0] may not be the top prediction.
            top = max(scores, key=lambda s: s["score"])
            print(f"✅ '{msg}' -> {top['label']}")

        print("✅ Emotion detection working correctly")
        return True
    except Exception as e:
        print(f"❌ Emotion detection failed: {e}")
        return False


def test_gradio_interface():
    """Test whether Gradio can create the interface."""
    print("\n🌐 Testing Gradio interface...")

    try:
        import gradio as gr

        # Build a minimal Blocks layout with the app's core components
        with gr.Blocks() as demo:
            gr.Markdown("# Test Interface")
            chatbot = gr.Chatbot()
            msg = gr.Textbox()

        print("✅ Gradio interface created successfully")
        print("✅ Ready for deployment!")
        return True
    except Exception as e:
        print(f"❌ Gradio interface test failed: {e}")
        return False


def main():
    """Run all deployment tests and report the overall result."""
    print("🧪 Simple AI Assistant Deployment Test")
    print("=" * 50)

    all_passed = True

    tests = [
        ("Basic Imports", test_basic_imports),
        ("Model Loading", test_model_loading),
        ("Emotion Detection", test_emotion_detection),
        ("Gradio Interface", test_gradio_interface),
    ]

    for test_name, test_func in tests:
        print(f"\n📋 Running {test_name} test...")
        try:
            if not test_func():
                all_passed = False
        except Exception as e:
            print(f"❌ {test_name} test crashed: {e}")
            all_passed = False

    print("\n" + "=" * 50)
    if all_passed:
        print("🎉 ALL TESTS PASSED! Your app is ready for deployment!")
        print("\n📋 Deployment Instructions:")
        print("1. Upload app.py and requirements.txt to Hugging Face Spaces")
        print("2. Set Space SDK to 'gradio'")
        print("3. Set Python version to 3.10+")
        print("4. Your app should build and run successfully!")
    else:
        print("❌ Some tests failed. Please fix the issues before deploying.")
        print("\n💡 Troubleshooting:")
        print("- Try using requirements_minimal.txt if main requirements fail")
        print("- Check Python version (needs 3.10+)")
        print("- Verify internet connection for model downloads")

    return all_passed


if __name__ == "__main__":
    # Fail fast with a helpful message if PyTorch is missing entirely
    try:
        import torch  # noqa: F401
    except ImportError:
        print("❌ PyTorch not installed. Please install requirements first:")
        print("pip install -r requirements.txt")
        sys.exit(1)

    success = main()
    sys.exit(0 if success else 1)
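
# Usage sketch (the filename below is an assumption, not given in the
# original script):
#   pip install -r requirements.txt
#   python test_deployment.py
# The exit status is 0 when every test passes and 1 otherwise, so the
# script can gate a CI or pre-deploy step.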