"""
Quick deployment test script for Simple AI Assistant

Run this to verify everything works before deploying to Hugging Face Spaces
"""

import sys
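
# To run this pre-deployment check, execute the script directly, e.g.
# (the filename is illustrative; use whatever name this file is saved under):
#   python test_deployment.py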


def test_basic_imports():
    """Test if basic imports work"""
    print("🔍 Testing basic imports...")

    try:
        import torch
        print(f"✅ PyTorch {torch.__version__} imported successfully")

        # Compare versions numerically so that e.g. 2.10 is recognised as newer than 2.6.
        major, minor = (int(part) for part in torch.__version__.split("+")[0].split(".")[:2])
        if (major, minor) >= (2, 6):
            print("✅ PyTorch version is secure (2.6.0+)")
        else:
            print(f"⚠️ PyTorch version {torch.__version__} may have security issues. Upgrade to 2.6.0+")
    except ImportError as e:
        print(f"❌ PyTorch import failed: {e}")
        return False

    try:
        import transformers
        print(f"✅ Transformers {transformers.__version__} imported successfully")
    except ImportError as e:
        print(f"❌ Transformers import failed: {e}")
        return False

    try:
        import gradio
        print(f"✅ Gradio {gradio.__version__} imported successfully")
    except ImportError as e:
        print(f"❌ Gradio import failed: {e}")
        return False

    try:
        import numpy
        print(f"✅ NumPy {numpy.__version__} imported successfully")
    except ImportError as e:
        print(f"❌ NumPy import failed: {e}")
        return False

    # AutoAWQ is optional: without it the app falls back to a smaller model.
    try:
        import awq
        print("✅ AutoAWQ imported successfully")
    except ImportError:
        print("⚠️ AutoAWQ not available - Mistral model will fall back to DialoGPT")

    return True


def test_model_loading():
    """Test if we can load the Mistral-7B-AWQ model"""
    print("\n🤖 Testing Mistral-7B-AWQ model loading...")

    try:
        # Import torch here so this test also works when the module is imported
        # rather than run as a script.
        import torch
        from transformers import AutoModelForCausalLM, AutoTokenizer

        model_id = "TheBloke/Mistral-7B-Instruct-v0.2-AWQ"
        print(f"📥 Testing {model_id}...")

        tokenizer = AutoTokenizer.from_pretrained(model_id)
        print("✅ Tokenizer loaded successfully")

        try:
            model = AutoModelForCausalLM.from_pretrained(
                model_id,
                device_map="auto" if torch.cuda.is_available() else "cpu",
                torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
                low_cpu_mem_usage=True,
                trust_remote_code=True,
                use_safetensors=True
            )
            print("✅ AWQ model loaded successfully with device mapping!")
        except Exception as awq_error:
            print(f"⚠️ AWQ device mapping failed: {awq_error}")
            print("🔄 Trying standard loading...")
            model = AutoModelForCausalLM.from_pretrained(
                model_id,
                torch_dtype=torch.float32,
                low_cpu_mem_usage=True,
                trust_remote_code=True,
                use_safetensors=True
            )
            print("✅ AWQ model loaded successfully with standard loading!")

        test_input = "Hello, how are you?"
        tokens = tokenizer.encode(test_input)
        print(f"✅ Tokenization test passed ({len(tokens)} tokens)")

        return True

    except Exception as e:
        print(f"❌ Mistral model loading failed: {e}")
        print("💡 Make sure autoawq>=0.1.8 is installed")
        return False


def test_emotion_detection():
    """Test emotion detection pipeline"""
    print("\n😊 Testing emotion detection...")

    try:
        from transformers import pipeline

        emotion_detector = pipeline(
            "sentiment-analysis",
            model="distilbert-base-uncased-finetuned-sst-2-english",
            return_all_scores=True
        )

        test_messages = [
            "I'm so happy today!",
            "I'm feeling really sad.",
            "The weather is okay."
        ]

        for msg in test_messages:
            result = emotion_detector(msg)
            print(f"✅ '{msg}' -> {result[0][0]['label']}")

        print("✅ Emotion detection working correctly")
        return True

    except Exception as e:
        print(f"❌ Emotion detection failed: {e}")
        return False


def test_gradio_interface():
    """Test if Gradio can create the interface"""
    print("\n🌐 Testing Gradio interface...")

    try:
        import gradio as gr

        # Build a minimal Blocks layout to confirm the Gradio components construct cleanly.
        with gr.Blocks() as demo:
            gr.Markdown("# Test Interface")
            chatbot = gr.Chatbot()
            msg = gr.Textbox()

        print("✅ Gradio interface created successfully")
        print("✅ Ready for deployment!")
        return True

    except Exception as e:
        print(f"❌ Gradio interface test failed: {e}")
        return False


def main():
    """Run all tests"""
    print("🧪 Simple AI Assistant Deployment Test")
    print("=" * 50)

    all_passed = True

    tests = [
        ("Basic Imports", test_basic_imports),
        ("Model Loading", test_model_loading),
        ("Emotion Detection", test_emotion_detection),
        ("Gradio Interface", test_gradio_interface)
    ]

    for test_name, test_func in tests:
        print(f"\n📋 Running {test_name} test...")
        try:
            if not test_func():
                all_passed = False
        except Exception as e:
            print(f"❌ {test_name} test crashed: {e}")
            all_passed = False

    print("\n" + "=" * 50)
    if all_passed:
        print("🎉 ALL TESTS PASSED! Your app is ready for deployment!")
        print("\n📝 Deployment Instructions:")
        print("1. Upload app.py and requirements.txt to Hugging Face Spaces")
        print("2. Set Space SDK to 'gradio'")
        print("3. Set Python version to 3.10+")
        print("4. Your app should build and run successfully!")
    else:
        print("❌ Some tests failed. Please fix the issues before deploying.")
        print("\n💡 Troubleshooting:")
        print("- Try using requirements_minimal.txt if main requirements fail")
        print("- Check Python version (needs 3.10+)")
        print("- Verify internet connection for model downloads")

    return all_passed
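

# Entry point: confirm PyTorch is installed before running the full test suite,
# since every other check depends on it.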
if __name__ == "__main__":
    try:
        import torch
    except ImportError:
        print("❌ PyTorch not installed. Please install requirements first:")
        print("pip install -r requirements.txt")
        sys.exit(1)

    success = main()
    sys.exit(0 if success else 1)