# chatbot1/scripts/test_huggingface.py
#!/usr/bin/env python3
"""
Test script to verify Hugging Face API setup.
Run this script to check if your Hugging Face API key is working correctly:
python .\scripts\test_huggingface.py
"""
import os
import sys
from dotenv import load_dotenv
# Load variables from a local .env file (e.g. HUGGINGFACE_API_KEY, MODEL_NAME)
# into the process environment before the os.getenv() lookups below run.
load_dotenv()
def test_huggingface_api():
    """Check that the Hugging Face Inference API is reachable and usable.

    Verifies, in order:
      1. the ``HUGGINGFACE_API_KEY`` environment variable is set,
      2. the ``langchain_huggingface`` package can be imported,
      3. at least one candidate model can be initialized and answer a
         trivial prompt.

    Returns:
        bool: True as soon as one model answers successfully; False if the
        API key is missing, the library is not installed, or every model
        fails.
    """
    print("πŸ” Testing Hugging Face API connectivity...")

    # 1. The API key must be present before anything else is attempted.
    api_key = os.getenv("HUGGINGFACE_API_KEY")
    if not api_key:
        print("❌ HUGGINGFACE_API_KEY environment variable is not set")
        print(" Please set your Hugging Face API key in the .env file")
        return False
    print(f"βœ… HUGGINGFACE_API_KEY is set (length: {len(api_key)} characters)")

    # 2. The langchain_huggingface integration must be installed.
    #    Imported lazily so the missing-key path above needs no third-party code.
    try:
        from langchain_huggingface import HuggingFaceEndpoint
        print("βœ… langchain_huggingface library is available")
    except ImportError as e:
        print(f"❌ Failed to import langchain_huggingface: {e}")
        print(" Install it with: pip install langchain-huggingface")
        return False

    # 3. Candidate models, each paired with the inference task it supports.
    #    The first entry honours an optional MODEL_NAME override from the
    #    environment; the rest are fallbacks tried in order.
    #    (A previous TASK_MAPPING dict duplicating this pairing was never
    #    read and has been removed.)
    models_to_try = [
        (os.getenv("MODEL_NAME", "mistralai/Mistral-7B-Instruct-v0.2"), "text-generation"),
        ("microsoft/DialoGPT-large", "conversational"),
        ("google/flan-t5-xxl", "text2text-generation"),
        ("HuggingFaceH4/zephyr-7b-beta", "conversational"),
    ]

    for model_name, task_type in models_to_try:
        print(f"πŸ” Testing model initialization with: {model_name} (task: {task_type})")
        try:
            llm = HuggingFaceEndpoint(
                repo_id=model_name,
                huggingfacehub_api_token=api_key,
                task=task_type,  # the endpoint rejects mismatched task types
                temperature=0.1,
                max_new_tokens=100,
            )
            print("βœ… Model initialized successfully")

            # Send a minimal prompt; conversational models get chat-style
            # input, other tasks get a plain instruction.
            print("πŸ” Sending test request...")
            if task_type == "conversational":
                response = llm.invoke("Hello, how are you?")
            else:
                response = llm.invoke("Say 'Hello, FINESE SCHOOL!' in one word.")
            print("βœ… Test request successful")
            print(f" Response: {response.strip()}")
            return True
        except Exception as e:
            # Auth failures, gated models and network errors all surface
            # here; report and try the next candidate instead of aborting.
            print(f"❌ Failed with model {model_name}: {str(e)}")
            print(" Trying next model...\n")
            continue

    print("❌ All models failed. Please check your API key and network connection.")
    print("\nπŸ’‘ Troubleshooting tips:")
    print(" 1. Check that your API key is valid")
    print(" 2. Verify you have internet connectivity")
    print(" 3. Check if there are any firewall restrictions")
    print(" 4. Make sure you haven't exceeded your rate limits")
    return False
def main():
    """Run the Hugging Face connectivity check and print a pass/fail summary.

    Exits the process with status 1 when the check fails.
    """
    banner = "=" * 50
    print("πŸ§ͺ FINESE SCHOOL Hugging Face API Test Script")
    print(banner)

    ok = test_huggingface_api()

    print("\nπŸ“‹ Summary")
    print(banner)
    if not ok:
        # Failure path first: report and exit non-zero for CI/scripting use.
        print("❌ Hugging Face API setup has issues.")
        print(" Please check the error messages above and fix the issues.")
        sys.exit(1)
    print("βœ… Hugging Face API setup is working correctly!")
    print("\nπŸš€ You can now run the main application:")
    print(" streamlit run src/app.py")


if __name__ == "__main__":
    main()