|
|
|
|
|
""" |
|
|
Test script to verify Hugging Face API setup. |
|
|
|
|
|
Run this script to check if your Hugging Face API key is working correctly: |
|
|
python .\scripts\test_huggingface.py |
|
|
""" |
|
|
|
|
|
import os |
|
|
import sys |
|
|
from dotenv import load_dotenv |
|
|
|
|
|
|
|
|
# Load environment variables (e.g. HUGGINGFACE_API_KEY, MODEL_NAME) from a
# local .env file before any of them are read below.
load_dotenv()
|
|
|
|
|
def test_huggingface_api():
    """Test Hugging Face API connectivity end-to-end.

    Verifies, in order:
      1. The HUGGINGFACE_API_KEY environment variable is set.
      2. The ``langchain_huggingface`` package is importable.
      3. At least one candidate model can be initialized and answer a
         small test prompt via the Inference API.

    Returns:
        bool: True if any model responded successfully, False otherwise.
    """
    print("🔍 Testing Hugging Face API connectivity...")

    api_key = os.getenv("HUGGINGFACE_API_KEY")
    if not api_key:
        print("❌ HUGGINGFACE_API_KEY environment variable is not set")
        print(" Please set your Hugging Face API key in the .env file")
        return False

    print(f"✅ HUGGINGFACE_API_KEY is set (length: {len(api_key)} characters)")

    # Import lazily so a missing third-party dependency is reported as a
    # test failure instead of crashing the script at import time.
    try:
        from langchain_huggingface import HuggingFaceEndpoint
        print("✅ langchain_huggingface library is available")
    except ImportError as e:
        print(f"❌ Failed to import langchain_huggingface: {e}")
        print(" Install it with: pip install langchain-huggingface")
        return False

    # Candidate (repo_id, task) pairs, tried in order until one succeeds.
    # The first entry honors an optional MODEL_NAME override from the
    # environment; the rest are fallbacks with their matching task types.
    models_to_try = [
        (os.getenv("MODEL_NAME", "mistralai/Mistral-7B-Instruct-v0.2"), "text-generation"),
        ("microsoft/DialoGPT-large", "conversational"),
        ("google/flan-t5-xxl", "text2text-generation"),
        ("HuggingFaceH4/zephyr-7b-beta", "conversational"),
    ]

    for model_name, task_type in models_to_try:
        print(f"🔍 Testing model initialization with: {model_name} (task: {task_type})")

        try:
            llm = HuggingFaceEndpoint(
                repo_id=model_name,
                huggingfacehub_api_token=api_key,
                task=task_type,
                temperature=0.1,  # near-deterministic output for a smoke test
                max_new_tokens=100,
            )
            print("✅ Model initialized successfully")

            print("📤 Sending test request...")

            # Conversational endpoints expect chat-style input; the other
            # task types take a plain instruction prompt.
            if task_type == "conversational":
                response = llm.invoke("Hello, how are you?")
            else:
                response = llm.invoke("Say 'Hello, FINESE SCHOOL!' in one word.")
            print("✅ Test request successful")
            print(f" Response: {response.strip()}")
            return True

        except Exception as e:
            # Auth errors, gated models, and network failures all surface
            # here; report them and fall through to the next candidate.
            print(f"❌ Failed with model {model_name}: {str(e)}")
            print(" Trying next model...\n")
            continue

    print("❌ All models failed. Please check your API key and network connection.")
    print("\n💡 Troubleshooting tips:")
    print(" 1. Check that your API key is valid")
    print(" 2. Verify you have internet connectivity")
    print(" 3. Check if there are any firewall restrictions")
    print(" 4. Make sure you haven't exceeded your rate limits")
    return False
|
|
|
|
|
def main():
    """Run the Hugging Face connectivity test and print a summary.

    Exits with status code 1 when the test fails so callers (CI jobs,
    shell scripts) can detect the failure programmatically.
    """
    print("🧪 FINESE SCHOOL Hugging Face API Test Script")
    print("=" * 50)

    success = test_huggingface_api()

    print("\n📊 Summary")
    print("=" * 50)

    if success:
        print("✅ Hugging Face API setup is working correctly!")
        print("\n🎉 You can now run the main application:")
        print(" streamlit run src/app.py")
    else:
        print("❌ Hugging Face API setup has issues.")
        print(" Please check the error messages above and fix the issues.")
        sys.exit(1)
|
|
|
|
|
# Entry point: run the test only when executed directly, not on import.
if __name__ == "__main__":
    main()