# aura-sentiment-final / validate.py
# Uploaded by yashu100 — "Final production upload with automated model card for Inference API"
# commit d22272e (verified)
# validate_upload.py
"""Smoke-test that a model pushed to the Hugging Face Hub loads and runs.

Downloads the model and tokenizer from the Hub, tokenizes one sample
sentence, and prints the resulting logits. Any failure is caught and
reported rather than raised, since this is a go/no-go check.
"""
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# 1. Hugging Face repo name of your uploaded model ("username/repo-name").
repo_name = "yashu100/Chatbot"  # replace with your repo

try:
    # 2. Load model and tokenizer directly from Hugging Face.
    model = AutoModelForSequenceClassification.from_pretrained(repo_name)
    tokenizer = AutoTokenizer.from_pretrained(repo_name)
    print("✅ Model and tokenizer loaded successfully from Hugging Face!")

    # 3. Test with a sample input to confirm the weights actually run.
    sample_text = "This is a test sentence."
    inputs = tokenizer(sample_text, return_tensors="pt")
    outputs = model(**inputs)
    print("Sample input processed successfully. Model output:")
    print(outputs.logits)
except Exception as e:
    # Broad catch is intentional: this is a top-level smoke test, and any
    # failure (network, missing repo, corrupt weights) should be reported,
    # not crash with a traceback.
    print("❌ Failed to load the model. Error:")
    print(e)
# file: push_final_model.py
"""Push the final fine-tuned model, tokenizer, and an auto-generated model
card to the Hugging Face Hub so the hosted Inference API can serve it.

Flow: verify the local model directory exists -> sanity-load the model and
tokenizer -> build a model card with the metadata the Inference API needs
-> create (or reuse) the Hub repo -> upload the whole folder.
"""
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from huggingface_hub import HfApi, ModelCard, ModelCardData
import os

# --------------------------------------------------------------------------
# --- ACTION REQUIRED: CONFIGURE YOUR MODEL DETAILS HERE ---
# --------------------------------------------------------------------------

# 1. The local directory where your best final model is saved.
LOCAL_MODEL_DIR = "C:\\Users\\comte\\FINAL_90_PERCENT_MODEL"  # Make sure this path is correct

# 2. The ID for your NEW, FINAL repository on the Hugging Face Hub.
#    Use a new name to guarantee a clean slate.
#    Format: "YourHuggingFaceUsername/YourNewModelName"
HUB_MODEL_ID = "yashu100/aura-sentiment-final"

# 3. Your Hugging Face username. NOTE(review): kept for reference only —
#    create_repo derives the namespace from HUB_MODEL_ID, so this constant
#    is not passed anywhere below.
HF_USERNAME = "yashu100"  # ---> CHANGE THIS <---

# 4. A commit message for the upload.
COMMIT_MESSAGE = "Final production upload with automated model card for Inference API"

# --------------------------------------------------------------------------
# --- Main Execution Script ---
# --------------------------------------------------------------------------
if __name__ == "__main__":
    print("--- Starting Final, Automated Push to Hub Script ---")

    # --- Verify local model exists (must be a directory, not a file) ---
    if not os.path.isdir(LOCAL_MODEL_DIR):
        raise FileNotFoundError(f"Local model directory not found at '{LOCAL_MODEL_DIR}'.")

    # --- Load local model and tokenizer ---
    # Loaded purely as a sanity check that the saved files are valid;
    # upload_folder below pushes the raw files from disk, not these objects.
    print(f"Loading model from: {LOCAL_MODEL_DIR}...")
    tokenizer = AutoTokenizer.from_pretrained(LOCAL_MODEL_DIR)
    model = AutoModelForSequenceClassification.from_pretrained(LOCAL_MODEL_DIR)
    print("Model and tokenizer loaded successfully.")

    # --- STEP 1: Programmatically create the model card with metadata ---
    print("\nCreating model card with required metadata...")
    card_data = ModelCardData(
        language="en",
        license="apache-2.0",
        # pipeline_tag is the single most important tag for the Inference API:
        # without it the hosted widget will not know how to run the model.
        pipeline_tag="text-classification",
        tags=["mental-health", "sentiment-analysis", "sih-project"],
        model_name=f"Aura Sentiment Analysis Model ({HUB_MODEL_ID.split('/')[1]})",
    )
    card = ModelCard.from_template(card_data)

    # --- STEP 2: Create the repository on the Hub ---
    api = HfApi()
    repo_url = api.create_repo(
        repo_id=HUB_MODEL_ID,
        repo_type="model",
        exist_ok=True,  # Don't error if it already exists, just update it.
    )
    print(f"Repository '{HUB_MODEL_ID}' created or already exists on the Hub.")

    # --- STEP 3: Push the model, tokenizer, AND the new model card ---
    print(f"\nUploading all files to: {HUB_MODEL_ID}")

    # Save the generated model card into the local directory first so the
    # folder upload below includes it as the repo's README.md.
    card.save(os.path.join(LOCAL_MODEL_DIR, 'README.md'))

    # Upload the entire contents of the local directory in one commit.
    api.upload_folder(
        folder_path=LOCAL_MODEL_DIR,
        repo_id=HUB_MODEL_ID,
        repo_type="model",
        commit_message=COMMIT_MESSAGE,
    )

    print("\n" + "=" * 50)
    print("✅ SUCCESS! Your model is correctly deployed.")
    print(f"You can view your model at: {repo_url}")
    print("\nPlease wait 5-10 minutes for the Inference API to activate.")
    print("=" * 50)