|
|
| from transformers import AutoModelForSequenceClassification, AutoTokenizer
|
|
|
|
|
# Smoke-test: pull the classifier and its tokenizer straight from the Hub,
# then push one sentence through the model to confirm the checkpoint works.
repo_name = "yashu100/Chatbot"

try:
    # Download (or reuse the cached) weights and tokenizer files.
    model = AutoModelForSequenceClassification.from_pretrained(repo_name)
    tokenizer = AutoTokenizer.from_pretrained(repo_name)
    print("✅ Model and tokenizer loaded successfully from Hugging Face!")

    # Single-sentence forward pass to verify end-to-end inference.
    sample_text = "This is a test sentence."
    inputs = tokenizer(sample_text, return_tensors="pt")
    outputs = model(**inputs)

    print("Sample input processed successfully. Model output:")
    print(outputs.logits)
except Exception as e:
    # Broad catch is deliberate: this is a top-level smoke test, and any
    # failure (network, missing repo, bad config) should just be reported.
    print("❌ Failed to load the model. Error:")
    print(e)
|
|
|
|
|
|
|
| from transformers import AutoTokenizer, AutoModelForSequenceClassification
|
| from huggingface_hub import HfApi, ModelCard, ModelCardData
|
| import os
|
|
|
|
|
|
|
|
|
|
|
# --- Deployment configuration -------------------------------------------

# Folder on disk holding the trained checkpoint to publish.
LOCAL_MODEL_DIR = r"C:\Users\comte\FINAL_90_PERCENT_MODEL"

# Target repository on the Hugging Face Hub ("namespace/name").
HUB_MODEL_ID = "yashu100/aura-sentiment-final"

# Hub account that owns the repository above.
HF_USERNAME = "yashu100"

# Commit message attached to the upload.
COMMIT_MESSAGE = "Final production upload with automated model card for Inference API"
|
|
|
|
|
|
|
|
|
def _build_model_card():
    """Build a ModelCard whose metadata activates the hosted Inference API.

    The ``pipeline_tag`` is what tells the Hub which inference widget and
    API task to wire up for the repo; without it the API stays inactive.
    """
    card_data = ModelCardData(
        language="en",
        license="apache-2.0",
        pipeline_tag="text-classification",
        tags=["mental-health", "sentiment-analysis", "sih-project"],
        # Use only the repo-name segment of the id; [-1] cannot raise even
        # if HUB_MODEL_ID were ever set without a "namespace/" prefix
        # (the original [1] would raise IndexError in that case).
        model_name=f"Aura Sentiment Analysis Model ({HUB_MODEL_ID.split('/')[-1]})"
    )
    return ModelCard.from_template(card_data)


if __name__ == "__main__":
    print("--- Starting Final, Automated Push to Hub Script ---")

    # Fail fast if the checkpoint directory is missing. isdir (rather than
    # exists) also rejects a stray *file* with the same name.
    if not os.path.isdir(LOCAL_MODEL_DIR):
        raise FileNotFoundError(f"Local model directory not found at '{LOCAL_MODEL_DIR}'.")

    # Load locally first: this validates that the saved files form a usable
    # checkpoint BEFORE anything is pushed to the Hub.
    print(f"Loading model from: {LOCAL_MODEL_DIR}...")
    tokenizer = AutoTokenizer.from_pretrained(LOCAL_MODEL_DIR)
    model = AutoModelForSequenceClassification.from_pretrained(LOCAL_MODEL_DIR)
    print("Model and tokenizer loaded successfully.")

    print("\nCreating model card with required metadata...")
    card = _build_model_card()

    # Create the target repository on the Hub (no-op if it already exists).
    api = HfApi()
    repo_url = api.create_repo(
        repo_id=HUB_MODEL_ID,
        repo_type="model",
        exist_ok=True
    )
    print(f"Repository '{HUB_MODEL_ID}' created or already exists on the Hub.")

    print(f"\nUploading all files to: {HUB_MODEL_ID}")

    # Write the card into the local folder so it is uploaded as README.md
    # alongside the model weights in the single upload_folder call below.
    card.save(os.path.join(LOCAL_MODEL_DIR, 'README.md'))

    api.upload_folder(
        folder_path=LOCAL_MODEL_DIR,
        repo_id=HUB_MODEL_ID,
        repo_type="model",
        commit_message=COMMIT_MESSAGE
    )

    print("\n" + "="*50)
    print("✅ SUCCESS! Your model is correctly deployed.")
    print(f"You can view your model at: {repo_url}")
    print("\nPlease wait 5-10 minutes for the Inference API to activate.")
    print("="*50)