# app.py — Gradio sentiment-analysis demo
# (Hugging Face Spaces app; loads a fine-tuned model from ./sentiment_model)
import gradio as gr
from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification
# Directory holding the fine-tuned model and tokenizer files.
# NOTE(review): path is relative to the process working directory — assumes the
# app is launched from the repo root (true on Hugging Face Spaces); verify for
# local runs.
model_dir = "./sentiment_model"
# Load the tokenizer and the fine-tuned sequence-classification model from disk.
tokenizer = AutoTokenizer.from_pretrained(model_dir)
model = AutoModelForSequenceClassification.from_pretrained(model_dir)
# Wrap model + tokenizer in a ready-to-call sentiment-analysis pipeline.
# `classifier(text)` returns a list of {'label': ..., 'score': ...} dicts.
classifier = pipeline("sentiment-analysis", model=model, tokenizer=tokenizer)
# Define the prediction function for Gradio
def predict_sentiment(text, clf=None):
    """Classify *text* and return a human-readable sentiment string.

    Args:
        text: The input text to classify. An empty/falsy value short-circuits
            with a prompt message instead of calling the model.
        clf: Optional classifier callable (``clf(text) -> [{'label', 'score'}]``).
            Defaults to the module-level ``classifier`` pipeline; overriding it
            allows testing without loading the model.

    Returns:
        A string such as ``"Sentiment: Positive (Score: 0.97)"``, or a prompt
        message when *text* is empty.
    """
    if not text:
        return "Please enter some text."
    # Resolve the classifier lazily so the module-level pipeline is only
    # required when actually classifying.
    if clf is None:
        clf = classifier
    # The pipeline returns a list with one result dict per input.
    result = clf(text)[0]
    label = result['label']
    score = result['score']
    # Map raw labels to display names. Fine-tuned checkpoints without an
    # id2label config emit generic 'LABEL_0'/'LABEL_1' names; LABEL_1 is
    # assumed positive per the training setup — others pass through as-is.
    if label == 'LABEL_1' or label == 'POSITIVE':
        sentiment = "Positive"
    elif label == 'LABEL_0' or label == 'NEGATIVE':
        sentiment = "Negative"
    else:
        sentiment = f"Unknown ({label})"
    return f"Sentiment: {sentiment} (Score: {score:.2f})"
# Build the Gradio UI: a multi-line text input feeding predict_sentiment,
# with the formatted result shown in a read-only textbox.
text_input = gr.Textbox(lines=5, label="Enter text for sentiment analysis")
text_output = gr.Textbox(label="Sentiment Result")

iface = gr.Interface(
    fn=predict_sentiment,
    inputs=text_input,
    outputs=text_output,
    title="Sentiment Analysis App",
    description="Enter a movie review or any text to get its sentiment (Positive/Negative).",
)

# Start the app server. share=True requests a public tunnel for local runs;
# on Hugging Face Spaces the app is served automatically.
iface.launch(share=True)