Spaces:
Sleeping
Sleeping
File size: 1,487 Bytes
425c345 0b983db 425c345 9bd8b16 425c345 0099c22 9bd8b16 221ea1a 9bd8b16 0b983db 9bd8b16 0b983db |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 |
import gradio as gr
from transformers import AutoModelForSequenceClassification, AutoTokenizer
import torch
# Load the fine-tuned classifier and its tokenizer from the repo root.
# NOTE(review): assumes the Space's root directory contains the saved model
# artifacts (config.json, weights, tokenizer files) — confirm repo layout.
model = AutoModelForSequenceClassification.from_pretrained("./")
tokenizer = AutoTokenizer.from_pretrained("./")
def predict_sentiment(text):
    """Classify a movie review as "Positive" or "Negative".

    Args:
        text: Raw review text; truncated to 256 tokens before inference.

    Returns:
        The label string ("Negative" or "Positive") with the highest logit.
    """
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=256)
    # Inference only: disable autograd so no gradient graph is built
    # (saves memory and compute on every request).
    with torch.no_grad():
        outputs = model(**inputs)
    # argmax over the class dimension; .item() unwraps the 0-d tensor to an int.
    predicted_label = torch.argmax(outputs.logits, dim=-1).item()
    label_map = {0: "Negative", 1: "Positive"}
    return label_map[predicted_label]
# --- Gradio UI ----------------------------------------------------------
title = "Movie Review Sentiment Analysis"
description = (
    "Enter a movie review and find out whether it's Positive or Negative! "
    "The fine-tuned distilbert-base-uncased model trained on the imdb "
    "dataset will try to classify your review.\n\n"
    "Below are some examples you can try"
)
# Clickable example inputs shown below the interface.
examples = [
    ["The movie was amazing!"],
    ["I didn't like the movie, it was really boring and the cast was terrible"],
    ["Absolutely loved it! One of the best movies of all time"],
]
intf = gr.Interface(
    fn=predict_sentiment,
    title=title,
    description=description,
    # Multi-line textbox for the review; a plain textbox shows the label.
    inputs=gr.Textbox(lines=10, label="Enter your movie review here..."),
    outputs=gr.Textbox(label="Sentiment Label (Prediction)"),
    examples=examples,
)
# Start the app server; inline=False skips inline (notebook) rendering.
intf.launch(inline=False)
|