Spaces:
Sleeping
Sleeping
jvroo
committed on
Commit
·
0cf6591
1
Parent(s):
e6fee1c
UI Change
Browse files
app.py
CHANGED
|
@@ -2,7 +2,9 @@ import gradio as gr
|
|
| 2 |
from transformers import pipeline
|
| 3 |
import matplotlib.pyplot as plt
|
| 4 |
import numpy as np
|
|
|
|
| 5 |
from huggingface_hub import InferenceClient
|
|
|
|
| 6 |
|
| 7 |
# Define models for local and remote inference
|
| 8 |
local_model = "distilbert-base-uncased-finetuned-sst-2-english"
|
|
@@ -11,8 +13,34 @@ remote_model = "siebert/sentiment-roberta-large-english" # You can use the same
|
|
| 11 |
# Load the local sentiment analysis pipeline with the specified model
|
| 12 |
local_pipeline = pipeline("sentiment-analysis", model=local_model)
|
| 13 |
|
| 14 |
-
# Initialize the inference
|
| 15 |
-
remote_inference_client = InferenceClient(remote_model)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 16 |
|
| 17 |
# Function to perform sentiment analysis using the local pipeline
|
| 18 |
def local_sentiment_analysis(review):
|
|
@@ -28,17 +56,17 @@ def local_sentiment_analysis(review):
|
|
| 28 |
def remote_sentiment_analysis(review):
|
| 29 |
try:
|
| 30 |
# Make a request to the Hugging Face Inference API for text classification
|
| 31 |
-
response = remote_inference_client.text_classification(review
|
| 32 |
sentiment = response[0]['label']
|
| 33 |
score = response[0]['score']
|
| 34 |
return sentiment, score
|
| 35 |
except Exception as e:
|
| 36 |
-
return f"Error: {str(e)}", 0.0
|
| 37 |
|
| 38 |
-
# Function to analyze sentiment and
|
| 39 |
-
def analyze_sentiment(review, mode):
|
| 40 |
if not review.strip():
|
| 41 |
-
return "Error: Review text cannot be empty.", None
|
| 42 |
|
| 43 |
if mode == "Local Pipeline":
|
| 44 |
sentiment, score = local_sentiment_analysis(review)
|
|
@@ -47,11 +75,17 @@ def analyze_sentiment(review, mode):
|
|
| 47 |
sentiment, score = remote_sentiment_analysis(review)
|
| 48 |
model_info = f"Using remote model: {remote_model}"
|
| 49 |
else:
|
| 50 |
-
return "Invalid mode selected.", None
|
|
|
|
|
|
|
|
|
|
| 51 |
|
| 52 |
# Format the sentiment result
|
| 53 |
result_text = f"Sentiment: {sentiment}, Confidence: {score:.2f}\n{model_info}"
|
| 54 |
-
|
|
|
|
|
|
|
|
|
|
| 55 |
# Enhanced plot
|
| 56 |
fig, ax = plt.subplots(figsize=(8, 5))
|
| 57 |
|
|
@@ -73,7 +107,7 @@ def analyze_sentiment(review, mode):
|
|
| 73 |
textcoords="offset points",
|
| 74 |
ha='center', va='bottom')
|
| 75 |
|
| 76 |
-
return result_text, fig # Return the Matplotlib figure directly
|
| 77 |
|
| 78 |
# Custom CSS for styling
|
| 79 |
custom_css = """
|
|
@@ -140,6 +174,11 @@ with gr.Blocks(css=custom_css) as demo:
|
|
| 140 |
gr.Markdown("<h1>Movie Review Sentiment Analysis</h1>")
|
| 141 |
|
| 142 |
with gr.Column():
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 143 |
with gr.Row():
|
| 144 |
review_input = gr.Textbox(
|
| 145 |
label="Enter Movie Review", placeholder="Type your movie review here...", lines=4
|
|
@@ -153,13 +192,14 @@ with gr.Blocks(css=custom_css) as demo:
|
|
| 153 |
with gr.Row():
|
| 154 |
analyze_button = gr.Button("Analyze Sentiment")
|
| 155 |
|
| 156 |
-
# Output
|
| 157 |
sentiment_output = gr.Textbox(label="Sentiment Analysis Result", interactive=False)
|
| 158 |
-
|
|
|
|
| 159 |
plot_output = gr.Plot(label="Sentiment Score Graph")
|
| 160 |
|
| 161 |
-
analyze_button.click(analyze_sentiment, [review_input, mode_input], [sentiment_output, plot_output])
|
| 162 |
|
| 163 |
# Run the Gradio app
|
| 164 |
if __name__ == "__main__":
|
| 165 |
-
demo.launch(
|
|
|
|
| 2 |
from transformers import pipeline
|
| 3 |
import matplotlib.pyplot as plt
|
| 4 |
import numpy as np
|
| 5 |
+
import requests
|
| 6 |
from huggingface_hub import InferenceClient
|
| 7 |
+
import os
|
| 8 |
|
| 9 |
# Define models for local and remote inference
|
| 10 |
local_model = "distilbert-base-uncased-finetuned-sst-2-english"
|
|
|
|
| 13 |
# Load the local sentiment analysis pipeline with the specified model
local_pipeline = pipeline("sentiment-analysis", model=local_model)

# Initialize the inference client
remote_inference_client = InferenceClient(remote_model)

# OMDb API endpoint
OMDB_API_URL = 'http://www.omdbapi.com/'
# The API key is stored as a secret on Hugging Face under the name "OMDB";
# it may be None when the secret is not configured — TODO confirm handling.
api_key = os.getenv("OMDB")
# BUG FIX: the OMDb API key is a plain string passed as the 'apikey' query
# parameter. Wrapping it in OpenAI(...) raised a NameError (OpenAI is never
# imported) and has nothing to do with OMDb.
OMDB_API_KEY = api_key
|
| 24 |
+
|
| 25 |
+
# Function to fetch movie information from OMDb API
def fetch_movie_info(movie_name):
    """Look up *movie_name* on OMDb and return a dict of movie metadata.

    Returns a dict with Title/Description/Year/Director/Genre/Actors/Rating
    keys on success, or ``{'Error': message}`` when the movie is not found
    or the HTTP request fails.
    """
    try:
        # timeout added so a stalled OMDb request cannot hang the UI forever
        response = requests.get(
            OMDB_API_URL,
            params={'t': movie_name, 'apikey': OMDB_API_KEY},
            timeout=10,
        )
        data = response.json()
        # OMDb signals success with the literal string 'True'
        if data['Response'] == 'True':
            return {
                'Title': data.get('Title', 'N/A'),
                'Description': data.get('Plot', 'N/A'),
                'Year': data.get('Year', 'N/A'),
                'Director': data.get('Director', 'N/A'),
                'Genre': data.get('Genre', 'N/A'),
                'Actors': data.get('Actors', 'N/A'),
                'Rating': data.get('imdbRating', 'N/A'),
            }
        else:
            return {'Error': data.get('Error', 'Movie not found')}
    except Exception as e:
        # network failure, bad JSON, or missing 'Response' key
        return {'Error': str(e)}
|
| 44 |
|
| 45 |
# Function to perform sentiment analysis using the local pipeline
|
| 46 |
def local_sentiment_analysis(review):
|
|
|
|
| 56 |
def remote_sentiment_analysis(review):
    """Classify *review* via the Hugging Face Inference API.

    Returns a ``(label, score)`` pair from the top prediction, or an
    error string together with ``0.0`` when the remote call fails.
    """
    try:
        # Ask the remote endpoint for text-classification predictions
        predictions = remote_inference_client.text_classification(review)
        top = predictions[0]
        return top['label'], top['score']
    except Exception as e:
        return f"Error: {str(e)}", 0.0
|
| 65 |
|
| 66 |
+
# Function to analyze sentiment and fetch movie details
|
| 67 |
+
def analyze_sentiment(movie_name, review, mode):
|
| 68 |
if not review.strip():
|
| 69 |
+
return "Error: Review text cannot be empty.", None, None, None
|
| 70 |
|
| 71 |
if mode == "Local Pipeline":
|
| 72 |
sentiment, score = local_sentiment_analysis(review)
|
|
|
|
| 75 |
sentiment, score = remote_sentiment_analysis(review)
|
| 76 |
model_info = f"Using remote model: {remote_model}"
|
| 77 |
else:
|
| 78 |
+
return "Invalid mode selected.", None, None, None
|
| 79 |
+
|
| 80 |
+
# Fetch movie information
|
| 81 |
+
movie_info = fetch_movie_info(movie_name)
|
| 82 |
|
| 83 |
# Format the sentiment result
|
| 84 |
result_text = f"Sentiment: {sentiment}, Confidence: {score:.2f}\n{model_info}"
|
| 85 |
+
|
| 86 |
+
# Extract movie description
|
| 87 |
+
movie_description = movie_info.get('Description', 'N/A')
|
| 88 |
+
|
| 89 |
# Enhanced plot
|
| 90 |
fig, ax = plt.subplots(figsize=(8, 5))
|
| 91 |
|
|
|
|
| 107 |
textcoords="offset points",
|
| 108 |
ha='center', va='bottom')
|
| 109 |
|
| 110 |
+
return result_text, movie_description, movie_info, fig # Return the Matplotlib figure directly
|
| 111 |
|
| 112 |
# Custom CSS for styling
|
| 113 |
custom_css = """
|
|
|
|
| 174 |
gr.Markdown("<h1>Movie Review Sentiment Analysis</h1>")
|
| 175 |
|
| 176 |
with gr.Column():
|
| 177 |
+
with gr.Row():
|
| 178 |
+
movie_input = gr.Textbox(
|
| 179 |
+
label="Enter Movie Name", placeholder="Type the movie name here...", lines=1
|
| 180 |
+
)
|
| 181 |
+
|
| 182 |
with gr.Row():
|
| 183 |
review_input = gr.Textbox(
|
| 184 |
label="Enter Movie Review", placeholder="Type your movie review here...", lines=4
|
|
|
|
| 192 |
with gr.Row():
|
| 193 |
analyze_button = gr.Button("Analyze Sentiment")
|
| 194 |
|
| 195 |
+
# Output boxes
|
| 196 |
sentiment_output = gr.Textbox(label="Sentiment Analysis Result", interactive=False)
|
| 197 |
+
movie_description_output = gr.Textbox(label="Movie Description", interactive=False)
|
| 198 |
+
movie_info_output = gr.JSON(label="Movie Information")
|
| 199 |
plot_output = gr.Plot(label="Sentiment Score Graph")
|
| 200 |
|
| 201 |
+
analyze_button.click(analyze_sentiment, [movie_input, review_input, mode_input], [sentiment_output, movie_description_output, movie_info_output, plot_output])
|
| 202 |
|
| 203 |
# Run the Gradio app
# share=True asks Gradio to expose a temporary public URL in addition to
# the local server.
if __name__ == "__main__":
    demo.launch(share=True)
|