Switched to FLAN-T5-Small and added UX improvements
app.py CHANGED
```diff
@@ -1,36 +1,36 @@
 import streamlit as st
-import torch
 import time
+import os
+from transformers import T5Tokenizer, TFT5ForConditionalGeneration  # NEW: Using TF variant
+
+# --- FIX 1: Set a safe cache directory to avoid PermissionError at /.cache ---
+os.environ["HF_HOME"] = "/tmp/hf_cache"
 
 # --- Configuration ---
-# Using FLAN-T5-Small model (approx. 80MB) for fast startup
 MODEL_NAME = "google/flan-t5-small"
 
 # -------------------- Model Logic --------------------
 
-# Define the system prompt with tuning incorporated
 sys_prompt = """
 Categorize the sentiment of the customer review as positive, negative, or neutral.
-
-It is crucial to accurately identify neutral sentiments, which may indicate a balanced view or neutral stance towards the airline. Neutral expressions could involve factual statements without explicit positive or negative opinions.
-Consider the importance of these neutral sentiments in gauging the public sentiment towards the airline company.
-For instance, a positive sentiment might convey satisfaction with the airline's services, a negative sentiment could express dissatisfaction, while neutral sentiment may reflect an impartial observation or a neutral standpoint
+Respond ONLY with the sentiment word (positive, negative, or neutral). Do NOT include any other text.
 """
 
 @st.cache_resource
 def load_llm():
+    """Loads the model and tokenizer without PyTorch."""
+    # Since we removed torch, device is always 'cpu' conceptually
+    device = "CPU (TensorFlow)"
+
     try:
-            device = "cuda" if torch.cuda.is_available() else "cpu"
-            st.info(f"Using device: **{device.upper()}**. Starting model download...")
+        with st.spinner(f"Loading tokenizer and model ({MODEL_NAME}) on {device}..."):
+            st.info(f"Using device: **{device}**. Starting model download...")
 
             start_time = time.time()
             tokenizer = T5Tokenizer.from_pretrained(MODEL_NAME)
+
+            # CRUCIAL CHANGE: Load the TensorFlow version of the model
+            model = TFT5ForConditionalGeneration.from_pretrained(MODEL_NAME)
 
             end_time = time.time()
             st.success(f"Model {MODEL_NAME} loaded successfully in {end_time - start_time:.2f} seconds!")
@@ -39,15 +39,16 @@ def load_llm():
 
     except Exception as e:
         st.error(f"FATAL ERROR LOADING MODEL: {e}")
+        st.info("Model load failed. Check the logs for missing TensorFlow dependencies.")
         return None, None, None
 
 def llm_response(tokenizer, model, device, prompt):
     if tokenizer is None or model is None:
         return "Model not initialized due to previous error."
 
+    # Process input (TensorFlow models automatically handle device placement on CPU)
+    # TF models use the 'tf' argument for return tensors
+    input_ids = tokenizer(prompt, return_tensors="tf").input_ids
 
     # Generate response
     outputs = model.generate(input_ids, max_length=150, do_sample=False)
@@ -56,12 +57,7 @@ def llm_response(tokenizer, model, device, prompt):
     return tokenizer.decode(outputs[0], skip_special_tokens=True).strip()
 
 def predict_review_sentiment(tokenizer, model, device, review):
-    full_prompt = """
-    {}
-    Review text: '{}'
-    """.format(sys_prompt, review)
+    full_prompt = f"{sys_prompt}\nReview text: '{review}'"
     return llm_response(tokenizer, model, device, full_prompt)
 
 
@@ -70,42 +66,46 @@ def predict_review_sentiment(tokenizer, model, device, review):
 # --- 1. Load Resources ---
 tokenizer, model, device = load_llm()
 
+# --- 2. System Info Message (Conditional Display) ---
+st.sidebar.markdown("## 📊 Deployment Status")
+
+if tokenizer is not None:
+    st.sidebar.info(
+        f"Model: **{MODEL_NAME}**\n\n"
+        f"Framework: **{device}**\n\n"
+        "Status: **Local Inference Ready**"
+    )
+
+    # --- 3. Main Application ---
+    st.title("✈️ Customer Review Sentiment Analyzer")
+    st.markdown(f"Using the TensorFlow-backed **{MODEL_NAME}** model.")
+
+    st.subheader("Enter the Customer Review:")
+
+    review_text = st.text_area(
+        "Customer Review:",
+        height=150,
+        placeholder="E.g., The flight was delayed, but the crew was excellent."
+    )
+
+    # Predict button
+    if st.button("Predict Sentiment"):
+        if not review_text:
+            st.warning("Please enter a customer review to predict the sentiment.")
+        else:
+            with st.spinner('Analyzing sentiment...'):
+                pred_sent = predict_review_sentiment(tokenizer, model, device, review_text)
+
+            # Display result with color coding
+            sentiment_text = pred_sent.lower()
+            if 'positive' in sentiment_text:
+                st.success(f"**Predicted Sentiment:** {pred_sent}")
+            elif 'negative' in sentiment_text:
+                st.error(f"**Predicted Sentiment:** {pred_sent}")
+            elif 'neutral' in sentiment_text:
+                st.info(f"**Predicted Sentiment:** {pred_sent}")
+            else:
+                st.warning(f"**Predicted Sentiment (Model Response):** {pred_sent}")
+else:
+    # Display a clear error on the main page if the model load failed
+    st.error("Application Failed to Initialize. Model load aborted (likely missing TensorFlow). Check Space logs.")
```
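Because the commit swaps `torch` for the TensorFlow model class, the Space only starts if TensorFlow (and `sentencepiece`, which `T5Tokenizer` depends on) is installed, which is what the new error messages about missing TensorFlow dependencies point at. A minimal `requirements.txt` sketch for this setup; the package list is an assumption and is not part of this commit:

```text
# Hypothetical requirements.txt for this Space (not part of the commit; pin versions as needed)
streamlit
transformers
tensorflow      # backend for TFT5ForConditionalGeneration
tf-keras        # may be needed by newer transformers releases when Keras 3 is installed
sentencepiece   # required by T5Tokenizer
```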
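To check the TensorFlow-backed inference path outside Streamlit (for example, when the Space fails to start), here is a minimal sketch reusing the same calls as `app.py`; the sample review string is made up and the script is illustrative, not part of the commit:

```python
import os

# Same cache workaround as app.py: point the Hugging Face cache at a writable directory
os.environ["HF_HOME"] = "/tmp/hf_cache"

from transformers import T5Tokenizer, TFT5ForConditionalGeneration

MODEL_NAME = "google/flan-t5-small"

# Load the tokenizer and the TensorFlow variant of FLAN-T5-Small
tokenizer = T5Tokenizer.from_pretrained(MODEL_NAME)
model = TFT5ForConditionalGeneration.from_pretrained(MODEL_NAME)

prompt = (
    "Categorize the sentiment of the customer review as positive, negative, or neutral.\n"
    "Review text: 'The flight was delayed, but the crew was excellent.'"
)

# TF models expect tensors created with return_tensors="tf"
input_ids = tokenizer(prompt, return_tensors="tf").input_ids
outputs = model.generate(input_ids, max_length=150, do_sample=False)
print(tokenizer.decode(outputs[0], skip_special_tokens=True).strip())
```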