Switched to FLAN-T5-Small and added UX improvements

- Dockerfile +21 -0
- app.py +111 -0
- requirements.txt +11 -0
Dockerfile
ADDED
@@ -0,0 +1,21 @@
+# Use a suitable base image with Python installed
+FROM python:3.9-slim
+
+# Set the working directory inside the container to /app
+WORKDIR /app
+
+# Copy all files from the current directory on the host to the container's /app directory
+COPY . .
+
+# Install Python dependencies listed in requirements.txt
+RUN pip3 install -r requirements.txt
+
+# --- FIX: Set Streamlit to use /tmp for configuration to avoid PermissionError ---
+ENV STREAMLIT_SERVER_PORT=7860 \
+    STREAMLIT_SERVER_ADDRESS=0.0.0.0 \
+    STREAMLIT_BROWSER_GATHER_USAGE_STATS=false \
+    # This is the crucial fix for the PermissionError:
+    STREAMLIT_GLOBAL_DIR="/tmp/streamlit_global"
+
+# Define the command to run the Streamlit app
+CMD ["streamlit", "run", "app.py"]
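If the app is run outside this container, the writable-config workaround can be approximated from Python before Streamlit starts. A minimal sketch, assuming Streamlit honors the STREAMLIT_GLOBAL_DIR environment variable as the Dockerfile comment implies:

    # Hypothetical standalone equivalent of the ENV fix above.
    # Assumption: Streamlit reads STREAMLIT_GLOBAL_DIR from the environment.
    import os

    os.environ.setdefault("STREAMLIT_GLOBAL_DIR", "/tmp/streamlit_global")
    # Ensure the directory exists and is writable before Streamlit touches it
    os.makedirs(os.environ["STREAMLIT_GLOBAL_DIR"], exist_ok=True)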
app.py
ADDED
@@ -0,0 +1,111 @@
+import streamlit as st
+import torch
+import time
+from transformers import T5Tokenizer, T5ForConditionalGeneration
+
+# --- Configuration ---
+# Using FLAN-T5-Small model (approx. 80MB) for fast startup
+MODEL_NAME = "google/flan-t5-small"
+
+# -------------------- Model Logic --------------------
+
+# Define the system prompt with tuning incorporated
+sys_prompt = """
+Categorize the sentiment of the customer review as positive, negative, or neutral.
+Leverage your expertise in the aviation industry and deep understanding of industry trends to analyze the nuanced expressions and overall tone.
+It is crucial to accurately identify neutral sentiments, which may indicate a balanced view or neutral stance towards the airline. Neutral expressions could involve factual statements without explicit positive or negative opinions.
+Consider the importance of these neutral sentiments in gauging the public sentiment towards the airline company.
+For instance, a positive sentiment might convey satisfaction with the airline's services, a negative sentiment could express dissatisfaction, while a neutral sentiment may reflect an impartial observation or a neutral standpoint.
+"""
+
+@st.cache_resource
+def load_llm():
+    """Loads the model and tokenizer with a friendly loading message."""
+    try:
+        with st.spinner(f"Loading tokenizer and model ({MODEL_NAME}). This is the slowest part!"):
+            # Determine the device
+            device = "cuda" if torch.cuda.is_available() else "cpu"
+            st.info(f"Using device: **{device.upper()}**. Starting model download...")
+
+            start_time = time.time()
+            tokenizer = T5Tokenizer.from_pretrained(MODEL_NAME)
+            # Explicitly place the model on the determined device
+            model = T5ForConditionalGeneration.from_pretrained(MODEL_NAME).to(device)
+
+            end_time = time.time()
+            st.success(f"Model {MODEL_NAME} loaded successfully in {end_time - start_time:.2f} seconds!")
+
+        return tokenizer, model, device
+
+    except Exception as e:
+        st.error(f"FATAL ERROR LOADING MODEL: {e}")
+        st.info("Check the Space logs for dependency or memory errors.")
+        return None, None, None
+
+def llm_response(tokenizer, model, device, prompt):
+    if tokenizer is None or model is None:
+        return "Model not initialized due to previous error."
+
+    # Tokenize and move input to the correct device
+    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)
+
+    # Generate response
+    outputs = model.generate(input_ids, max_length=150, do_sample=False)
+
+    # Decode and clean the output
+    return tokenizer.decode(outputs[0], skip_special_tokens=True).strip()
+
+def predict_review_sentiment(tokenizer, model, device, review):
+    """Formats the prompt and gets the prediction."""
+    full_prompt = """
+{}
+Review text: '{}'
+""".format(sys_prompt, review)
+
+    return llm_response(tokenizer, model, device, full_prompt)
+
+
+# -------------------- Streamlit UI --------------------
+
+# --- 1. Load Resources ---
+tokenizer, model, device = load_llm()
+
+# --- 2. System Info Message (Simplified) ---
+st.sidebar.markdown("## 📊 System Info (Check Space Logs for Live Monitor)")
+st.sidebar.info(
+    f"Model: **{MODEL_NAME}**\n\n"
+    f"Device: **{device.upper() if device else 'N/A'}**\n\n"
+    "Status: **App Fully Loaded**"
+)
+
+# --- 3. Main Application ---
+st.title("✈️ Customer Review Sentiment Analyzer")
+st.markdown(f"Using the ultra-lightweight **{MODEL_NAME}** model for fast sentiment classification.")
+
+st.subheader("Enter the Customer Review:")
+
+# Collect user input
+review_text = st.text_area(
+    "Customer Review:",
+    height=150,
+    placeholder="E.g., I'm stuck on the tarmac for two hours. This is unacceptable! #SkyWingsfail"
+)
+
+# Predict button
+if st.button("Predict Sentiment"):
+    if not review_text:
+        st.warning("Please enter a customer review to predict the sentiment.")
+    elif tokenizer is not None:
+        with st.spinner('Analyzing sentiment...'):
+            pred_sent = predict_review_sentiment(tokenizer, model, device, review_text)
+
+        # Display result with color coding
+        sentiment_text = pred_sent.lower()
+        if 'positive' in sentiment_text:
+            st.success(f"**Predicted Sentiment:** {pred_sent}")
+        elif 'negative' in sentiment_text:
+            st.error(f"**Predicted Sentiment:** {pred_sent}")
+        elif 'neutral' in sentiment_text:
+            st.info(f"**Predicted Sentiment:** {pred_sent}")
+        else:
+            st.warning(f"**Predicted Sentiment (Unknown Category):** {pred_sent}")
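For reference, the same classification path can be exercised outside the Streamlit UI. A minimal standalone sketch, mirroring the model name and generation settings in app.py (the sample review and the expected label are illustrative assumptions, not outputs from a real run):

    # Hypothetical smoke test: runs the same tokenizer/model/generate path as app.py.
    import torch
    from transformers import T5Tokenizer, T5ForConditionalGeneration

    device = "cuda" if torch.cuda.is_available() else "cpu"
    tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-small")
    model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-small").to(device)

    prompt = (
        "Categorize the sentiment of the customer review as positive, negative, or neutral.\n"
        "Review text: 'The crew was friendly and boarding was quick.'"  # illustrative sample review
    )
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)
    outputs = model.generate(input_ids, max_length=150, do_sample=False)
    # Greedy decoding, so the output is deterministic; expect a label such as "positive"
    print(tokenizer.decode(outputs[0], skip_special_tokens=True).strip())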
requirements.txt
ADDED
@@ -0,0 +1,11 @@
+# Core libraries
+streamlit
+pandas==2.2.2
+numpy==2.0.2
+
+# Hugging Face Transformers and LLM dependencies
+transformers==4.56.2
+sentencepiece==0.2.1
+torch==2.8.0
+accelerate==1.10.1
+bitsandbytes==0.48.1
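Note that app.py itself imports only streamlit, torch, and transformers (sentencepiece is needed by T5Tokenizer), while pandas, numpy, accelerate, and bitsandbytes are not referenced by this version of the app. A quick sanity-check sketch to confirm the pinned stack imports together:

    # Sketch: verify the pinned stack imports cleanly and report versions.
    import torch
    import transformers

    print("torch:", torch.__version__)                # expected 2.8.0 per requirements.txt
    print("transformers:", transformers.__version__)  # expected 4.56.2 per requirements.txt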