# aics3experiment / app.py
# NOTE: the original paste carried Hugging Face Space page residue here
# ("omm7's picture / Update app.py / eebb9da verified") — UI chrome, not code.
import streamlit as st
import time
# REMOVED: import os (Not needed as it's in Dockerfile)
from transformers import T5Tokenizer, TFT5ForConditionalGeneration
# --- Configuration (Unchanged) ---
# Hugging Face model id pulled at startup by load_llm().
MODEL_NAME = "google/flan-t5-large"
# -------------------- Model Logic --------------------
# CRITICAL FIX: Simplified and highly directive prompt for the smallest model
# sys_prompt = "Classify the sentiment of the following customer review as either 'positive', 'negative', or 'neutral'. Respond with only one word."
# System prompt prepended to every review (see predict_review_sentiment);
# instructs the model to answer with exactly one of the three labels that
# the normalization step downstream looks for.
sys_prompt = """
Classify the sentiment of the following customer review as either 'positive', 'negative', or 'neutral'. Respond with only one word.
Leverage your expertise in the aviation industry and deep understanding of industry trends to analyze the nuanced expressions and overall tone.
It is crucial to accurately identify neutral sentiments, which may indicate a balanced view or neutral stance towards Us Airways. Neutral expressions could involve factual statements without explicit positive or negative opinions.
Consider the importance of these neutral sentiments in gauging the public sentiment towards the airline company.
For instance, a positive sentiment might convey satisfaction with the airline's services, a negative sentiment could express dissatisfaction, while neutral sentiment may reflect an impartial observation or a neutral standpoint
"""
@st.cache_resource
def load_llm():
    """Download and cache the FLAN-T5 tokenizer and TensorFlow model.

    Returns:
        (tokenizer, model, device) on success, or (None, None, None) when
        loading fails so the UI can degrade gracefully instead of crashing.
    """
    device = "CPU (TensorFlow)"
    try:
        with st.spinner(f"Loading tokenizer and model ({MODEL_NAME}) on {device}..."):
            st.info(f"Using device: **{device}**. Starting model download...")
            started = time.time()
            tok = T5Tokenizer.from_pretrained(MODEL_NAME)
            mdl = TFT5ForConditionalGeneration.from_pretrained(MODEL_NAME)
            elapsed = time.time() - started
            st.success(f"Model {MODEL_NAME} loaded successfully in {elapsed:.2f} seconds!")
            return tok, mdl, device
    except Exception as e:
        st.error(f"FATAL ERROR LOADING MODEL: {e}")
        st.info("Model load failed.")
        return None, None, None
def llm_response(tokenizer, model, device, prompt):
    """Greedily generate a short completion for *prompt* and return the text.

    Returns a fixed error string when the model failed to initialize.
    """
    if tokenizer is None or model is None:
        return "Model not initialized due to previous error."
    encoded = tokenizer(prompt, return_tensors="tf").input_ids
    # max_length=3 leaves room for a single-word label plus special tokens.
    generated = model.generate(encoded, max_length=3, do_sample=False)
    return tokenizer.decode(generated[0], skip_special_tokens=True).strip()
def predict_review_sentiment(tokenizer, model, device, review):
    """Classify *review* and return 'Positive', 'Negative', or 'Neutral'.

    The strict system prompt is combined with the review text, the model is
    run via llm_response, and the output is normalized to one of the three
    title-cased labels. If none of the labels appears in the output (e.g.
    the model answered something else entirely), the raw response is
    returned unchanged so the caller can surface it.
    """
    full_prompt = f"{sys_prompt} Review: '{review}'"
    response = llm_response(tokenizer, model, device, full_prompt)
    lowered = response.lower().strip()
    # First label found wins — same precedence as the original if/elif chain.
    for label in ("positive", "negative", "neutral"):
        if label in lowered:
            return label.capitalize()
    return response
# -------------------- Streamlit UI --------------------
# Top-level script: loads the (cached) model once, then renders either the
# full analyzer UI or a fatal-error banner when loading failed.
# FIX: repaired mojibake in the two emoji string literals below — "๐Ÿ“Š" and
# "โœˆ๏ธ" are the UTF-8 bytes of the intended emoji mis-decoded through a
# single-byte codepage; restored to 📊 and ✈️.

# --- 1. Load Resources ---
tokenizer, model, device = load_llm()

# --- 2. System Info Message (Conditional Display) ---
st.sidebar.markdown("## 📊 Deployment Status")
if tokenizer is not None:
    st.sidebar.info(
        f"Model: **{MODEL_NAME}**\n\n"
        f"Framework: **{device}**\n\n"
        "Status: **Local Inference Ready**"
    )

    # --- 3. Main Application ---
    st.title("✈️ Customer Review Sentiment Analyzer")
    st.markdown(f"Using the TensorFlow-backed **{MODEL_NAME}** model.")
    st.subheader("Enter the Customer Review:")
    review_text = st.text_area(
        "Customer Review:",
        height=150,
        placeholder="E.g., The flight was delayed, but the crew was excellent."
    )

    # Predict button
    if st.button("Predict Sentiment"):
        if not review_text:
            st.warning("Please enter a customer review to predict the sentiment.")
        else:
            with st.spinner('Analyzing sentiment...'):
                pred_sent = predict_review_sentiment(tokenizer, model, device, review_text)
            # Display result with color coding
            sentiment_text = pred_sent.lower()
            if 'positive' in sentiment_text:
                st.success(f"**Predicted Sentiment:** {pred_sent}")
            elif 'negative' in sentiment_text:
                st.error(f"**Predicted Sentiment:** {pred_sent}")
            elif 'neutral' in sentiment_text:
                st.info(f"**Predicted Sentiment:** {pred_sent}")
            else:
                # Non-classification output (e.g. the model answered 'hi'):
                # surface the raw response rather than a wrong label.
                st.warning(f"**Predicted Sentiment (Model Response):** {pred_sent}")
else:
    # Display a clear error on the main page if the model load failed
    st.error("Application Failed to Initialize. Model load aborted (likely missing TensorFlow). Check Space logs.")