|
|
|
|
|
import streamlit as st |
|
|
import time |
|
|
|
|
|
from transformers import T5Tokenizer, TFT5ForConditionalGeneration |
|
|
|
|
|
|
|
|
# Hugging Face model id: small FLAN-T5 checkpoint, loaded with the TensorFlow
# backend in load_llm() below.
MODEL_NAME = "google/flan-t5-small"


# System prompt prepended verbatim to every review in predict_review_sentiment().
# NOTE(review): "Us Airways" below looks like a typo for "US Airways" — left
# unchanged because editing the prompt text changes model behavior; confirm
# with the author before fixing.
sys_prompt = """


Classify the sentiment of the following customer review as either 'positive', 'negative', or 'neutral'. Respond with only one word.


Leverage your expertise in the aviation industry and deep understanding of industry trends to analyze the nuanced expressions and overall tone.


It is crucial to accurately identify neutral sentiments, which may indicate a balanced view or neutral stance towards Us Airways. Neutral expressions could involve factual statements without explicit positive or negative opinions.


Consider the importance of these neutral sentiments in gauging the public sentiment towards the airline company.


For instance, a positive sentiment might convey satisfaction with the airline's services, a negative sentiment could express dissatisfaction, while neutral sentiment may reflect an impartial observation or a neutral standpoint


"""
|
|
@st.cache_resource
def load_llm():
    """Download and cache the FLAN-T5 tokenizer and TF model.

    Returns:
        (tokenizer, model, device_label) on success, or (None, None, None)
        when loading raises. Streamlit's cache_resource makes the download
        happen at most once per server process.
    """
    device = "CPU (TensorFlow)"
    try:
        with st.spinner(f"Loading tokenizer and model ({MODEL_NAME}) on {device}..."):
            st.info(f"Using device: **{device}**. Starting model download...")
            t0 = time.time()
            tok = T5Tokenizer.from_pretrained(MODEL_NAME)
            llm = TFT5ForConditionalGeneration.from_pretrained(MODEL_NAME)
            elapsed = time.time() - t0
            st.success(f"Model {MODEL_NAME} loaded successfully in {elapsed:.2f} seconds!")
            return tok, llm, device
    except Exception as exc:
        # UI boundary: surface the failure to the user instead of crashing the app.
        st.error(f"FATAL ERROR LOADING MODEL: {exc}")
        st.info("Model load failed.")
        return None, None, None
|
|
|
|
|
def llm_response(tokenizer, model, device, prompt):
    """Run one greedy generation pass and return the decoded text.

    Args:
        tokenizer: T5 tokenizer (or None if load_llm failed).
        model: TF T5 model (or None if load_llm failed).
        device: device label (informational only; not used for inference here).
        prompt: full text prompt to feed the model.

    Returns:
        The stripped decoded output, or an error string when the model
        was never initialized.
    """
    if model is None or tokenizer is None:
        return "Model not initialized due to previous error."

    encoded = tokenizer(prompt, return_tensors="tf")
    # Greedy decoding, capped at a few tokens — the prompt asks for one word.
    generated = model.generate(encoded.input_ids, max_length=3, do_sample=False)
    decoded = tokenizer.decode(generated[0], skip_special_tokens=True)
    return decoded.strip()
|
|
|
|
|
def predict_review_sentiment(tokenizer, model, device, review):
    """Classify the sentiment of a customer review via the LLM.

    The full module-level ``sys_prompt`` (the long multi-paragraph
    instruction block) is prepended to the review, i.e. the prompt sent
    to the model is: ``f"{sys_prompt} Review: '{review}'"``.
    (The previous docstring wrongly claimed a "cleaned", single-sentence
    prompt was used.)

    Args:
        tokenizer: T5 tokenizer from load_llm().
        model: TF T5 model from load_llm().
        device: device label, passed through to llm_response().
        review: raw customer review text.

    Returns:
        "Positive", "Negative" or "Neutral" when the model's answer
        contains one of those labels; otherwise the raw model response
        (including the "Model not initialized..." error string).
    """
    full_prompt = f"{sys_prompt} Review: '{review}'"
    response = llm_response(tokenizer, model, device, full_prompt)
    normalized_response = response.lower().strip()

    # Same precedence as the original if/elif chain: first substring
    # match wins, checked in this fixed order.
    for label in ("positive", "negative", "neutral"):
        if label in normalized_response:
            return label.capitalize()

    # No recognized label — surface the model's raw output so the caller
    # can display it.
    return response
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# ---- App entry: load model once (cached), then render UI. ----
tokenizer, model, device = load_llm()

# FIX: the sidebar header and page title contained mojibake ("๐", "โ๏ธ"):
# UTF-8 emoji bytes misdecoded as a Thai single-byte codepage. Restored to
# the intended emoji (✈️ recovered exactly from the byte pattern; 🚀 is the
# most plausible 4-byte original for the deployment header).
st.sidebar.markdown("## 🚀 Deployment Status")

if tokenizer is not None:
    st.sidebar.info(
        f"Model: **{MODEL_NAME}**\n\n"
        f"Framework: **{device}**\n\n"
        "Status: **Local Inference Ready**"
    )

    st.title("✈️ Customer Review Sentiment Analyzer")
    st.markdown(f"Using the TensorFlow-backed **{MODEL_NAME}** model.")

    st.subheader("Enter the Customer Review:")
    review_text = st.text_area(
        "Customer Review:",
        height=150,
        placeholder="E.g., The flight was delayed, but the crew was excellent."
    )

    if st.button("Predict Sentiment"):
        if not review_text:
            st.warning("Please enter a customer review to predict the sentiment.")
        else:
            with st.spinner('Analyzing sentiment...'):
                pred_sent = predict_review_sentiment(tokenizer, model, device, review_text)

            # Color-code the result; fall back to a warning when the model
            # returned something other than the three known labels.
            sentiment_text = pred_sent.lower()
            if 'positive' in sentiment_text:
                st.success(f"**Predicted Sentiment:** {pred_sent}")
            elif 'negative' in sentiment_text:
                st.error(f"**Predicted Sentiment:** {pred_sent}")
            elif 'neutral' in sentiment_text:
                st.info(f"**Predicted Sentiment:** {pred_sent}")
            else:
                st.warning(f"**Predicted Sentiment (Model Response):** {pred_sent}")
else:
    # load_llm() returned (None, None, None): show a fatal banner instead of the UI.
    st.error("Application Failed to Initialize. Model load aborted (likely missing TensorFlow). Check Space logs.")
|
|
|