Update app.py
Browse files
app.py
CHANGED
|
@@ -2,23 +2,19 @@ import streamlit as st
|
|
| 2 |
import pandas as pd
|
| 3 |
import json
|
| 4 |
import joblib
|
| 5 |
-
import
|
| 6 |
|
| 7 |
-
# Function to load
|
| 8 |
def load_model(model_path):
|
| 9 |
-
"""Loads the model
|
| 10 |
-
return
|
| 11 |
|
| 12 |
-
#
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
|
| 17 |
-
|
| 18 |
-
bert_topic_model_path = os.path.join(model_folder, 'bertopic_model.joblib')
|
| 19 |
-
recommendation_model_path = os.path.join(model_folder, 'recommendation_model.joblib')
|
| 20 |
-
|
| 21 |
-
# Load models
|
| 22 |
distilbert_model = load_model(distilbert_model_path)
|
| 23 |
bert_topic_model = load_model(bert_topic_model_path)
|
| 24 |
recommendation_model = load_model(recommendation_model_path)
|
|
@@ -40,11 +36,7 @@ def extract_feedback(file):
|
|
| 40 |
return feedback_text
|
| 41 |
elif file.type == "application/json":
|
| 42 |
json_data = json.load(file)
|
| 43 |
-
feedback_text = []
|
| 44 |
-
if isinstance(json_data, list):
|
| 45 |
-
feedback_text = [item.get('feedback', '') for item in json_data if 'feedback' in item]
|
| 46 |
-
elif isinstance(json_data, dict):
|
| 47 |
-
feedback_text = list(json_data.values())
|
| 48 |
return feedback_text
|
| 49 |
elif file.type == "text/plain":
|
| 50 |
return [file.getvalue().decode("utf-8")]
|
|
@@ -54,19 +46,17 @@ def extract_feedback(file):
|
|
| 54 |
# Display error or feedback extraction status
|
| 55 |
if uploaded_file:
|
| 56 |
feedback_text_list = extract_feedback(uploaded_file)
|
|
|
|
| 57 |
if feedback_text_list:
|
| 58 |
for feedback_text in feedback_text_list:
|
| 59 |
if st.button(f'Analyze Feedback: "{feedback_text[:30]}..."'):
|
| 60 |
-
# Sentiment Analysis
|
| 61 |
sentiment = distilbert_model.predict([feedback_text])
|
| 62 |
sentiment_result = 'Positive' if sentiment == 1 else 'Negative'
|
| 63 |
st.write(f"Sentiment: {sentiment_result}")
|
| 64 |
-
|
| 65 |
-
# Topic Modeling
|
| 66 |
topics = bert_topic_model.predict([feedback_text])
|
| 67 |
st.write(f"Predicted Topic(s): {topics}")
|
| 68 |
-
|
| 69 |
-
# Recommendation System
|
| 70 |
recommendations = recommendation_model.predict([feedback_text])
|
| 71 |
st.write(f"Recommended Actions: {recommendations}")
|
| 72 |
else:
|
|
|
|
| 2 |
import pandas as pd
|
| 3 |
import json
|
| 4 |
import joblib
|
| 5 |
+
import torch
|
| 6 |
|
| 7 |
+
# Function to load model safely on CPU
|
| 8 |
def load_model(model_path):
    """Deserialize a model from *model_path*, forcing every tensor onto the CPU.

    Parameters
    ----------
    model_path : str
        Filesystem path of the serialized model artifact.

    Returns
    -------
    The deserialized object, with all tensors remapped to the CPU device so
    the app also runs on hosts without a GPU.
    """
    # NOTE(review): torch.load unpickles the file — only load trusted artifacts.
    cpu_device = torch.device('cpu')
    return torch.load(model_path, map_location=cpu_device)
|
| 11 |
|
| 12 |
+
# Directory (relative to the app root) that holds the serialized model
# artifacts — they must be present before the app starts.
_MODEL_DIR = "models"

distilbert_model_path = f"{_MODEL_DIR}/distilbert_model.joblib"
bert_topic_model_path = f"{_MODEL_DIR}/bertopic_model.joblib"
recommendation_model_path = f"{_MODEL_DIR}/recommendation_model.joblib"

# Deserialize each model exactly once at startup, mapped onto the CPU
# by load_model so the app works on GPU-less hosts.
distilbert_model = load_model(distilbert_model_path)
bert_topic_model = load_model(bert_topic_model_path)
recommendation_model = load_model(recommendation_model_path)
|
|
|
|
| 36 |
return feedback_text
|
| 37 |
elif file.type == "application/json":
|
| 38 |
json_data = json.load(file)
|
| 39 |
+
feedback_text = [item.get('feedback', '') for item in json_data if isinstance(item, dict)]
|
|
|
|
|
|
|
|
|
|
|
|
|
| 40 |
return feedback_text
|
| 41 |
elif file.type == "text/plain":
|
| 42 |
return [file.getvalue().decode("utf-8")]
|
|
|
|
| 46 |
# Display error or feedback extraction status
|
| 47 |
if uploaded_file:
|
| 48 |
feedback_text_list = extract_feedback(uploaded_file)
|
| 49 |
+
|
| 50 |
if feedback_text_list:
|
| 51 |
for feedback_text in feedback_text_list:
|
| 52 |
if st.button(f'Analyze Feedback: "{feedback_text[:30]}..."'):
|
|
|
|
| 53 |
sentiment = distilbert_model.predict([feedback_text])
|
| 54 |
sentiment_result = 'Positive' if sentiment == 1 else 'Negative'
|
| 55 |
st.write(f"Sentiment: {sentiment_result}")
|
| 56 |
+
|
|
|
|
| 57 |
topics = bert_topic_model.predict([feedback_text])
|
| 58 |
st.write(f"Predicted Topic(s): {topics}")
|
| 59 |
+
|
|
|
|
| 60 |
recommendations = recommendation_model.predict([feedback_text])
|
| 61 |
st.write(f"Recommended Actions: {recommendations}")
|
| 62 |
else:
|