import streamlit as st
import pandas as pd
import json
import joblib
from huggingface_hub import hf_hub_download
# Display the version of huggingface_hub
# (shown in the UI so deployment issues caused by hub API changes are easy to spot)
import huggingface_hub
st.write(f"Hugging Face Hub version: {huggingface_hub.__version__}")
# Function to download models from Hugging Face using huggingface_hub
def download_model_from_huggingface(repo_id, filename):
    """Download a model file from a Hugging Face Hub repository.

    Args:
        repo_id: Repository identifier, e.g. "user/repo".
        filename: Path of the file inside the repository.

    Returns:
        Local filesystem path of the cached/downloaded file, or None on
        failure (the error is shown in the Streamlit UI instead of raised).
    """
    try:
        # hf_hub_download caches the file locally and returns its path.
        return hf_hub_download(repo_id=repo_id, filename=filename)
    except Exception as e:
        # BUG FIX: the message previously contained the literal placeholder
        # "(unknown)" — report the file that actually failed.
        st.error(f"Error downloading model {filename}: {e}")
        return None
# Function to load models safely
def load_model(model_file):
    """Deserialize and return a model object from *model_file*.

    The file is expected to be in ``.joblib`` format. Returns None (after
    reporting the error in the Streamlit UI) when deserialization fails.
    """
    try:
        loaded = joblib.load(model_file)
    except Exception as e:
        st.error(f"Error loading model: {e}")
        return None
    return loaded
# Set repository ID and model filenames
REPO_ID = "totoro74/Intelligent_Customer_Analyzer"

# BUG FIX: pre-initialize so a failed load leaves these defined as None
# instead of triggering a NameError when they are checked further below.
bert_topic_model = None
recommendation_model = None

# Load models using the download function
try:
    # Download and load the BERTopic model
    bert_topic_model_file = download_model_from_huggingface(REPO_ID, "models/bertopic_model.joblib")
    bert_topic_model = load_model(bert_topic_model_file)
    # Download and load the Recommendation model
    recommendation_model_file = download_model_from_huggingface(REPO_ID, "models/recommendation_model.joblib")
    recommendation_model = load_model(recommendation_model_file)
except Exception as e:
    # BUG FIX: the warning emoji was mojibake ("β οΈ"); restored to ⚠️.
    st.error(f"⚠️ Error loading models: {e}")
# Streamlit app layout
# NOTE(review): the original emoji were mojibake ("π " — only the first UTF-8
# byte survived); replaced with plausible icons since the originals are lost.
st.title("📊 Intelligent Customer Feedback Analyzer")
st.write("Analyze customer feedback for sentiment, topics, and get personalized recommendations.")

# User input for customer feedback file
uploaded_file = st.file_uploader("📂 Upload a Feedback File (CSV, JSON, TXT)", type=["csv", "json", "txt"])
# Function to extract feedback text from different file formats
def extract_feedback(file):
    """Extract a list of feedback strings from an uploaded file.

    Args:
        file: A Streamlit UploadedFile (file-like object with a ``type``
            MIME attribute) containing CSV, JSON, or plain-text data.

    Returns:
        A list of feedback strings; ``["Unsupported file type"]`` for an
        unknown MIME type; ``[]`` if parsing raised (error shown in the UI).
    """
    try:
        if file.type == "text/csv":
            # First column is assumed to hold the feedback text — TODO confirm
            # against the expected upload format.
            df = pd.read_csv(file)
            return df.iloc[:, 0].dropna().astype(str).tolist()
        elif file.type == "application/json":
            # Expect a list of objects, each with a "feedback" key;
            # non-dict entries are skipped.
            json_data = json.load(file)
            return [item.get('feedback', '') for item in json_data if isinstance(item, dict)]
        elif file.type == "text/plain":
            # BUG FIX: split("\n") left a trailing "\r" on CRLF (Windows)
            # files and produced empty entries (e.g. a trailing "") that were
            # then "analyzed". splitlines() handles all line endings, and
            # blank lines are dropped.
            lines = file.getvalue().decode("utf-8").splitlines()
            return [line for line in lines if line.strip()]
        else:
            return ["Unsupported file type"]
    except Exception as e:
        st.error(f"Error processing file: {e}")
        return []
# Process uploaded file: run each feedback entry through both models and
# render the results in collapsible sections.
# BUG FIX: emoji in the UI strings below were mojibake ("π", "β οΈ");
# replaced with readable icons (⚠️ restored; others are plausible guesses
# since only the first UTF-8 byte of the originals survived).
if uploaded_file:
    feedback_text_list = extract_feedback(uploaded_file)
    # Both models must have loaded successfully (they are None on failure).
    if feedback_text_list and bert_topic_model and recommendation_model:
        for feedback_text in feedback_text_list:
            # Truncate the preview to 30 chars so the expander label stays short.
            with st.expander(f'📝 Analyze Feedback: "{feedback_text[:30]}..."'):
                try:
                    # Topic Prediction
                    topics = bert_topic_model.predict([feedback_text])
                    st.write(f"**Predicted Topic(s):** {topics}")
                    # Recommendations
                    recommendations = recommendation_model.predict([feedback_text])
                    st.write(f"**Recommended Actions:** {recommendations}")
                except Exception as e:
                    # Per-entry failure: report it and continue with the rest.
                    st.error(f"Error analyzing feedback: {e}")
    else:
        st.error("⚠️ Unable to analyze feedback. Please check if models are correctly loaded.")
else:
    st.info("📁 Please upload a feedback file to analyze.")