# Source: Hugging Face upload "dashboard.py" by lol040604lol (commit eb63407, verified).
# NOTE(review): everything from the opening triple quote down to the run of
# `#`-prefixed lines is a dead, disabled legacy version of analyze_data().
# It read "session_data.json" from disk and displayed results directly via
# print()/plt.show(), and also computed a CLV column the live version dropped.
# It is never executed (the string literal is merely evaluated and discarded,
# and the trailing lines are comments). Kept verbatim for reference; consider
# deleting it once the dict-based analyze_data() below is trusted.
"""import pandas as pd
import json
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
import numpy as np
def analyze_data():
# Load JSON data
with open("session_data.json", "r") as file:
data = json.load(file)
# Extract data
interactions = data["interactions"]
timestamp = data.get("timestamp", "No Timestamp")
# Parse interactions into a DataFrame for analysis
interaction_data = []
for interaction in interactions:
sentiment = interaction["sentiment"]
if isinstance(sentiment, list) and len(sentiment) > 0:
sentiment_label = sentiment[0]["label"]
sentiment_score = sentiment[0]["score"]
elif isinstance(sentiment, dict):
sentiment_label = sentiment.get("label", "No Sentiment")
sentiment_score = sentiment.get("score", 0.0)
else:
sentiment_label = "No Sentiment"
sentiment_score = 0.0
sentiment_change = interaction["sentiment_change"]
recommendations = interaction["product_recommendations"]
objection = interaction["objection_handling"]
interaction_data.append({
"Transcription": interaction["transcription"],
"Sentiment Label": sentiment_label,
"Sentiment Score": sentiment_score,
"Sentiment Change": sentiment_change,
"Objection": objection[0] if objection else None,
"Objection Response": objection[1] if len(objection) > 1 else None,
"Recommendations": recommendations,
})
df = pd.DataFrame(interaction_data)
# Convert list of recommendations to a string (hashable type)
df['Recommendations_Str'] = df['Recommendations'].apply(lambda x: ', '.join([rec[0] for rec in x]) if isinstance(x, list) else str(x))
df['Interaction Count'] = df.groupby('Recommendations_Str')['Recommendations_Str'].transform('count')
df['CLV'] = df['Interaction Count'] * 10 # Example formula
# Display data
print(f"Data Timestamp: {timestamp}")
print("\nCustomer Interaction Summary:")
print(df)
# Insights: Sentiment Trends (Pie chart)
sentiment_counts = df["Sentiment Label"].value_counts()
fig, ax = plt.subplots(figsize=(4, 4))
sentiment_counts.plot(kind="pie", autopct="%1.1f%%", ax=ax)
ax.set_ylabel("")
plt.title("Sentiment Trends")
plt.show()
# Product Recommendations (Bar chart)
all_recommendations = [rec[0] for recs in df["Recommendations"] for rec in recs]
recommendation_counts = pd.Series(all_recommendations).value_counts()
fig, ax = plt.subplots(figsize=(5, 3))
recommendation_counts.plot(kind="bar", color="skyblue", ax=ax)
ax.set_title("Top Products")
plt.show()
# Predictive Modeling for Sentiment Scores
if len(df) > 1:
X = np.arange(len(df)).reshape(-1, 1)
y = df["Sentiment Score"].fillna(0).values
model = LinearRegression()
model.fit(X, y)
future_steps = np.arange(len(df), len(df) + 5).reshape(-1, 1)
predicted_scores = model.predict(future_steps)
print("\nPredicted Sentiment Scores (Next 5 Interactions):")
print(predicted_scores)
# Visualization
fig, ax = plt.subplots(figsize=(6, 4))
ax.plot(range(len(df)), y, label="Actual Scores", marker="o")
ax.plot(range(len(df), len(df) + 5), predicted_scores, label="Predicted Scores", linestyle="--", marker="o")
ax.set_title("Sentiment Score Trends")
ax.set_xlabel("Interaction Index")
ax.set_ylabel("Sentiment Score")
ax.legend()
plt.show()
# AI Recommendations for Sales Improvement
# print("\nAI Recommendations for Sales Improvement:")
#print("""
#1. Address objections related to pricing and promotions.
#2. Highlight the most recommended products to align with customer preferences.
#3. Use sentiment trends to identify weak conversation points.
#4. Optimize follow-ups based on predicted sentiment scores.
#""")
# Display Customer Lifetime Value (CLV)
#print("\nCustomer Lifetime Value (CLV):")
#print(df[['Recommendations_Str', 'Interaction Count', 'CLV']])
#if __name__ == "__main__":
#analyze_data()"""
import pandas as pd
import json
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
import numpy as np
from wordcloud import WordCloud
def _parse_sentiment(sentiment):
    """Return a (label, score) pair from a sentiment payload.

    Accepts either a non-empty list of {"label", "score"} dicts (first entry
    wins) or a single such dict; any other shape falls back to the neutral
    default ("No Sentiment", 0.0).
    """
    if isinstance(sentiment, list) and len(sentiment) > 0:
        return sentiment[0]["label"], sentiment[0]["score"]
    if isinstance(sentiment, dict):
        return sentiment.get("label", "No Sentiment"), sentiment.get("score", 0.0)
    return "No Sentiment", 0.0


def _parse_objection(objection):
    """Return an (objection, response) pair from an objection payload.

    Supports the current dict format ({"objection": ..., "response": ...})
    and the legacy list/tuple format ([objection, response]) still visible in
    the disabled code above. Empty/missing payloads yield (None, None).
    """
    if isinstance(objection, dict):
        return objection.get("objection"), objection.get("response")
    if isinstance(objection, (list, tuple)) and objection:
        return objection[0], objection[1] if len(objection) > 1 else None
    return None, None


def analyze_data(session_data):
    """Analyze a sales-call session and build tables/figures for display.

    Parameters
    ----------
    session_data : dict
        Must contain "interactions": a list of dicts, each with
        "transcription", "sentiment", "product_recommendations" and
        "objection_handling" keys. May contain "timestamp".

    Returns
    -------
    dict
        Keys: "timestamp", "summary_table" (DataFrame), "sentiment_chart"
        and "recommendation_chart" and "wordcloud" (matplotlib Figures),
        "sentiment_predictions" (ndarray of 5 forecast scores, or an
        explanatory string when fewer than 2 interactions exist), and
        "actionable_recommendations" (list of str).
    """
    interactions = session_data["interactions"]
    timestamp = session_data.get("timestamp", "No Timestamp")

    # Flatten each interaction into one DataFrame row.
    interaction_data = []
    all_transcriptions = []
    for interaction in interactions:
        sentiment_label, sentiment_score = _parse_sentiment(interaction["sentiment"])
        objection_text, objection_response = _parse_objection(interaction["objection_handling"])
        transcription = interaction["transcription"]
        all_transcriptions.append(transcription)
        interaction_data.append({
            "Transcription": transcription,
            "Sentiment Label": sentiment_label,
            "Sentiment Score": sentiment_score,
            "Objection": objection_text,
            "Objection Response": objection_response,
            "Recommendations": interaction["product_recommendations"],
        })

    # Explicit columns so an empty session still yields a well-formed frame;
    # otherwise df["Sentiment Label"] below raises KeyError on no data.
    df = pd.DataFrame(
        interaction_data,
        columns=["Transcription", "Sentiment Label", "Sentiment Score",
                 "Objection", "Objection Response", "Recommendations"],
    )

    # Forecast the next 5 sentiment scores with a linear trend over the
    # interaction index; needs at least 2 points to fit a line.
    if len(df) <= 1:
        sentiment_predictions = "Insufficient data for predictions"
    else:
        X = np.arange(len(df)).reshape(-1, 1)
        y = df["Sentiment Score"].fillna(0).values
        model = LinearRegression()
        model.fit(X, y)
        future_steps = np.arange(len(df), len(df) + 5).reshape(-1, 1)
        sentiment_predictions = model.predict(future_steps)

    # Pie chart of sentiment-label frequencies.
    sentiment_counts = df["Sentiment Label"].value_counts()
    fig_sentiment, ax = plt.subplots(figsize=(4, 4))
    sentiment_counts.plot(kind="pie", autopct="%1.1f%%", ax=ax)
    ax.set_ylabel("")
    plt.title("Sentiment Trends")

    # Bar chart of recommended products.
    # NOTE(review): rec[0] assumes each recommendation is a (name, ...) pair;
    # confirm against the producer of "product_recommendations".
    all_recommendations = [rec[0] for recs in df["Recommendations"] for rec in recs]
    # dtype=object keeps the dtype stable when the list is empty.
    recommendation_counts = pd.Series(all_recommendations, dtype=object).value_counts()
    fig_recommendations, ax = plt.subplots(figsize=(5, 3))
    recommendation_counts.plot(kind="bar", color="skyblue", ax=ax)
    ax.set_title("Top Products")

    # Word cloud of all transcriptions. WordCloud.generate() raises
    # ValueError on text with no words, so fall back to an empty figure.
    all_text = " ".join(all_transcriptions)
    fig_wordcloud, ax = plt.subplots(figsize=(6, 4))
    if all_text.strip():
        wordcloud = WordCloud(background_color="white").generate(all_text)
        ax.imshow(wordcloud, interpolation="bilinear")
    ax.axis("off")
    plt.title("Call Topics")

    # Static playbook suggestions surfaced alongside the charts.
    actionable_recommendations = [
        "Focus on objection handling related to pricing concerns.",
        "Highlight top-performing products during calls.",
        "Leverage sentiment trends to adjust tone and messaging.",
        "Plan follow-ups based on predicted sentiment trends."
    ]

    # Results are consumed by the Streamlit front end.
    return {
        "timestamp": timestamp,
        "summary_table": df,
        "sentiment_chart": fig_sentiment,
        "recommendation_chart": fig_recommendations,
        "sentiment_predictions": sentiment_predictions,
        "wordcloud": fig_wordcloud,
        "actionable_recommendations": actionable_recommendations,
    }