# Resume_classifier / pages/dashboard.py
# (Hugging Face Space artifact header: "Update pages/dashboard.py", commit 0ce45a2, user sid22669)
import os

import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import streamlit as st
from statsmodels.stats.proportion import proportion_confint
# Configure the Streamlit page chrome first (must precede other st.* calls).
st.set_page_config(page_title="Model Performance Dashboard", layout="wide")
# Apply seaborn's "whitegrid" theme to every matplotlib figure rendered below.
sns.set_theme(style="whitegrid")
# Dashboard headline.
st.title("πŸ“Š Model Performance Dashboard")
# Load the feedback log written by the correction workflow.
log_file = "/tmp/corrections_log.csv"
# BUG FIX: the original guard was `if not log_file:` — a non-empty string
# literal is always truthy, so the "no data" branch was unreachable and
# pd.read_csv raised FileNotFoundError before any feedback was logged.
# Test for the file's actual existence instead.
if not os.path.exists(log_file):
    st.warning("⚠️ No data available.")
else:
    df = pd.read_csv(log_file)
    if df.empty:
        st.info("ℹ️ No logs recorded yet.")
    else:
        # Two-column layout: left = prediction distribution, right = accuracy.
        col1, col2 = st.columns(2)
        with col1:
            st.subheader("πŸ”Ή Role Prediction Distribution")
            # Frequency of each predicted role, most common first.
            role_counts = df["model_prediction"].value_counts().sort_values(ascending=False)
            fig, ax = plt.subplots(figsize=(8, 5))
            palette = sns.color_palette("Set2", len(role_counts))
            sns.barplot(x=role_counts.values, y=role_counts.index, ax=ax, palette=palette)
            ax.set_xlabel("Count", fontsize=12)
            ax.set_ylabel("Predicted Role", fontsize=12)
            ax.set_title("Role Predictions Frequency", fontsize=14, weight='bold')
            sns.despine(left=True, bottom=True)
            st.pyplot(fig)
        with col2:
            # A prediction counts as correct when the logged correction
            # matches the model's original label.
            correct_preds = df[df["model_prediction"] == df["corrected_prediction"]]
            accuracy = len(correct_preds) / len(df) * 100
            # Wilson score interval: better coverage than the normal
            # approximation for small samples or extreme proportions.
            lower, upper = proportion_confint(count=len(correct_preds), nobs=len(df), alpha=0.05, method="wilson")
            st.subheader("βœ… Model Accuracy Overview")
            st.metric(label="Accuracy", value=f"{accuracy:.2f} %")
            st.markdown(
                f"""
                <div style="font-size:16px;">
                πŸ“Œ <strong>95% Confidence Interval:</strong><br>
                {lower*100:.2f}% to {upper*100:.2f}%
                </div>
                """,
                unsafe_allow_html=True
            )
        # Spacer
        st.markdown("---")
        # Preview the ten most recent feedback rows.
        st.subheader("πŸ“„ Recent Feedback Logs")
        st.dataframe(
            df[["timestamp", "model_prediction", "corrected_prediction"]].tail(10),
            use_container_width=True,
            height=300
        )