Spaces:
Running on CPU Upgrade
Running on CPU Upgrade
| import streamlit as st | |
| import streamlit_authenticator as stauth | |
| from pathlib import Path | |
| import sys | |
| import json | |
| import time | |
| import signal | |
| import pandas as pd | |
| import subprocess | |
| import os | |
| from datetime import datetime, timezone | |
| from dotenv import load_dotenv | |
| from huggingface_upload import upload_all_to_huggingface | |
| load_dotenv() | |
| # Allow imports of project modules | |
| sys.path.insert(0, str(Path(__file__).parent.parent)) | |
| from user_management import HuggingFaceUserManager, load_user_config | |
| from data_updating_scripts.pipeline_status import PipelineStatus, SCRIPT_DISPLAY_NAMES | |
# --- Page configuration ---------------------------------------------------
# Wide layout; NOTE(review): initial_sidebar_state="expanded" looks redundant
# since the CSS below hides the sidebar entirely — confirm before removing.
st.set_page_config(page_title="Admin Panel", layout="wide", page_icon="π οΈ", initial_sidebar_state="expanded")
# --- Global CSS theme -----------------------------------------------------
# Injects the whole admin-panel theme in one <style> block: Inter font,
# light (#FAF9F7) background, hidden sidebar, forced dark text (overrides
# Streamlit's dark theme), white inputs/menus, gold-accent tabs/buttons,
# the login-card styling, and the gradient "admin header" that is targeted
# via the .admin-header-marker sentinel div rendered later in this file.
st.markdown("""
<style>
@import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600;700;800&display=swap');
/* ββ Base ββ */
html, body, [class*="css"], .stMarkdown, .stText, button, input, select, textarea {
font-family: 'Inter', -apple-system, BlinkMacSystemFont, sans-serif !important;
}
/* ββ Hide sidebar ββ */
[data-testid="collapsedControl"],
[data-testid="stSidebarCollapsedControl"] { display: none !important; }
section[data-testid="stSidebar"] { display: none !important; }
.main .block-container { max-width: 100% !important; padding-left: 2rem !important; padding-right: 2rem !important; }
/* ββ Page background ββ */
.stApp { background: #FAF9F7 !important; }
/* Hide Streamlit's top header bar (dark strip) */
header[data-testid="stHeader"] {
background: #FAF9F7 !important;
}
.stAppToolbar, [data-testid="stToolbar"] {
background: #FAF9F7 !important;
}
/* ββ Force dark text everywhere (override Streamlit dark theme) ββ */
.stApp,
.stApp [data-testid="stVerticalBlock"],
.stApp [data-testid="stHorizontalBlock"],
.stApp .stMarkdown,
.stApp .stMarkdown p,
.stApp .stMarkdown li,
.stApp .stMarkdown h1,
.stApp .stMarkdown h2,
.stApp .stMarkdown h3,
.stApp .stMarkdown h4,
.stApp .stMarkdown h5,
.stApp .stMarkdown h6,
.stApp .stMarkdown span,
.stApp .stMarkdown strong,
.stApp .stMarkdown em,
.stApp .stText,
.stApp p,
.stApp span,
.stApp label,
.stApp .stCaption,
.stApp [data-testid="stCaption"],
.stApp [data-testid="stText"],
.stApp [data-testid="stMarkdownContainer"],
.stApp [data-testid="stMarkdownContainer"] p,
.stApp [data-testid="stMarkdownContainer"] li,
.stApp [data-testid="stMarkdownContainer"] h1,
.stApp [data-testid="stMarkdownContainer"] h2,
.stApp [data-testid="stMarkdownContainer"] h3,
.stApp [data-testid="stMarkdownContainer"] h4,
.stApp [data-testid="stMarkdownContainer"] span,
.stApp [data-testid="stMarkdownContainer"] strong,
.stApp [data-testid="stExpander"] summary,
.stApp [data-testid="stExpander"] summary span,
.stApp [data-testid="stWidgetLabel"] label,
.stApp [data-testid="stWidgetLabel"] p,
.stApp div[data-testid="stSelectbox"] label,
.stApp div[data-testid="stMultiSelect"] label,
.stApp div[data-testid="stTextInput"] label,
.stApp div[data-testid="stTextArea"] label,
.stApp div[data-testid="stRadio"] label,
.stApp div[data-testid="stRadio"] div[role="radiogroup"] label,
.stApp div[data-testid="stCheckbox"] label {
color: #0F172A !important;
}
/* Metric containers */
.stApp div[data-testid="metric-container"],
.stApp div[data-testid="metric-container"] label,
.stApp div[data-testid="metric-container"] div,
.stApp div[data-testid="metric-container"] p {
color: #0F172A !important;
background: #FFFFFF !important;
}
.stApp div[data-testid="metric-container"] [data-testid="stMetricLabel"] {
color: #64748B !important;
}
/* ALL inputs/selects/textareas β force white bg + dark text globally */
.stApp [data-baseweb="select"] > div,
.stApp [data-baseweb="input"],
.stApp [data-baseweb="input"] > div,
.stApp [data-baseweb="base-input"],
.stApp [data-baseweb="base-input"] > div,
.stApp .stTextInput > div > div,
.stApp .stTextArea > div > div,
.stApp .stSelectbox > div > div,
.stApp .stMultiSelect > div > div {
background: #FFFFFF !important;
border-color: #CBD5E1 !important;
}
.stApp input,
.stApp textarea,
.stApp [data-baseweb="select"] span,
.stApp [data-baseweb="select"] div,
.stApp [data-baseweb="input"] input,
.stApp [data-baseweb="base-input"] input {
color: #0F172A !important;
background: #FFFFFF !important;
caret-color: #0F172A !important;
-webkit-text-fill-color: #0F172A !important;
}
.stApp input::placeholder,
.stApp textarea::placeholder {
color: #94A3B8 !important;
-webkit-text-fill-color: #94A3B8 !important;
}
/* Dropdown menu */
.stApp [data-baseweb="popover"],
.stApp [data-baseweb="popover"] > div,
.stApp [data-baseweb="menu"],
.stApp [data-baseweb="menu"] li,
.stApp ul[role="listbox"],
.stApp ul[role="listbox"] li {
background: #FFFFFF !important;
color: #0F172A !important;
}
.stApp ul[role="listbox"] li:hover,
.stApp [data-baseweb="menu"] li:hover {
background: #FBF5E6 !important;
}
/* Expander header β white bg, dark text */
.stApp [data-testid="stExpander"] > details > summary {
background: #FFFFFF !important;
color: #334155 !important;
}
.stApp [data-testid="stExpander"] > details > summary span {
color: #334155 !important;
}
.stApp [data-testid="stExpander"] > details > div {
background: #FFFFFF !important;
}
/* Info/warning/error/success boxes β preserve their native colors */
.stApp .stAlert p,
.stApp .stAlert span,
.stApp [data-testid="stNotification"] p,
.stApp [data-testid="stNotification"] span {
color: inherit !important;
}
/* Container borders β make white background explicit */
.stApp [data-testid="stExpander"],
.stApp div[data-testid="stForm"] {
background: #FFFFFF !important;
}
/* Login form text */
div[data-testid="stForm"] p,
section[data-testid="stForm"] p,
div[data-testid="stForm"] label,
section[data-testid="stForm"] label,
div[data-testid="stForm"] span,
section[data-testid="stForm"] span {
color: #334155 !important;
}
/* EXCEPTION β admin header keeps white text */
div[data-testid="stVerticalBlock"]:has(.admin-header-marker) > div[data-testid="stHorizontalBlock"]:first-of-type p,
div[data-testid="stVerticalBlock"]:has(.admin-header-marker) > div[data-testid="stHorizontalBlock"]:first-of-type span,
div[data-testid="stVerticalBlock"]:has(.admin-header-marker) > div[data-testid="stHorizontalBlock"]:first-of-type div,
div[data-testid="stVerticalBlock"]:has(.admin-header-marker) > div > div[data-testid="stHorizontalBlock"]:first-of-type p,
div[data-testid="stVerticalBlock"]:has(.admin-header-marker) > div > div[data-testid="stHorizontalBlock"]:first-of-type span,
div[data-testid="stVerticalBlock"]:has(.admin-header-marker) > div > div[data-testid="stHorizontalBlock"]:first-of-type div {
color: inherit !important;
}
/* Dataframe / table text */
.stApp .stDataFrame,
.stApp .stDataEditor {
background: #FFFFFF !important;
}
.block-container { padding-top: 1.5rem !important; }
/* ββ Admin header ββ */
.admin-header-marker { display: none !important; }
div[data-testid="stVerticalBlock"]:has(.admin-header-marker) > div[data-testid="stHorizontalBlock"]:first-of-type,
div[data-testid="stVerticalBlock"]:has(.admin-header-marker) > div > div[data-testid="stHorizontalBlock"]:first-of-type {
background: linear-gradient(135deg, #CFB991 0%, #E8D5A8 100%) !important;
border-radius: 14px !important;
padding: 1.25rem 2rem !important;
margin-bottom: 1rem !important;
box-shadow: 0 4px 20px rgba(207,185,145,0.4) !important;
align-items: center !important;
position: relative !important;
overflow: hidden !important;
}
div[data-testid="stVerticalBlock"]:has(.admin-header-marker) > div[data-testid="stHorizontalBlock"]:first-of-type::before,
div[data-testid="stVerticalBlock"]:has(.admin-header-marker) > div > div[data-testid="stHorizontalBlock"]:first-of-type::before {
content: '' !important;
position: absolute !important;
top: 0; left: 0; right: 0 !important;
height: 3px !important;
background: linear-gradient(90deg, transparent 0%, rgba(255,255,255,0.7) 40%, rgba(255,255,255,1) 60%, rgba(255,255,255,0.7) 80%, transparent 100%) !important;
}
div[data-testid="stVerticalBlock"]:has(.admin-header-marker) > div[data-testid="stHorizontalBlock"]:first-of-type .stButton,
div[data-testid="stVerticalBlock"]:has(.admin-header-marker) > div > div[data-testid="stHorizontalBlock"]:first-of-type .stButton {
display: flex !important;
justify-content: flex-end !important;
}
div[data-testid="stVerticalBlock"]:has(.admin-header-marker) > div[data-testid="stHorizontalBlock"]:first-of-type .stButton > button,
div[data-testid="stVerticalBlock"]:has(.admin-header-marker) > div > div[data-testid="stHorizontalBlock"]:first-of-type .stButton > button {
background: rgba(26,26,26,0.1) !important;
color: #1a1a1a !important;
border: 1px solid rgba(26,26,26,0.25) !important;
border-radius: 7px !important;
font-size: 0.8rem !important;
font-weight: 600 !important;
padding: 0.3rem 1rem !important;
width: auto !important;
}
div[data-testid="stVerticalBlock"]:has(.admin-header-marker) > div[data-testid="stHorizontalBlock"]:first-of-type .stButton > button:hover,
div[data-testid="stVerticalBlock"]:has(.admin-header-marker) > div > div[data-testid="stHorizontalBlock"]:first-of-type .stButton > button:hover {
background: rgba(26,26,26,0.18) !important;
}
div[data-testid="stVerticalBlock"]:has(.admin-header-marker) [data-testid="stPageLink"] a,
div[data-testid="stVerticalBlock"]:has(.admin-header-marker) > div > div[data-testid="stHorizontalBlock"]:first-of-type [data-testid="stPageLink"] a {
display: flex !important;
align-items: center !important;
justify-content: center !important;
background: rgba(26,26,26,0.1) !important;
color: #1a1a1a !important;
border: 1px solid rgba(26,26,26,0.25) !important;
border-radius: 7px !important;
font-size: 0.8rem !important;
font-weight: 600 !important;
padding: 0.3rem 1rem !important;
text-decoration: none !important;
transition: background 0.15s ease !important;
}
div[data-testid="stVerticalBlock"]:has(.admin-header-marker) [data-testid="stPageLink"] a *,
div[data-testid="stVerticalBlock"]:has(.admin-header-marker) > div > div[data-testid="stHorizontalBlock"]:first-of-type [data-testid="stPageLink"] a * {
color: #1a1a1a !important;
}
div[data-testid="stVerticalBlock"]:has(.admin-header-marker) [data-testid="stPageLink"] a:hover,
div[data-testid="stVerticalBlock"]:has(.admin-header-marker) > div > div[data-testid="stHorizontalBlock"]:first-of-type [data-testid="stPageLink"] a:hover {
background: rgba(26,26,26,0.18) !important;
}
/* ββ Tabs ββ */
.stTabs [data-baseweb="tab-list"] {
background: #FFFFFF;
border-radius: 10px;
padding: 4px;
gap: 4px;
box-shadow: 0 1px 4px rgba(0,0,0,0.06);
border: 1px solid #E2E8F0;
}
.stTabs [data-baseweb="tab"] {
border-radius: 7px;
padding: 0.45rem 1.4rem;
font-weight: 500;
font-size: 0.875rem;
color: #64748B;
transition: color 0.15s ease;
}
.stTabs [data-baseweb="tab"]:hover { color: #334155; }
.stTabs [data-baseweb="tab"] {
border-bottom: none;
margin-bottom: 0;
background: transparent !important;
transition: color 0.15s ease, background 0.15s ease;
}
.stTabs [data-baseweb="tab"]:hover { color: #1a1a1a; background: #FBF5E6 !important; }
.stTabs [aria-selected="true"] {
background: #CFB991 !important;
color: #1a1a1a !important;
font-weight: 700;
}
/* ββ Metric cards ββ */
div[data-testid="metric-container"] {
background: #FFFFFF;
border-radius: 12px;
padding: 1rem 1.1rem;
box-shadow: 0 1px 4px rgba(0,0,0,0.06);
border: 1px solid #E8E0D0;
border-left: 3px solid #CFB991;
}
/* ββ General buttons ββ */
.stButton > button {
border-radius: 8px;
font-weight: 600;
font-family: 'Inter', sans-serif !important;
}
.stButton > button[kind="primary"] {
background: #CFB991 !important;
color: #ffffff !important;
border: none !important;
}
.stButton > button[kind="primary"]:hover {
background: #B8A369 !important;
box-shadow: 0 4px 12px rgba(207,185,145,0.4) !important;
}
/* ββ Login form card ββ */
div[data-testid="stForm"],
section[data-testid="stForm"] {
background: #ffffff !important;
border-radius: 16px !important;
padding: 3rem 3.5rem !important;
box-shadow: 0 8px 32px rgba(0,0,0,0.1) !important;
border-top: 4px solid #CFB991 !important;
border-left: none !important;
border-right: none !important;
border-bottom: none !important;
max-width: 640px !important;
margin: 0 auto !important;
width: 100% !important;
}
div[data-testid="stForm"] p,
section[data-testid="stForm"] p {
font-size: 1rem !important;
color: #334155 !important;
}
div[data-testid="stForm"] label,
section[data-testid="stForm"] label {
color: #334155 !important;
font-weight: 600 !important;
}
div[data-testid="stForm"] .stFormSubmitButton > button,
section[data-testid="stForm"] .stFormSubmitButton > button,
div[data-testid="stForm"] .stButton > button,
section[data-testid="stForm"] .stButton > button {
background: #CFB991 !important;
color: #ffffff !important;
border: none !important;
border-radius: 8px !important;
font-weight: 700 !important;
font-size: 1rem !important;
width: 100% !important;
padding: 0.75rem 1rem !important;
margin-top: 0.75rem !important;
}
div[data-testid="stForm"] .stFormSubmitButton > button:hover,
section[data-testid="stForm"] .stFormSubmitButton > button:hover,
div[data-testid="stForm"] .stButton > button:hover,
section[data-testid="stForm"] .stButton > button:hover {
background: #B8A369 !important;
}
div[data-testid="stForm"] [data-baseweb="input"],
section[data-testid="stForm"] [data-baseweb="input"],
div[data-testid="stForm"] [data-baseweb="base-input"],
section[data-testid="stForm"] [data-baseweb="base-input"] {
border: 1.5px solid #CBD5E1 !important;
border-radius: 8px !important;
box-shadow: none !important;
background: #FFFFFF !important;
}
div[data-testid="stForm"] [data-baseweb="input"] input,
section[data-testid="stForm"] [data-baseweb="input"] input {
background: #FFFFFF !important;
color: #0F172A !important;
caret-color: #0F172A !important;
}
div[data-testid="stForm"] [data-baseweb="input"]:focus-within,
section[data-testid="stForm"] [data-baseweb="input"]:focus-within {
border: 2px solid #CFB991 !important;
box-shadow: 0 0 0 3px rgba(207,185,145,0.2) !important;
}
div[data-testid="stForm"] [data-baseweb="input"] button,
section[data-testid="stForm"] [data-baseweb="input"] button {
background: transparent !important;
color: #94A3B8 !important;
border: none !important;
width: auto !important;
padding: 0 4px !important;
margin: 0 !important;
font-size: 1rem !important;
font-weight: 400 !important;
}
</style>
""", unsafe_allow_html=True)
# --- Authentication configuration ----------------------------------------
# load_user_config() returns (config, using_hf); config is None when no
# credential source could be loaded, in which case the app cannot run.
config, using_hf = load_user_config()
if config is None:
    st.error("Authentication configuration not found!")
    st.stop()
# --- Authenticator --------------------------------------------------------
# streamlit-authenticator keeps the login session alive via a signed
# browser cookie (name/key/expiry come from the loaded config).
authenticator = stauth.Authenticate(
    config['credentials'],
    config['cookie']['name'],
    config['cookie']['key'],
    config['cookie']['expiry_days']
)
# --- Login gate -----------------------------------------------------------
# While there is no authenticated session, show the branded login header
# and run the authenticator's login widget in the main page area.
if not st.session_state.get("authentication_status"):
    st.markdown("""
<div style="text-align:center;padding:3rem 0 1.5rem 0;">
<div style="display:inline-block;background:linear-gradient(135deg,#CFB991 0%,#E8D5A8 100%);
padding:1.25rem 3.5rem;border-radius:14px;position:relative;overflow:hidden;
box-shadow:0 4px 20px rgba(207,185,145,0.4);">
<div style="position:absolute;top:0;left:0;right:0;height:3px;
background:linear-gradient(90deg,transparent 0%,rgba(255,255,255,0.7) 40%,rgba(255,255,255,1) 60%,rgba(255,255,255,0.7) 80%,transparent 100%);"></div>
<span style="color:#1a1a1a;font-size:2rem;font-weight:800;letter-spacing:-0.02em;">Admin Panel</span>
</div>
<p style="color:#64748B;font-size:0.95rem;margin-top:1rem;margin-bottom:0;">
Vanderbilt AI Law Lab β Restricted Access
</p>
</div>
""", unsafe_allow_html=True)
    try:
        authenticator.login('main')
    except Exception as e:
        st.error(f"Login error: {e}")

# streamlit-authenticator publishes the login outcome via session_state.
name = st.session_state.get("name")
authentication_status = st.session_state.get("authentication_status")
username = st.session_state.get("username")

# Tri-state status: False means wrong credentials, None means the form has
# not been submitted yet. Use identity comparisons (PEP 8 E711/E712)
# rather than `==` against the None/False singletons.
if authentication_status is False:
    st.error('Username/password is incorrect')
    st.stop()
if authentication_status is None:
    st.stop()
| # ====================================================================== | |
| # HELPERS | |
| # ====================================================================== | |
def _count_json(path):
    """Return the number of entries in the JSON file at *path*.

    Missing or unparseable files count as 0, as does any top-level JSON
    value that is not a list or dict (those have no meaningful length).
    """
    try:
        with open(path, "r", encoding="utf-8") as fh:
            payload = json.load(fh)
    except (FileNotFoundError, json.JSONDecodeError):
        return 0
    if isinstance(payload, (list, dict)):
        return len(payload)
    return 0
def _count_unique_bills(path):
    """Count unique bill keys (state_bill_number) in the bills list."""
    try:
        with open(path, "r", encoding="utf-8") as fh:
            bills = json.load(fh)
    except (FileNotFoundError, json.JSONDecodeError):
        return 0
    if isinstance(bills, list):
        # Deduplicate on the "<state>_<bill_number>" composite key.
        seen = set()
        for bill in bills:
            seen.add(f"{bill.get('state', 'Unknown')}_{bill.get('bill_number', 'Unknown')}")
        return len(seen)
    # Non-list payloads: a dict still has a length; anything else counts 0.
    return len(bills) if isinstance(bills, dict) else 0
def _format_duration(seconds):
    """Format seconds into human-readable duration.

    None yields a placeholder dash; otherwise the value is truncated to an
    int and rendered as "Xs", "Xm Ys", or "Xh Ym" depending on magnitude.
    """
    if seconds is None:
        return "β"
    total = int(seconds)
    if total >= 3600:
        hours, leftover = divmod(total, 3600)
        return f"{hours}h {leftover // 60}m"
    if total >= 60:
        minutes, secs = divmod(total, 60)
        return f"{minutes}m {secs}s"
    return f"{total}s"
def _status_icon(status):
    """Return an emoji icon for a step status.

    Unknown statuses fall back to the generic glyph. NOTE(review): these
    literals appear mojibake-mangled (originally emoji); confirm the file's
    encoding before editing them.
    """
    return {
        "pending": "β³",
        "running": "π",
        "completed": "β ",
        "error": "β",
        "skipped": "βοΈ",
    }.get(status, "β")
def _render_overall_progress(status_data):
    """Render overall pipeline progress header.

    Draws, in order: an overall progress bar, a status badge keyed off
    status_data["overall_status"], a three-column metrics row (elapsed /
    steps completed / error count), and an ETA caption for the currently
    running step when throughput can be estimated.
    """
    steps = status_data.get("steps", [])
    total = len(steps)
    # "skipped" steps count as done alongside "completed".
    completed = sum(1 for s in steps if s["status"] in ("completed", "skipped"))
    failed = sum(1 for s in steps if s["status"] == "error")
    done = completed + failed
    # Overall progress bar (guard against an empty step list)
    progress_frac = done / total if total > 0 else 0.0
    st.progress(progress_frac, text=f"Pipeline Progress: {done}/{total} steps")
    # Status badge
    overall = status_data.get("overall_status", "unknown")
    if overall == "running":
        st.info("Pipeline is running...", icon="π")
    elif overall == "completed":
        st.success("Pipeline completed successfully!", icon="β ")
    elif overall == "completed_with_errors":
        st.warning("Pipeline completed with some errors", icon="β οΈ")
    elif overall == "failed":
        st.error("Pipeline failed", icon="β")
    # Metrics row
    total_errors = sum(len(s.get("errors", [])) for s in steps)
    col1, col2, col3 = st.columns(3)
    with col1:
        elapsed_str = "β"
        if status_data.get("started_at"):
            try:
                # NOTE(review): fromisoformat may yield a naive datetime;
                # subtracting it from the aware now() would raise TypeError,
                # silently absorbed below — confirm timestamps carry tzinfo.
                started = datetime.fromisoformat(status_data["started_at"])
                elapsed = (datetime.now(timezone.utc) - started).total_seconds()
                elapsed_str = _format_duration(elapsed)
            except Exception:
                pass
        st.metric("Elapsed Time", elapsed_str)
    with col2:
        st.metric("Steps Completed", f"{completed}/{total}")
    with col3:
        st.metric("Errors", str(total_errors))
    # ETA for currently running step (first one only, if any)
    running_steps = [s for s in steps if s["status"] == "running"]
    if running_steps:
        rs = running_steps[0]
        prog = rs.get("progress", {})
        current = prog.get("current", 0)
        total_items = prog.get("total", 0)
        # Need some progress, remaining work, and a start time to estimate.
        if current > 0 and total_items > current and rs.get("started_at"):
            try:
                started = datetime.fromisoformat(rs["started_at"])
                elapsed_secs = (datetime.now(timezone.utc) - started).total_seconds()
                rate = current / elapsed_secs if elapsed_secs > 0 else 0
                remaining = total_items - current
                eta_secs = remaining / rate if rate > 0 else 0
                st.caption(
                    f"**Current step:** {rs['name']} β "
                    f"~{rate * 60:.1f} {prog.get('unit', 'items')}/min | "
                    f"ETA: ~{_format_duration(eta_secs)}"
                )
            except Exception:
                pass
def _render_step_cards(status_data):
    """Render per-script status cards in a 2-column layout.

    Cards alternate left/right; each shows the step's icon + name, a
    progress bar (when running or completed), timing, an optional message,
    and an error count.
    """
    columns = st.columns(2)
    for idx, step in enumerate(status_data.get("steps", [])):
        with columns[idx % 2]:
            state = step.get("status", "pending")
            with st.container(border=True):
                st.markdown(f"**{_status_icon(state)} {step['name']}**")
                # Progress bar: live fraction while running, full when done.
                prog = step.get("progress", {})
                done_count = prog.get("current", 0)
                total_count = prog.get("total", 0)
                if state == "running" and total_count > 0:
                    st.progress(min(done_count / total_count, 1.0),
                                text=f"{done_count}/{total_count} {prog.get('unit', 'items')}")
                elif state == "completed":
                    st.progress(1.0, text="Complete")
                # Timing: final duration if recorded, else live elapsed time.
                if step.get("duration_seconds"):
                    st.caption(f"Duration: {_format_duration(step['duration_seconds'])}")
                elif state == "running" and step.get("started_at"):
                    try:
                        begun = datetime.fromisoformat(step["started_at"])
                        live = (datetime.now(timezone.utc) - begun).total_seconds()
                        st.caption(f"Running for {_format_duration(live)}")
                    except Exception:
                        pass
                # Optional status message
                if step.get("message"):
                    st.caption(step["message"])
                # Error count badge
                step_errors = step.get("errors", [])
                if step_errors:
                    st.caption(f"π΄ {len(step_errors)} error(s)")
def _render_error_panel(status_data):
    """Render expandable error summary panel.

    Collapses all step errors into one table; renders nothing when there
    are no errors at all.
    """
    steps = status_data.get("steps", [])
    # Flatten every step's errors into table rows.
    rows = [
        {
            "Script": step["name"],
            "Bill": err.get("bill_key") or err.get("bill_id", "N/A"),
            "Error": err.get("error", "Unknown")[:200],
            "Time": err.get("timestamp", "")[:19],
        }
        for step in steps
        for err in step.get("errors", [])
    ]
    if not rows:
        return
    with st.expander(f"π΄ Errors ({len(rows)} total)", expanded=False):
        # Per-script tallies first, then the flat table of failed bills.
        per_script = {s["name"]: len(s.get("errors", [])) for s in steps if s.get("errors")}
        if per_script:
            st.markdown("**Errors by Script:**")
            for script_name, count in per_script.items():
                st.markdown(f"- {script_name}: **{count}**")
            st.markdown("---")
        st.markdown("**Failed Bills:**")
        st.dataframe(pd.DataFrame(rows), hide_index=True, width="stretch")
def _render_run_history():
    """Render the pipeline run history table.

    Reads data/pipeline_run_history.json and shows runs newest-first;
    quietly degrades to a caption when the file is missing, empty, or
    unreadable.
    """
    history_file = Path("data/pipeline_run_history.json")
    if not history_file.exists():
        st.caption("No pipeline runs recorded yet.")
        return
    try:
        with open(history_file, "r", encoding="utf-8") as fh:
            history = json.load(fh)
        if not history:
            st.caption("No pipeline runs recorded yet.")
            return
        # Human-readable labels for the known terminal statuses.
        label_for = {
            "completed": "β Success",
            "completed_with_errors": "β οΈ Partial",
            "failed": "β Failed",
        }
        table = []
        for entry in reversed(history):  # most recent first
            raw_started = entry.get("started_at", "")
            try:
                stamp = datetime.fromisoformat(raw_started).strftime("%b %d, %Y %I:%M %p")
            except Exception:
                stamp = raw_started[:19] if raw_started else "β"
            table.append({
                "Date": stamp,
                "Duration": _format_duration(entry.get("duration_seconds")),
                "Status": label_for.get(entry.get("overall_status", ""), entry.get("overall_status", "β")),
                "Steps OK": entry.get("steps_completed", 0),
                "Steps Failed": entry.get("steps_failed", 0),
                "Bills": entry.get("total_bills_processed", 0),
                "Errors": entry.get("total_errors", 0),
                "Triggered By": entry.get("triggered_by", "β"),
            })
        st.dataframe(pd.DataFrame(table), hide_index=True, width="stretch")
    except Exception:
        st.caption("Could not load run history.")
# Display label -> JSON data file; the Overview tab counts each file's
# entries with _count_json() to build the "Current Data" metrics.
DATA_FILES = {
    "Total Bills": "data/known_bills.json",
    "Bills with Details": "data/known_bills_visualize.json",
    "Summaries": "data/bill_summaries.json",
    "Question Sets": "data/bill_suggested_questions.json",
    "Reports": "data/bill_reports.json",
    "Cached Bills": "data/bill_cache.json",
}
| # ====================================================================== | |
| # AUTHENTICATED VIEW | |
| # ====================================================================== | |
| if authentication_status: | |
| ALLOWED_USERNAMES = set(config['credentials']['usernames'].keys()) | |
| if username not in ALLOWED_USERNAMES: | |
| st.error(f"User '{username}' is not authorized.") | |
| st.stop() | |
| # ββ Header βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ | |
| st.markdown('<div class="admin-header-marker"></div>', unsafe_allow_html=True) | |
| hdr_title, hdr_user, hdr_home, hdr_btn = st.columns([5, 3, 1, 1]) | |
| with hdr_title: | |
| st.markdown(""" | |
| <div style="padding:0.4rem 0;"> | |
| <div style="color:#ffffff;font-size:1.55rem;font-weight:800;line-height:1.2;letter-spacing:-0.02em;">Admin Panel</div> | |
| <div style="color:rgba(255,255,255,0.65);font-size:0.72rem;font-weight:600;text-transform:uppercase;letter-spacing:0.1em;margin-top:0.15rem;">Vanderbilt AI Law Lab</div> | |
| </div> | |
| """, unsafe_allow_html=True) | |
| with hdr_user: | |
| st.markdown(f""" | |
| <div style="padding:0.3rem 0;text-align:right;"> | |
| <div style="color:rgba(255,255,255,0.65);font-size:0.92rem;"> | |
| Logged in as <strong style="color:#fff;">{name}</strong> ({username}) | |
| </div> | |
| </div> | |
| """, unsafe_allow_html=True) | |
| with hdr_home: | |
| st.page_link("streamlit_app.py", label="Home") | |
| with hdr_btn: | |
| authenticator.logout('Logout', 'main') | |
| # ββ New feedback notification ββββββββββββββββββββββββββββββββββββββββββ | |
| _notif_fb_path = Path("data/feedback.json") | |
| _new_feedback = [] | |
| if _notif_fb_path.exists(): | |
| try: | |
| with open(_notif_fb_path, "r") as _nf: | |
| _all_fb = json.load(_nf) | |
| _new_feedback = [f for f in _all_fb if f.get("status") == "new"] | |
| except Exception: | |
| pass | |
| if _new_feedback and not st.session_state.get("_fb_notif_dismissed"): | |
| _notif_container = st.container() | |
| with _notif_container: | |
| st.markdown( | |
| f'<div style="background:#FEF3C7;border:1px solid #F59E0B;border-left:4px solid #F59E0B;' | |
| f'border-radius:6px;padding:0.75rem 1rem;margin-bottom:1rem;display:flex;align-items:center;">' | |
| f'<span style="font-size:1.3rem;margin-right:0.6rem;">π</span>' | |
| f'<span style="color:#92400E;font-weight:600;">' | |
| f'You have {len(_new_feedback)} new feedback report{"s" if len(_new_feedback) != 1 else ""}! ' | |
| f'Go to the <b>Feedback</b> tab to review.</span></div>', | |
| unsafe_allow_html=True, | |
| ) | |
| if st.button("β Dismiss", key="dismiss_fb_notif"): | |
| st.session_state["_fb_notif_dismissed"] = True | |
| st.rerun() | |
| # Tabs | |
| tab1, tab2, tab3, tab4, tab5, tab6, tab7 = st.tabs(["Overview", "Update Data", "Weekly Reports", "Manage Users", "Newsletter", "Feedback", "Analytics"]) | |
| # ================================================================== | |
| # TAB 1 β OVERVIEW | |
| # ================================================================== | |
| with tab1: | |
| # --- System Status --- | |
| st.subheader("System Status") | |
| status_cols = st.columns(3) | |
| with status_cols[0]: | |
| has_openai = bool(os.getenv("OPENAI_API_KEY")) | |
| st.metric("OpenAI API", "Connected" if has_openai else "Missing") | |
| with status_cols[1]: | |
| has_legiscan = bool(os.getenv("LEGISCAN_API_KEY")) | |
| st.metric("LegiScan API", "Connected" if has_legiscan else "Missing") | |
| with status_cols[2]: | |
| has_hf = bool(os.getenv("HUGGINGFACE_HUB_TOKEN") and os.getenv("HF_REPO_ID")) | |
| st.metric("HuggingFace", "Connected" if has_hf else "Missing") | |
| st.markdown("---") | |
| # --- Data Overview --- | |
| st.subheader("Current Data") | |
| data_cols = st.columns(3) | |
| for i, (label, path) in enumerate(DATA_FILES.items()): | |
| with data_cols[i % 3]: | |
| st.metric(label, f"{_count_json(path):,}") | |
| # --- Last Pipeline Run --- | |
| log_file = Path("pipeline_last_run.log") | |
| if log_file.exists(): | |
| last_run = datetime.fromtimestamp(log_file.stat().st_mtime).strftime("%b %d, %Y at %I:%M %p") | |
| st.caption(f"Last pipeline run: {last_run}") | |
| st.markdown("---") | |
| # --- Admin Users --- | |
| st.subheader("Admin Users") | |
| users = config['credentials']['usernames'] | |
| admin_data = [ | |
| { | |
| "Username": uname, | |
| "Name": udata.get("name", ""), | |
| "Email": udata.get("email", ""), | |
| } | |
| for uname, udata in users.items() | |
| ] | |
| st.dataframe(pd.DataFrame(admin_data), hide_index=True) | |
| # ================================================================== | |
| # TAB 2 β UPDATE DATA | |
| # ================================================================== | |
with tab2:
    st.subheader("Update Data")
    st.markdown("Pull the latest AI legislation bills from all 50 states, generate summaries and reports, then sync to the cloud.")
    # --- Data Health Summary ---
    # Compare total unique bills against the per-artifact JSON files to show
    # how many bills still lack generated summaries / questions / reports.
    st.markdown("**Data Health**")
    total_bills = _count_unique_bills("data/known_bills_visualize.json")
    n_summaries = _count_json("data/bill_summaries.json")
    n_questions = _count_json("data/bill_suggested_questions.json")
    n_reports = _count_json("data/bill_reports.json")
    # Clamp at zero: an artifact file may hold more entries than current bills.
    missing_summaries = max(0, total_bills - n_summaries)
    missing_questions = max(0, total_bills - n_questions)
    missing_reports = max(0, total_bills - n_reports)
    health_cols = st.columns(4)
    with health_cols[0]:
        st.metric("Total Bills", f"{total_bills:,}")
    with health_cols[1]:
        # Traffic-light icon: green = none missing, yellow = fewer than 50, red otherwise.
        icon = "🟢" if missing_summaries == 0 else "🟡" if missing_summaries < 50 else "🔴"
        st.metric(f"{icon} Missing Summaries", f"{missing_summaries:,}")
    with health_cols[2]:
        icon = "🟢" if missing_questions == 0 else "🟡" if missing_questions < 50 else "🔴"
        st.metric(f"{icon} Missing Questions", f"{missing_questions:,}")
    with health_cols[3]:
        icon = "🟢" if missing_reports == 0 else "🟡" if missing_reports < 50 else "🔴"
        st.metric(f"{icon} Missing Reports", f"{missing_reports:,}")
    st.markdown("---")
    # --- Check if pipeline is already running ---
    # PipelineStatus.load() reads the status file maintained by the runner process.
    existing_status = PipelineStatus.load()
    pipeline_already_running = False
    if existing_status and existing_status.get("overall_status") == "running":
        # Check staleness (6 hour timeout) so a leftover status file from a
        # crashed run cannot lock the panel forever.
        try:
            started = datetime.fromisoformat(existing_status["started_at"])
            age_hours = (datetime.now(timezone.utc) - started).total_seconds() / 3600
            if age_hours < 6:
                pipeline_already_running = True
        except Exception:
            # Missing or naive timestamp (aware-naive subtraction raises):
            # treat the run as stale and allow a new one.
            pass
    if pipeline_already_running:
        st.warning("A pipeline is already running. Live status shown below.")
        _render_overall_progress(existing_status)
        _render_step_cards(existing_status)
        _render_error_panel(existing_status)
        # Live log viewer — tail of the last 100 lines only.
        log_path = Path("pipeline_last_run.log")
        if log_path.exists() and log_path.stat().st_size > 0:
            with st.expander("View live log", expanded=False):
                log_content = log_path.read_text(encoding="utf-8", errors="ignore")
                log_lines = log_content.strip().split("\n")
                st.code("\n".join(log_lines[-100:]), language=None)
        btn_col1, btn_col2 = st.columns([1, 1])
        with btn_col1:
            if st.button("Refresh Status"):
                st.rerun()
        with btn_col2:
            if st.button("Cancel Pipeline", type="secondary"):
                pid = existing_status.get("pid")
                if pid:
                    try:
                        # Ask the runner process to terminate, then mark the
                        # status file failed so the panel unlocks immediately.
                        os.kill(pid, signal.SIGTERM)
                        cancel_status = PipelineStatus()
                        cancel_status._data = existing_status
                        cancel_status.fail_pipeline("Cancelled by admin user")
                        st.success("Pipeline cancelled.")
                        time.sleep(1)
                        st.rerun()
                    except ProcessLookupError:
                        st.warning("Process already finished.")
                        st.rerun()
                else:
                    st.warning("No PID found — pipeline may have been started from CLI.")
        # Auto-refresh every 3 seconds to show live progress
        # (st.rerun() stops the script here, so nothing below renders while running).
        time.sleep(3)
        st.rerun()
    else:
        # --- Check if pipeline just finished ---
        # Only treat a terminal status as "just finished" when THIS admin
        # session launched the run, so a plain page refresh does not
        # re-trigger the post-run actions (delta display, HF upload).
        pipeline_just_finished = (
            existing_status
            and existing_status.get("overall_status") in ("completed", "completed_with_errors", "failed")
            and st.session_state.get("pipeline_started_by_admin")
        )
        if pipeline_just_finished:
            # Clear flag to prevent re-triggering on refresh
            st.session_state["pipeline_started_by_admin"] = False
            overall = existing_status.get("overall_status", "")
            if overall == "completed":
                st.success("Pipeline completed successfully!", icon="✅")
            elif overall == "completed_with_errors":
                st.warning("Pipeline completed with some errors", icon="⚠️")
            else:
                st.error("Pipeline failed", icon="❌")
            _render_overall_progress(existing_status)
            _render_step_cards(existing_status)
            _render_error_panel(existing_status)
            # Data changes comparison: diff current counts against the snapshot
            # taken at launch time (pop -> the comparison is shown only once).
            pre_snapshot = st.session_state.pop("pre_snapshot", {})
            if pre_snapshot:
                st.markdown("---")
                st.markdown("### Data Changes")
                post_snapshot = {label: _count_json(path) for label, path in DATA_FILES.items()}
                delta_cols = st.columns(3)
                for i, (label, path) in enumerate(DATA_FILES.items()):
                    with delta_cols[i % 3]:
                        before = pre_snapshot.get(label, 0)
                        after = post_snapshot.get(label, 0)
                        delta = after - before
                        st.metric(label, f"{after:,}",
                                  delta=f"{delta:+,}" if delta != 0 else None)
            # HuggingFace upload (flag set by the test-pipeline launcher to skip).
            skip_upload = st.session_state.pop("pipeline_skip_upload", False)
            if not skip_upload:
                st.markdown("---")
                st.markdown("### Uploading to HuggingFace")
                with st.spinner("Syncing data to cloud..."):
                    try:
                        urls = upload_all_to_huggingface()
                        st.success(f"Uploaded {len(urls)} files to HuggingFace!")
                        # Invalidate cached data so the app re-reads fresh files.
                        st.cache_data.clear()
                    except Exception as e:
                        st.error(f"Upload failed: {e}")
            # Show log from the run
            log_path = Path("pipeline_last_run.log")
            if log_path.exists() and log_path.stat().st_size > 0:
                with st.expander("View pipeline log"):
                    st.code(log_path.read_text(encoding="utf-8", errors="ignore"), language=None)
| else: | |
| # --- Resume from step option --- | |
| from data_updating_scripts.pipeline_status import PipelineStatus as _PS | |
| _all_scripts = [ | |
| "data_updating_scripts/get_data.py", | |
| "data_updating_scripts/fix_pdf_bills.py", | |
| "data_updating_scripts/known_bills_status.py", | |
| "data_updating_scripts/migrate_iapp_categories.py", | |
| "data_updating_scripts/mark_no_text_bills.py", | |
| "data_updating_scripts/generate_summaries.py", | |
| "data_updating_scripts/generate_suggested_questions.py", | |
| "data_updating_scripts/generate_reports.py", | |
| "data_updating_scripts/build_bills_vectorstore.py", | |
| "data_updating_scripts/eu_vectorstore.py", | |
| "data_updating_scripts/detect_changes.py", | |
| "data_updating_scripts/build_calendar.py", | |
| "data_updating_scripts/generate_newsletter.py", | |
| ] | |
| _step_labels = [f"{i+1}. {SCRIPT_DISPLAY_NAMES.get(s, s)}" for i, s in enumerate(_all_scripts)] | |
| col_start, col_stop = st.columns(2) | |
| with col_start: | |
| start_from = st.selectbox( | |
| "Start from step", | |
| options=range(len(_all_scripts)), | |
| format_func=lambda i: _step_labels[i], | |
| index=0, | |
| help="Resume pipeline from a specific step (skips earlier steps).", | |
| ) | |
| with col_stop: | |
| stop_after = st.selectbox( | |
| "Stop after step", | |
| options=range(len(_all_scripts)), | |
| format_func=lambda i: _step_labels[i], | |
| index=len(_all_scripts) - 1, | |
| help="Stop pipeline after this step (skips later steps).", | |
| ) | |
| reset_vs = st.checkbox( | |
| "Reset vectorstore", | |
| help="Delete all Pinecone vectors and re-embed from scratch. Use this if the vectorstore is full of duplicates.", | |
| ) | |
| # --- Main Update Button --- | |
| if st.button("Update Data", type="primary"): | |
| st.session_state["pre_snapshot"] = {label: _count_json(path) for label, path in DATA_FILES.items()} | |
| st.session_state["pipeline_skip_upload"] = False | |
| st.session_state["pipeline_started_by_admin"] = True | |
| update_cmd = [ | |
| sys.executable, "update_data.py", | |
| "--pull", "--overwrite-pdf", "--continue-on-error", | |
| ] | |
| if start_from > 0: | |
| update_cmd += ["--start-from", str(start_from + 1)] | |
| if stop_after < len(_all_scripts) - 1: | |
| update_cmd += ["--stop-after", str(stop_after + 1)] | |
| if reset_vs: | |
| update_cmd += ["--reset-vectorstore"] | |
| env = os.environ.copy() | |
| openai_key = os.getenv("OPENAI_API_KEY") | |
| if openai_key: | |
| env["OPENAI_API_KEY"] = openai_key | |
| env["PIPELINE_TRIGGERED_BY"] = "admin_panel" | |
| log_fh = open("pipeline_last_run.log", "w", encoding="utf-8") | |
| subprocess.Popen( | |
| update_cmd, | |
| stdout=log_fh, stderr=subprocess.STDOUT, | |
| env=env, | |
| ) | |
| time.sleep(2) | |
| st.rerun() | |
| # --- Test Pipeline Button --- | |
| st.markdown("---") | |
| with st.expander("Test Pipeline (Quick Verification)"): | |
| st.markdown(""" | |
| Run a quick test of the pipeline with only 3 bills from California. | |
| This verifies all scripts are working without consuming significant API quota. | |
| """) | |
| col1, col2 = st.columns([1, 1]) | |
| with col1: | |
| test_bill_count = st.number_input("Number of test bills", min_value=1, max_value=10, value=3) | |
| with col2: | |
| st.write("") # Spacing | |
| if st.button("Run Test Pipeline", type="secondary"): | |
| st.session_state["pipeline_skip_upload"] = True | |
| st.session_state["pipeline_started_by_admin"] = True | |
| st.session_state["test_mode"] = True | |
| test_cmd = [ | |
| sys.executable, "update_data.py", | |
| "--test", f"--test-count={test_bill_count}", | |
| "--skip-upload", "--continue-on-error", | |
| ] | |
| env = os.environ.copy() | |
| openai_key = os.getenv("OPENAI_API_KEY") | |
| if openai_key: | |
| env["OPENAI_API_KEY"] = openai_key | |
| env["PIPELINE_TRIGGERED_BY"] = "admin_panel_test" | |
| log_fh = open("pipeline_last_run.log", "w", encoding="utf-8") | |
| subprocess.Popen( | |
| test_cmd, | |
| stdout=log_fh, stderr=subprocess.STDOUT, | |
| env=env, | |
| ) | |
| time.sleep(2) | |
| st.rerun() | |
| # --- Run Individual Script --- | |
| st.markdown("---") | |
| def run_individual_script(): | |
| with st.expander("Run Individual Script"): | |
| if pipeline_already_running: | |
| st.warning("Cannot run individual scripts while a pipeline is running.") | |
| else: | |
| script_options = {name: path for path, name in SCRIPT_DISPLAY_NAMES.items()} | |
| selected_name = st.selectbox("Select script", list(script_options.keys())) | |
| selected_script = script_options[selected_name] | |
| if st.button("Run Script"): | |
| env = os.environ.copy() | |
| openai_key = os.getenv("OPENAI_API_KEY") | |
| if openai_key: | |
| env["OPENAI_API_KEY"] = openai_key | |
| with st.status(f"Running {selected_name}...", expanded=True) as sw: | |
| proc = subprocess.Popen( | |
| [sys.executable, selected_script], | |
| stdout=subprocess.PIPE, stderr=subprocess.STDOUT, | |
| text=True, bufsize=1, env=env, | |
| ) | |
| output_container = st.empty() | |
| lines = [] | |
| for line in proc.stdout: | |
| lines.append(line.rstrip("\n")) | |
| output_container.code("\n".join(lines[-50:]), language=None) | |
| rc = proc.wait() | |
| if rc == 0: | |
| sw.update(label=f"{selected_name} completed!", state="complete") | |
| else: | |
| sw.update(label=f"{selected_name} failed (exit {rc})", state="error") | |
| run_individual_script() | |
    # --- Run History ---
    st.markdown("---")
    st.subheader("Pipeline Run History")
    _render_run_history()
    # --- Last Run Log ---
    # Full (untruncated) log of the most recent run, if one exists.
    log_path = Path("pipeline_last_run.log")
    if log_path.exists() and log_path.stat().st_size > 0:
        with st.expander("View last pipeline log"):
            st.code(log_path.read_text(encoding="utf-8", errors="ignore"), language=None)
| # ================================================================== | |
| # TAB 3 β WEEKLY REPORTS | |
| # ================================================================== | |
with tab3:
    st.subheader("Weekly Change Reports")
    st.markdown("View reports of new bills and status changes detected after each pipeline run.")
    def weekly_reports_viewer():
        """Render a report selector, summary metrics, table and download for weekly change CSVs."""
        changes_dir = Path("data/weekly_changes")
        # Glob once, newest first by filename. (The original globbed the
        # directory twice and re-checked `if csv_files:` after the guard had
        # already proven it non-empty.)
        csv_files = (
            sorted(changes_dir.glob("*.csv"), key=lambda p: p.name, reverse=True)
            if changes_dir.exists() else []
        )
        if not csv_files:
            st.info("No change reports found yet. Reports are generated automatically after each pipeline run once a baseline snapshot exists.")
            return
        # Dropdown to select which report to view
        csv_options = {f.stem.replace("weekly_changes_", "Week of "): f for f in csv_files}
        selected_label = st.selectbox(
            "Select report",
            options=list(csv_options.keys()),
            index=0,
            key="weekly_report_select"
        )
        selected_csv = csv_options[selected_label]
        try:
            df_changes = pd.read_csv(selected_csv)
            # Headline counts by change type; "other" is the remainder.
            total_changes = len(df_changes)
            new_bills = len(df_changes[df_changes['change_type'] == 'new_bill'])
            signed = len(df_changes[df_changes['change_type'] == 'signed_into_law'])
            vetoed = len(df_changes[df_changes['change_type'] == 'vetoed'])
            other_changes = total_changes - new_bills - signed - vetoed
            metric_cols = st.columns(5)
            with metric_cols[0]:
                st.metric("Total Changes", total_changes)
            with metric_cols[1]:
                st.metric("New Bills", new_bills)
            with metric_cols[2]:
                st.metric("Signed Into Law", signed)
            with metric_cols[3]:
                st.metric("Vetoed", vetoed)
            with metric_cols[4]:
                st.metric("Other Changes", other_changes)
            # Full table view
            st.dataframe(df_changes, hide_index=True, width="stretch")
            # Download button
            with open(selected_csv, "rb") as f:
                st.download_button(
                    label=f"Download {selected_csv.name}",
                    data=f.read(),
                    file_name=selected_csv.name,
                    mime="text/csv",
                )
        except Exception as e:
            st.error(f"Error loading CSV: {e}")
    weekly_reports_viewer()
| # ================================================================== | |
| # TAB 4 β MANAGE USERS | |
| # ================================================================== | |
with tab4:
    st.subheader("Manage Users")
    # Full user CRUD is only available when auth is backed by a HuggingFace
    # dataset repo; otherwise a read-only manual-setup guide is shown.
    if using_hf:
        try:
            user_manager = HuggingFaceUserManager()
            # --- Add New Admin ---
            with st.expander("Add New Admin"):
                with st.form("add_user_form"):
                    col1, col2 = st.columns(2)
                    with col1:
                        new_username = st.text_input("Username", key="new_username")
                        new_email = st.text_input("Email", key="new_email")
                    with col2:
                        new_name = st.text_input("Full Name", key="new_name")
                        new_password = st.text_input("Password", type="password", key="new_password")
                    if st.form_submit_button("Add Admin", type="primary"):
                        if not all([new_username, new_email, new_name, new_password]):
                            st.error("Please fill in all fields")
                        else:
                            with st.spinner("Adding user..."):
                                # bcrypt imported lazily — only needed on submit.
                                import bcrypt
                                hashed = bcrypt.hashpw(new_password.encode(), bcrypt.gensalt()).decode()
                                success, message, commit_url = user_manager.add_user(
                                    new_username, new_email, new_name, hashed
                                )
                                if success:
                                    st.success(message)
                                    # Clear cached config so the new user shows up.
                                    st.cache_data.clear()
                                    st.rerun()
                                else:
                                    st.error(message)
            # --- Edit Admin ---
            users = config['credentials']['usernames']
            usernames_list = list(users.keys())
            with st.expander("Edit Admin"):
                with st.form("edit_user_form"):
                    user_to_edit = st.selectbox("Select user", options=usernames_list, key="edit_username")
                    current_user = users.get(user_to_edit, {})
                    st.caption(f"Current: {current_user.get('name', 'N/A')} — {current_user.get('email', 'N/A')}")
                    col1, col2 = st.columns(2)
                    with col1:
                        new_email = st.text_input("New Email", key="edit_email", placeholder="Leave blank to keep current")
                        new_password = st.text_input("New Password", type="password", key="edit_password", placeholder="Leave blank to keep current")
                    with col2:
                        new_name = st.text_input("New Name", key="edit_name", placeholder="Leave blank to keep current")
                    if st.form_submit_button("Update Admin", type="primary"):
                        # Blank fields mean "keep current" (converted to None below).
                        if not any([new_email, new_name, new_password]):
                            st.warning("Enter at least one field to update")
                        else:
                            with st.spinner("Updating user..."):
                                hashed = None
                                if new_password:
                                    import bcrypt
                                    hashed = bcrypt.hashpw(new_password.encode(), bcrypt.gensalt()).decode()
                                success, message, commit_url = user_manager.update_user(
                                    user_to_edit,
                                    new_email=new_email or None,
                                    new_name=new_name or None,
                                    new_password=hashed
                                )
                                if success:
                                    st.success(message)
                                    # Changing your own password affects the current login.
                                    if user_to_edit == username and new_password:
                                        st.info("Log out and log back in to use your new password.")
                                    st.cache_data.clear()
                                    st.rerun()
                                else:
                                    st.error(message)
            # --- Remove Admin ---
            # Never offer removal when only one admin remains (would lock everyone out).
            if len(usernames_list) > 1:
                with st.expander("Remove Admin"):
                    with st.form("remove_user_form"):
                        user_to_remove = st.selectbox("Select user to remove", options=usernames_list, key="remove_username")
                        st.warning(f"This will permanently delete **{user_to_remove}**.")
                        confirm = st.checkbox("I confirm I want to remove this user")
                        if st.form_submit_button("Remove Admin"):
                            if not confirm:
                                st.error("Please confirm the removal")
                            elif user_to_remove == username:
                                # Self-removal is blocked explicitly.
                                st.error("You cannot remove yourself!")
                            else:
                                with st.spinner("Removing user..."):
                                    success, message, commit_url = user_manager.remove_user(user_to_remove)
                                    if success:
                                        st.success(message)
                                        st.cache_data.clear()
                                        st.rerun()
                                    else:
                                        st.error(message)
            st.markdown("---")
            # --- Current Admins List ---
            st.markdown("**Current Admins**")
            for uname, udata in users.items():
                label = f"{udata.get('name', uname)} (@{uname})"
                if uname == username:
                    label += " — you"
                st.caption(f"{label} | {udata.get('email', '')}")
        except Exception as e:
            st.error(f"Error initializing user manager: {e}")
            st.exception(e)
    else:
        st.info("User management requires HuggingFace. Set `HUGGINGFACE_HUB_TOKEN` and `HF_REPO_ID` in your `.env` file to enable it.")
        with st.expander("Manual Setup"):
            st.markdown("""
            **To add users without HuggingFace:**
            1. Generate a password hash:
            ```bash
            python generate_password_hash.py
            ```
            2. Add the user to `auth_config.json`:
            ```json
            {
                "credentials": {
                    "usernames": {
                        "newuser": {
                            "email": "user@vanderbilt.edu",
                            "name": "New User",
                            "password": "$2b$12$HASH_FROM_STEP_1"
                        }
                    }
                }
            }
            ```
            3. Restart the app.
            """)
        st.markdown("---")
        st.markdown("**Current Admins**")
        if 'credentials' in config and 'usernames' in config['credentials']:
            for uname, udata in config['credentials']['usernames'].items():
                label = f"{udata.get('name', uname)} (@{uname})"
                if uname == username:
                    label += " — you"
                st.caption(f"{label} | {udata.get('email', '')}")
| # ================================================================== | |
| # TAB 5 β NEWSLETTER | |
| # ================================================================== | |
with tab5:
    st.subheader("Newsletter Subscribers")
    _sub_path = Path("data/subscribers.json")

    def _admin_load_subs():
        """Load the subscriber email list; a missing/unreadable file yields []."""
        try:
            # Explicit UTF-8 (fix): the rest of the file always passes an
            # encoding; relying on the platform default is inconsistent.
            with open(_sub_path, encoding="utf-8") as f:
                return json.load(f)
        except Exception:
            return []

    def _admin_save_subs(emails):
        """Persist the subscriber list locally, then best-effort sync to the HF dataset repo."""
        with open(_sub_path, "w", encoding="utf-8") as f:
            json.dump(emails, f, indent=2)
        try:
            from huggingface_hub import HfApi
            token = os.getenv("HUGGINGFACE_HUB_TOKEN")
            repo_id = os.getenv("HF_REPO_ID")
            if token and repo_id:
                HfApi(token=token).upload_file(
                    path_or_fileobj=str(_sub_path),
                    path_in_repo="subscribers.json",
                    repo_id=repo_id, repo_type="dataset",
                    commit_message="Update subscribers",
                )
        except Exception:
            # Upload is best-effort; the local file remains the source of truth.
            pass

    subs = _admin_load_subs()
    st.caption(f"{len(subs)} subscriber{'s' if len(subs) != 1 else ''}")
    if subs:
        sub_df = pd.DataFrame({"#": range(1, len(subs) + 1), "Email": subs})
        st.dataframe(
            sub_df, hide_index=True, width='stretch',
            column_config={"#": st.column_config.NumberColumn(width="small")},
        )
        with st.expander("Remove a subscriber"):
            remove_email = st.selectbox("Select email to remove", subs, key="remove_sub_select")
            if st.button("Remove selected", key="remove_sub_btn"):
                subs.remove(remove_email)
                _admin_save_subs(subs)
                st.success(f"Removed {remove_email}")
                st.rerun()
    else:
        st.info("No subscribers yet.")
    st.markdown("**Add subscriber manually**")
    with st.form("admin_add_sub"):
        new_sub_email = st.text_input("Email", placeholder="someone@example.com")
        if st.form_submit_button("Add"):
            # Minimal validation: require an "@"; stored normalized (lowercase, trimmed).
            if new_sub_email and "@" in new_sub_email:
                subs = _admin_load_subs()
                clean = new_sub_email.strip().lower()
                if clean not in subs:
                    subs.append(clean)
                    _admin_save_subs(subs)
                    st.success(f"Added {clean}")
                    st.rerun()
                else:
                    st.info("Already subscribed.")
            else:
                st.error("Enter a valid email.")
| # ================================================================== | |
| # TAB 6 β FEEDBACK | |
| # ================================================================== | |
with tab6:
    st.subheader("User Feedback")
    _fb_path = Path("data/feedback.json")

    def _write_feedback(data):
        """Persist the full feedback list to the local JSON file."""
        with open(_fb_path, "w", encoding="utf-8") as _f:
            json.dump(data, _f, indent=2, ensure_ascii=False)

    def _sync_feedback_to_hf(commit_message):
        """Best-effort upload of feedback.json to the HF dataset repo (no-op without credentials)."""
        try:
            from huggingface_hub import HfApi
            token = os.getenv("HUGGINGFACE_HUB_TOKEN")
            repo_id = os.getenv("HF_REPO_ID")
            if token and repo_id:
                HfApi(token=token).upload_file(
                    path_or_fileobj=str(_fb_path),
                    path_in_repo="feedback.json",
                    repo_id=repo_id, repo_type="dataset",
                    commit_message=commit_message,
                )
        except Exception:
            pass

    feedback_data = []
    if _fb_path.exists():
        try:
            with open(_fb_path, "r", encoding="utf-8") as _f:
                feedback_data = json.load(_f)
        except Exception:
            pass
    if not feedback_data:
        st.info("No feedback submissions yet.")
    else:
        new_count = sum(1 for f in feedback_data if f.get("status") == "new")
        st.markdown(f"**{new_count} new** · {len(feedback_data)} total submissions")
        # Show newest first
        for i, entry in enumerate(reversed(feedback_data)):
            # real_idx = position in the un-reversed on-disk list (needed for edits).
            real_idx = len(feedback_data) - 1 - i
            status = entry.get("status", "new")
            fb_type = entry.get("type", "General Feedback")
            ts = entry.get("timestamp", "")[:16].replace("T", " ")
            name = entry.get("name") or "Anonymous"
            desc_preview = (entry.get("description", "")[:80] + "...") if len(entry.get("description", "")) > 80 else entry.get("description", "")
            status_icon = {"new": "🔴", "in_progress": "🟡", "resolved": "🟢", "dismissed": "⚫"}.get(status, "⚪")
            with st.expander(f"{status_icon} [{fb_type}] {desc_preview} — {name}, {ts}"):
                st.markdown(f"**From:** {name}")
                if entry.get("email"):
                    st.markdown(f"**Email:** {entry['email']}")
                st.markdown(f"**Type:** {fb_type}")
                st.markdown(f"**Submitted:** {entry.get('timestamp', 'N/A')}")
                st.markdown("---")
                st.markdown(entry.get("description", ""))
                st.markdown("---")
                new_status = st.selectbox(
                    "Status",
                    ["new", "in_progress", "resolved", "dismissed"],
                    index=["new", "in_progress", "resolved", "dismissed"].index(status),
                    key=f"fb_status_{entry.get('id', real_idx)}",
                )
                # Selectbox change -> persist, sync, rerun (previously the
                # save + upload boilerplate was duplicated inline here and in
                # the delete path; now factored into the helpers above).
                if new_status != status:
                    feedback_data[real_idx]["status"] = new_status
                    _write_feedback(feedback_data)
                    _sync_feedback_to_hf("Update feedback status")
                    st.success(f"Status updated to {new_status}")
                    st.rerun()
                if st.button("🗑 Delete", key=f"fb_del_{entry.get('id', real_idx)}"):
                    feedback_data.pop(real_idx)
                    _write_feedback(feedback_data)
                    _sync_feedback_to_hf("Delete feedback entry")
                    st.success("Feedback deleted.")
                    st.rerun()
| # ================================================================== | |
| # TAB 7 β ANALYTICS | |
| # ================================================================== | |
with tab7:
    st.subheader("Site Analytics")
    _analytics_path = Path("data/analytics.json")
    # analytics maps "YYYY-MM-DD" -> {"visits": int, "hours": {hour_str: int}}
    # (shape inferred from the reads below — confirm against the writer).
    analytics = {}
    if _analytics_path.exists():
        try:
            with open(_analytics_path, "r") as _f:
                analytics = json.load(_f)
        except Exception:
            pass
    if st.button("☁ Sync Analytics to HuggingFace", key="sync_analytics_hf"):
        try:
            from huggingface_hub import HfApi
            token = os.getenv("HUGGINGFACE_HUB_TOKEN")
            repo_id = os.getenv("HF_REPO_ID")
            if token and repo_id and _analytics_path.exists():
                HfApi(token=token).upload_file(
                    path_or_fileobj=str(_analytics_path),
                    path_in_repo="analytics.json",
                    repo_id=repo_id, repo_type="dataset",
                    commit_message="Sync analytics data",
                )
                st.success("Analytics synced to HuggingFace.")
            else:
                st.warning("HF credentials not configured.")
        except Exception as e:
            st.error(f"Sync failed: {e}")
    if not analytics:
        st.info("No analytics data yet. Visits are tracked when users load the main app.")
    else:
        # Summary metrics. ISO date strings sort correctly lexicographically.
        sorted_dates = sorted(analytics.keys())
        total_visits = sum(d.get("visits", 0) for d in analytics.values())
        today_str = datetime.now(timezone.utc).strftime("%Y-%m-%d")
        today_visits = analytics.get(today_str, {}).get("visits", 0)
        days_tracked = len(sorted_dates)
        avg_daily = total_visits / days_tracked if days_tracked > 0 else 0
        c1, c2, c3, c4 = st.columns(4)
        c1.metric("Total Visits", f"{total_visits:,}")
        c2.metric("Today", f"{today_visits:,}")
        c3.metric("Days Tracked", f"{days_tracked:,}")
        c4.metric("Avg / Day", f"{avg_daily:.1f}")
        # Daily visits chart
        st.markdown("#### Daily Visits")
        chart_data = pd.DataFrame([
            {"Date": d, "Visits": analytics[d].get("visits", 0)}
            for d in sorted_dates
        ])
        chart_data["Date"] = pd.to_datetime(chart_data["Date"])
        st.bar_chart(chart_data.set_index("Date")["Visits"])
        # Hourly breakdown for today
        today_hours = analytics.get(today_str, {}).get("hours", {})
        if today_hours:
            st.markdown("#### Today's Visits by Hour (UTC)")
            # Fix: hour keys are strings, so a plain sort orders "10" before
            # "2"; sort numerically (correct for padded keys too).
            hour_data = pd.DataFrame([
                {"Hour": f"{int(h):02d}:00", "Visits": v}
                for h, v in sorted(today_hours.items(), key=lambda kv: int(kv[0]))
            ])
            st.bar_chart(hour_data.set_index("Hour")["Visits"])
        # Recent days table (last 14 days, newest first)
        st.markdown("#### Recent Activity")
        recent = sorted_dates[-14:]
        recent_df = pd.DataFrame([
            {"Date": d, "Visits": analytics[d].get("visits", 0)}
            for d in reversed(recent)
        ])
        st.dataframe(recent_df, hide_index=True, width="stretch")