# (Scraper residue from the hosting page — preserved as comments, not part of the module)
# Spaces: Sleeping
# Sleeping
# File size: 1,992 Bytes
import streamlit as st
import tracker
from datetime import datetime
import json
def render_admin_sidebar():
    """Render admin-only tools in the sidebar.

    Adds a debug-mode toggle (persisted in ``st.session_state.debug_mode``
    so the main app and ``render_debug_overlay`` can read it on every
    rerun) and, when the tracker's usage log exists, a download button
    for it.
    """
    st.divider()
    # NOTE(review): header/toggle labels look mojibake'd in this copy of the
    # file (likely originally emoji) — confirm against the repo before editing.
    st.header("π‘οΈ Admin Console")

    # 1. Debug toggle.
    # The widget already returns the boolean we want, so mirror it straight
    # into session state instead of branching on it twice.
    st.session_state.debug_mode = st.toggle(
        "π Enable Debug Overlay",
        value=st.session_state.get("debug_mode", False),
    )
    if st.session_state.debug_mode:
        st.caption("Showing raw prompts & token counts.")

    # 2. Log downloader — only offered once the tracker has written a log.
    log_path = tracker.get_log_path()  # presumably a pathlib.Path (.exists() below)
    if log_path.exists():
        # Explicit encoding: open(path, "r") used the platform default,
        # which can mis-decode the JSON log on non-UTF-8 systems.
        log_data = log_path.read_text(encoding="utf-8")
        st.download_button(
            label="π₯ Download Usage Logs",
            data=log_data,
            file_name=f"usage_log_{datetime.now().strftime('%Y-%m-%d')}.json",
            mime="application/json",
        )
def render_debug_overlay(location="Generic"):
    """Show a collapsible view of the most recent raw input sent to the LLM.

    Reads ``last_prompt_sent`` / ``last_context_used`` from
    ``st.session_state`` (populated by the main app). Renders nothing
    unless debug mode is enabled and a prompt has been recorded.
    """
    # Guard clause: overlay is only for admins who flipped the debug toggle.
    if not st.session_state.get("debug_mode", False):
        return

    prompt = st.session_state.get("last_prompt_sent")
    context = st.session_state.get("last_context_used")

    # Nothing recorded yet — nothing to show.
    if not prompt:
        return

    # NOTE(review): expander/tab labels look mojibake'd in this copy of the
    # file (likely originally emoji) — confirm against the repo.
    with st.expander(f"π Debug: Raw LLM Input ({location})", expanded=False):
        prompt_tab, context_tab = st.tabs(["π Prompt", "π Context"])
        with prompt_tab:
            st.caption(f"Length: {len(prompt)} chars")
            st.code(prompt, language="text")
        with context_tab:
            if not context:
                st.info("No context used for this turn.")
            else:
                st.caption("Raw retrieved chunks passed to model:")
                st.text(context)