"""
Streamlit UI for HR Supervisor Agent.
Connects to the Supervisor API with streaming support.
Run with: streamlit run src/frontend/streamlit/supervisor_ui/app.py
In Docker, set SUPERVISOR_API_URL environment variable.
Locally, defaults to http://localhost:8080/api/v1/supervisor
"""
import streamlit as st
from src.sdk import SupervisorClient
# Initialize SDK client
client = SupervisorClient()
st.set_page_config(page_title="HR Supervisor Agent", layout="wide")
# Initialize chat history
if "messages" not in st.session_state:
st.session_state.messages = []
# Initialize thread_id for conversation continuity
if "thread_id" not in st.session_state:
st.session_state.thread_id = None
st.title("🤖 HR Supervisor Agent")
st.caption("I can query the candidate database and help with recruitment tasks.")
# Sidebar with "New Chat" button to reset context.
with st.sidebar:
    st.header("Controls")
    if st.button("Start New Chat", type="primary", use_container_width=True):
        # Keep the try body minimal: only the API call can raise here.
        try:
            st.session_state.thread_id = client.new_chat()
        except Exception:
            # Bug fix: previously st.rerun() ran unconditionally after this
            # handler, restarting the script and wiping the error message
            # before the user could see it. Now we surface the error and
            # do NOT rerun, preserving the existing conversation state.
            st.error("⚠️ Cannot connect to API. Is the server running?")
        else:
            # Reset local state only after the server confirmed a new chat.
            st.session_state.messages = []
            st.session_state.token_usage = 0
            st.rerun()
    st.divider()
    st.caption(f"Chat ID:\n`{st.session_state.get('thread_id', 'Not set')}`")
    # Placeholder for token usage so the metric can be updated dynamically
    # from the main chat handler after each response.
    token_metric_placeholder = st.empty()
    if "token_usage" in st.session_state:
        token_metric_placeholder.metric(label="Context Window Tokens", value=st.session_state.token_usage)
# Replay the stored transcript so the conversation persists across reruns.
for past in st.session_state.messages:
    with st.chat_message(past["role"]):
        st.markdown(past["content"])
# User input: handle one prompt per rerun via the chat input widget.
if prompt := st.chat_input("Ask me anything about candidates..."):
    # Record and render the user's message.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Generate the assistant response via the chat endpoint
    # (with CompactingSupervisor wrapper / context compaction).
    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        full_response = ""
        try:
            # Keep the try body minimal: only the API call is guarded, so
            # success-path bookkeeping below cannot be mistaken for an
            # API failure.
            with st.spinner("Thinking..."):
                response = client.chat(prompt, st.session_state.thread_id)
        except Exception as e:
            full_response = f"❌ Error: {str(e)}"
            message_placeholder.error(full_response)
        else:
            full_response = response.content
            message_placeholder.markdown(full_response)
            # First message of a conversation: adopt the server's thread id.
            if st.session_state.thread_id is None:
                st.session_state.thread_id = response.thread_id
            # Update token usage (mirrors the sidebar metric placeholder).
            st.session_state.token_usage = response.token_count
            token_metric_placeholder.metric(
                label="Context Window Tokens",
                value=response.token_count
            )
            # Empty-response guard runs only on the success path, so it can
            # never overwrite an error message rendered in the except branch.
            if not full_response:
                full_response = "No response received from agent."
                message_placeholder.warning(full_response)
        # --- STREAMING RAW VERSION (commented out) ---
        # for chunk in client.stream_raw(prompt, st.session_state.thread_id):
        #     if chunk.type == "token":
        #         full_response += chunk.content or ""
        #         message_placeholder.markdown(full_response + "▌")
        #     elif chunk.type == "done":
        #         if st.session_state.thread_id is None:
        #             st.session_state.thread_id = chunk.thread_id
        #         st.session_state.token_usage = chunk.token_count or 0
        #         token_metric_placeholder.metric(
        #             label="Context Window Tokens",
        #             value=chunk.token_count or 0
        #         )
        #         message_placeholder.markdown(full_response)
        #     elif chunk.type == "error":
        #         full_response = f"❌ Error: {chunk.error}"
        #         message_placeholder.error(full_response)

    # Persist the assistant turn (content, error text, or empty-response
    # warning) so it is replayed on the next rerun.
    st.session_state.messages.append({"role": "assistant", "content": full_response})