|
|
""" |
|
|
Streamlit UI for HR Supervisor Agent. |
|
|
|
|
|
Connects to the Supervisor API with streaming support. |
|
|
Run with: streamlit run src/frontend/streamlit/supervisor_ui/app.py |
|
|
|
|
|
In Docker, set SUPERVISOR_API_URL environment variable. |
|
|
Locally, defaults to http://localhost:8080/api/v1/supervisor |
|
|
""" |
|
|
|
|
|
import streamlit as st |
|
|
from src.sdk import SupervisorClient |
|
|
|
|
|
|
|
|
# Module-level SDK client, shared by every Streamlit rerun of this script.
client = SupervisorClient()

# NOTE: st.set_page_config must be the first Streamlit command executed;
# it is, since no st.* call precedes it above.
st.set_page_config(page_title="HR Supervisor Agent", layout="wide")
|
|
|
|
|
|
|
|
# Seed per-session state on the first script run; subsequent reruns keep
# the existing values (setdefault only assigns when the key is absent).
st.session_state.setdefault("messages", [])
st.session_state.setdefault("thread_id", None)
|
|
|
|
|
# Page header. The title previously contained mojibake ("π€") from an
# encoding round-trip; restored to the intended robot emoji.
st.title("🤖 HR Supervisor Agent")
st.caption("I can query the candidate database and help with recruitment tasks.")
|
|
|
|
|
|
|
|
with st.sidebar:
    st.header("Controls")

    # "Start New Chat" asks the API for a fresh thread, then resets the
    # local conversation state.
    if st.button("Start New Chat", type="primary", use_container_width=True):
        try:
            st.session_state.thread_id = client.new_chat()
        except Exception:
            # Broad catch is deliberate at this UI boundary: any transport
            # failure is shown to the user instead of crashing the app.
            st.error("⚠️ Cannot connect to API. Is the server running?")
        else:
            # Reset and rerun only on success. Previously st.rerun() ran
            # unconditionally, which immediately restarted the script and
            # wiped the st.error() message before the user could see it.
            st.session_state.messages = []
            st.session_state.token_usage = 0
            st.rerun()

    st.divider()
    st.caption(f"Chat ID:\n`{st.session_state.get('thread_id', 'Not set')}`")
|
|
|
|
|
|
|
|
# Placeholder so the token metric can be updated in place later in the
# script (after a chat response arrives).
token_metric_placeholder = st.empty()

if "token_usage" in st.session_state:
    usage = st.session_state["token_usage"]
    token_metric_placeholder.metric(label="Context Window Tokens", value=usage)
|
|
|
|
|
|
|
|
# Replay the stored conversation so it survives Streamlit reruns.
for entry in st.session_state.messages:
    role, text = entry["role"], entry["content"]
    with st.chat_message(role):
        st.markdown(text)
|
|
|
|
|
|
|
|
# Main chat turn: runs once per user submission (walrus binds the text).
if prompt := st.chat_input("Ask me anything about candidates..."):

    # Persist the user's turn so it survives the next rerun.
    st.session_state.messages.append({"role": "user", "content": prompt})

    with st.chat_message("user"):
        st.markdown(prompt)

    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        full_response = ""

        try:
            with st.spinner("Thinking..."):
                # thread_id may be None on the very first turn; the API is
                # then expected to create a thread and return its id.
                response = client.chat(prompt, st.session_state.thread_id)

            full_response = response.content
            message_placeholder.markdown(full_response)

            # Adopt the server-assigned thread id after the first exchange.
            if st.session_state.thread_id is None:
                st.session_state.thread_id = response.thread_id

            # Refresh the sidebar token metric in place.
            st.session_state.token_usage = response.token_count
            token_metric_placeholder.metric(
                label="Context Window Tokens",
                value=response.token_count
            )

        except Exception as e:
            # Surface any failure inline rather than crashing the app.
            # The leading marker was previously mojibake ("β"); restored
            # to the intended cross-mark emoji.
            full_response = f"❌ Error: {str(e)}"
            message_placeholder.error(full_response)

        # Guard against an empty (but non-erroring) response.
        if not full_response:
            full_response = "No response received from agent."
            message_placeholder.warning(full_response)

    # Persist the assistant's turn (including error/warning text).
    st.session_state.messages.append({"role": "assistant", "content": full_response})
|
|
|