| import streamlit as st |
| import logging |
| |
| logging.basicConfig(level=logging.INFO) |
| from agents.agent_graph import build_agent_graph |
| from agents.websearch import WebSearchAgent |
| from agents.chat import ChatAgent |
| from datetime import datetime |
| import pytz |
| import time |
|
|
| |
def generate_response_message(response):
    """Simulate streaming by revealing *response* one word at a time.

    Renders into a single Streamlit placeholder, appending a cursor glyph
    while "typing", then leaves the final text in place.

    Args:
        response: The complete bot reply to animate.

    Returns:
        str: The text that ended up rendered (each word followed by a space).
    """
    placeholder = st.empty()
    pieces = []
    for word in response.split():
        pieces.append(word + " ")
        # Show progress so far plus a cursor glyph to mimic live generation.
        placeholder.markdown("".join(pieces) + "β")
        time.sleep(0.05)
    final_text = "".join(pieces)
    placeholder.markdown(final_text)
    return final_text
|
|
| |
def generate_initial_message():
    """Build Kia's opening chat message with a time-of-day greeting.

    The greeting prefix is chosen from the current hour in Vietnam
    (Asia/Ho_Chi_Minh): morning 05-11, afternoon 12-17, evening 18-20,
    and a casual fallback for the overnight hours.

    Returns:
        str: The complete greeting line used as the bot's first turn.
    """
    # stdlib zoneinfo (PEP 615) replaces the legacy third-party pytz
    # dependency; imported locally so this fix is self-contained.
    from zoneinfo import ZoneInfo

    hour = datetime.now(ZoneInfo("Asia/Ho_Chi_Minh")).hour
    if 5 <= hour < 12:
        greeting = "Good morning"
    elif 12 <= hour < 18:
        greeting = "Good afternoon"
    elif 18 <= hour < 21:
        greeting = "Good evening"
    else:
        greeting = "π Hey there"
    return f"{greeting}! I'm Kia β your Know-It-All assistant. Whether it's trivia, tech, or random tidbits, I'm always ready to drop some knowledge. What can I enlighten you about today?"
|
|
# Page setup. st.set_page_config must be the first Streamlit command the
# script executes, so it stays ahead of every other st.* call in this file.
st.set_page_config(
    page_title="Kia β Your Know-It-All Assistant",
    page_icon="π€",
    initial_sidebar_state="expanded"
)
# Page heading rendered as raw HTML; white-space:nowrap keeps the full
# title on a single line on narrow viewports.
st.markdown(
    "<h1 style='white-space:nowrap;'>π€ Kia β Your Know-It-All Assistant</h1>",
    unsafe_allow_html=True
)
|
|
| |
with st.sidebar:
    st.title("βοΈ Settings")

    # Seed the agent-toggle defaults before the widgets are created so the
    # checkboxes below can be driven purely by their session-state keys.
    if 'enable_docsearch' not in st.session_state:
        st.session_state['enable_docsearch'] = True
    if 'enable_websearch' not in st.session_state:
        st.session_state['enable_websearch'] = True

    with st.expander("Agent Settings", expanded=True):
        # FIX: previously these passed value=st.session_state[...] together
        # with a key that already exists in session_state; Streamlit warns
        # that the default is ignored when a keyed widget's value is also
        # set via the Session State API. The seeded key alone is the single
        # source of truth for each toggle.
        st.checkbox("Enable Web Search", key='enable_websearch')
        st.checkbox("Enable Document Search", key='enable_docsearch')
        if st.session_state['enable_docsearch']:
            st.write("Upload PDF(s) or enter URL(s) to index for document search")
            st.file_uploader("Upload PDF document(s)", type=["pdf"], accept_multiple_files=True, key="pdf_files")
            st.text_input("Enter URL(s), comma-separated", key="doc_urls")
            # Rerun so the freshly uploaded files / URLs are re-indexed on
            # the next script pass.
            if st.button("Rebuild Index"):
                st.rerun()

    with st.expander("π¬ Chat Controls", expanded=True):
        # Wipe the conversation and rerun so the page re-renders empty.
        if st.button("Clear Conversation"):
            st.session_state['chat_history'] = []
            st.rerun()

    with st.expander("π οΈ Display Settings", expanded=True):
        # Module-level flags read by the render loop further down the file.
        DEV_MODE = st.checkbox("Enable Dev Mode", value=False)
        STREAMING_ENABLED = st.checkbox("Enable response streaming (just the effect π)", value=True)

    with st.expander("βΉοΈ About", expanded=True):
        st.markdown("""### Kia Chatbot
An intelligent assistant powered by LangGraph.

**Quick Tips:**
- Use clear, specific questions
- Enable Dev Mode for detailed responses
- Clear conversation for fresh context
""")
|
|
|
|
# ---- Session-state bootstrap ----
# Seed every key the rest of the script reads, then append the one-off
# greeting turn so the chat never starts out empty.
for _key, _default in (
    ('chat_history', []),
    ('waiting_for_response', False),
    ('should_stream', False),
):
    if _key not in st.session_state:
        st.session_state[_key] = _default

if 'initial_greeting_added' not in st.session_state:
    # First run of this session: inject the bot's greeting as a chat turn
    # and arm the one-shot streaming effect for it.
    st.session_state['chat_history'].append({
        "user": "",
        "bot": generate_initial_message(),
        "agent": "chat",
        "rephrased_query": None,
        "trace": None,
        "websearch_results": None,
    })
    st.session_state['initial_greeting_added'] = True
    st.session_state['should_stream'] = True
|
|
| |
def set_user_input():
    """on_submit callback for the chat input widget.

    Ignores empty/whitespace-only submissions; otherwise flags that a
    response is pending and appends a placeholder turn (bot side still
    None) to the chat history for the main loop to fill in.
    """
    message = st.session_state.user_message
    if not (message and message.strip()):
        return
    st.session_state['waiting_for_response'] = True
    st.session_state['chat_history'].append({
        "user": message,
        "bot": None,
        "agent": None,
        "rephrased_query": None,
        "trace": None,
    })
| |
|
|
|
|
| |
|
|
|
|
|
|
| |
| |
| |
| |
|
|
def _render_sources(trace):
    """Render clickable source links from the first websearch 'search' step in *trace*.

    Handles both shapes of raw_results observed in traces: a Serper-style
    dict with an 'organic' list, or a bare list of result dicts / URLs.
    FIX: the previous code called raw_results.get(...) unconditionally,
    which raised AttributeError whenever raw_results was a list, so the
    list branch was unreachable.
    """
    for step in trace:
        if step.get('agent') != 'websearch' or step.get('step') != 'search':
            continue
        raw_results = step.get('raw_results')
        if isinstance(raw_results, dict) and raw_results.get('organic'):
            st.markdown("**Sources:**")
            for idx, item in enumerate(raw_results['organic'], 1):
                link = item.get('link')
                title = item.get('title', '')
                if link:
                    st.markdown(f"{idx}. [{title}]({link})")
        elif isinstance(raw_results, list) and raw_results:
            st.markdown("**Sources:**")
            for idx, item in enumerate(raw_results, 1):
                link = item.get('link') if isinstance(item, dict) else item
                title = item.get('title', '') if isinstance(item, dict) else ''
                if link:
                    st.markdown(f"{idx}. [{title}]({link})")
        # Only the first websearch/search step contributes sources.
        break


# ---- Chat transcript rendering ----
for i, turn in enumerate(st.session_state['chat_history']):
    # User side of the turn (the injected greeting has user == "").
    if turn.get('user'):
        with st.chat_message("user", avatar="π€"):
            st.markdown(turn['user'])

    is_last = i == len(st.session_state['chat_history']) - 1

    # While the agent graph hasn't answered yet, show a status line that
    # previews which agent the orchestrator is about to dispatch to.
    if st.session_state['waiting_for_response'] and is_last:
        from agents.orchestrator import OrchestratorAgent
        pending_input = st.session_state['chat_history'][-1]['user']
        prior_history = st.session_state['chat_history'][:-1]
        try:
            next_agent = OrchestratorAgent()._decide_agent(pending_input, prior_history)
        except Exception as e:
            logging.error("Error deciding next agent: %s", e)
            next_agent = 'chat'
        with st.chat_message("Kia", avatar="π€"):
            if next_agent == 'documentsearch' and st.session_state.get('enable_docsearch', False):
                st.markdown("π Kia is searching for documents...")
            elif next_agent == 'websearch' and st.session_state.get('enable_websearch', False):
                st.markdown("π Kia is searching the web...")
            else:
                st.markdown("π Kia is thinking...")
        continue

    # Nothing to render on the bot side yet.
    if turn.get('bot') is None:
        continue

    if is_last and STREAMING_ENABLED and st.session_state['should_stream']:
        # One-shot typing animation for the newest reply.
        with st.chat_message("Kia", avatar="π€"):
            generate_response_message(turn['bot'])
            if turn.get('trace') is not None:
                _render_sources(turn['trace'])
        st.session_state['should_stream'] = False
    else:
        with st.chat_message("Kia", avatar="π€"):
            st.markdown(turn['bot'])
            if turn.get('trace') is not None:
                _render_sources(turn['trace'])

    # Dev Mode: dump each agent's trace steps in fixed pipeline order.
    if DEV_MODE and turn.get('trace'):
        trace = turn.get('trace', [])

        # Group steps by owning agent, preserving execution order within each.
        agent_groups = {}
        for idx, step in enumerate(trace):
            agent_groups.setdefault(step.get('agent', 'unknown'), []).append((idx, step))

        agent_order = [
            ('orchestrator', 'OrchestratorAgent'),
            ('documentsearch', 'DocumentSearchAgent'),
            ('websearch', 'WebSearchAgent'),
            ('chat', 'ChatAgent'),
        ]

        for ag_key, display_name in agent_order:
            steps = agent_groups.get(ag_key, [])
            with st.expander(f"{display_name} Steps", expanded=False):
                if not steps:
                    st.write("No steps executed.")
                for idx, step in steps:
                    st.markdown(f"**Step {idx+1}:**")
                    if not isinstance(step, dict):
                        st.code(str(step))
                        continue
                    for k, v in step.items():
                        st.markdown(f"**{k}:**")
                        if k == 'context':
                            # 'context' may be a list of retrieved documents
                            # (objects exposing page_content) or a plain
                            # string; tabulate either form.
                            if isinstance(v, list):
                                table_data = [[i+1, getattr(doc, 'page_content', str(doc)).strip()] for i, doc in enumerate(v)]
                            else:
                                paragraphs = str(v).split('\n')
                                table_data = [[i+1, p.strip()] for i, p in enumerate(paragraphs) if p.strip()]
                            if table_data:
                                import pandas as pd
                                df = pd.DataFrame(table_data, columns=["#", "Content"])
                                st.dataframe(df, use_container_width=True,
                                             column_config={"#": st.column_config.NumberColumn("#", width=50),
                                                            "Content": st.column_config.TextColumn("Content")})
                        else:
                            # All other keys (prompt, response, classification,
                            # ...) previously went through identical elif/else
                            # branches; collapsed into one.
                            st.code(str(v))
|
|
| |
if st.session_state['waiting_for_response']:
    # A user turn was just appended by set_user_input; run the agent graph
    # on it, write the answer back into that turn, then rerun to render.
    user_input = st.session_state['chat_history'][-1]['user']
    agent_graph = build_agent_graph()
    chat_history = st.session_state['chat_history'][:-1]
    doc_urls = None
    if st.session_state.get('doc_urls'):
        doc_urls = [u.strip() for u in st.session_state['doc_urls'].split(",") if u.strip()]
    pdf_files = st.session_state.get('pdf_files', [])
    state = {
        "user_input": user_input,
        "chat_history": chat_history,
        "enable_docsearch": st.session_state['enable_docsearch'],
        "enable_websearch": st.session_state['enable_websearch'],
        "doc_urls": doc_urls,
        "pdf_files": pdf_files,
    }

    try:
        result_state = agent_graph.invoke(state)
    except Exception as e:
        # Graph failure: fall back to the orchestrator's canned error
        # response so the UI still renders a bot turn.
        from agents.orchestrator import OrchestratorAgent
        error_result = OrchestratorAgent().handle_system_error(e)
        result_state = {
            'response': error_result['response'],
            'agent': error_result['agent'],
            'trace': error_result['trace'],
            'websearch_results': error_result['websearch_results'],
        }

    response = result_state.get('response', '')
    agent_name = result_state.get('agent', result_state.get('next_agent', 'chat'))
    rephrased_query = None

    if agent_name == 'websearch':
        # Flatten the prior conversation and ask the LLM for a
        # search-friendly rewrite of the latest question (shown in the UI).
        # Uses the WebSearchAgent already imported at the top of the file.
        history_str = "".join(
            f"User: {t['user']}\nBot: {t['bot']}\n"
            for t in chat_history
            if t.get('bot')
        )
        rephrase_prompt = (
            f"Given the following conversation history:\n"
            f"{history_str}"
            f"User: {user_input}\n"
            f"Rephrase the user's latest question so it is clear and complete for a web search. "
            f"Only output the rephrased question."
        )
        rephrased_query = WebSearchAgent().llm.generate(rephrase_prompt).strip()

    # Fill in the bot side of the pending turn.
    st.session_state['chat_history'][-1].update({
        "bot": response,
        "agent": agent_name,
        "rephrased_query": rephrased_query,
        "trace": result_state.get('trace', []),
        "websearch_results": result_state.get('websearch_results', []) if agent_name == 'websearch' else None,
    })

    # Arm the one-shot typing animation for this fresh reply, clear the
    # pending flag, and rerun so the render loop shows the final turn.
    st.session_state['should_stream'] = True
    st.session_state['waiting_for_response'] = False
    st.rerun()
|
|
| |
| user_input = st.chat_input("Enter your message and press Enter...", key="user_message", on_submit=set_user_input) |