import streamlit as st
import time
from io import BytesIO
from datetime import datetime

from groq_llm import get_llm
from streaming_callback import StreamHandler
from hrms_guardrails import is_hrms_query, hrms_refusal
from roles import role_allowed
from prompts import SYSTEM_PROMPT, ROLE_PROMPTS
from ingestion import read_file
from vectorstore import init_vectorstore, get_retriever

from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.pagesizes import A4


# =========================================================
# CHATGPT-STYLE MEMORY (RECENT CONTEXT ONLY)
# =========================================================
def build_chat_history(messages, max_turns=6):
    """Render the tail of the conversation as a plain-text transcript.

    Each entry becomes one ``User: ...`` or ``Assistant: ...`` line.

    NOTE(review): despite its name, ``max_turns`` slices the last N
    *messages*, not N user/assistant turn pairs — confirm intent.
    """
    transcript = []
    for entry in messages[-max_turns:]:
        speaker = "User" if entry["role"] == "user" else "Assistant"
        transcript.append(f"{speaker}: {entry['content']}")
    return "\n".join(transcript)


# =========================================================
# HRMS INTENT CLASSIFICATION
# =========================================================
INTENTS = {
    "LEAVE": ["leave", "holiday", "pto", "sick"],
    "ATTENDANCE": ["attendance", "shift", "late", "overtime"],
    "PAYROLL": ["payroll", "salary", "deduction", "cutoff"],
    "ONBOARDING": ["onboarding", "joining", "probation"],
    "PERFORMANCE": ["performance", "appraisal", "review"],
    "COMPLIANCE": ["policy", "compliance", "sop", "rule"],
}


def detect_intent(question: str) -> str:
    """Classify *question* into a coarse HRMS intent by keyword match.

    Returns the first intent whose keyword appears (dict order), or
    ``"GENERAL_HR"`` when nothing matches.
    """
    lowered = question.lower()
    for intent, keywords in INTENTS.items():
        if any(keyword in lowered for keyword in keywords):
            return intent
    return "GENERAL_HR"


# =========================================================
# PAGE CONFIG
# =========================================================
st.set_page_config(
    page_title="HRMS Chatbot",
    page_icon="💼",
    layout="wide",
)

# =========================================================
# CHATGPT-LIKE PHARMA UI
# =========================================================
# NOTE(review): the CSS payload appears to have been lost when this file
# was reflowed — the markdown body below is empty. Restore from VCS.
st.markdown(""" """, unsafe_allow_html=True)
========================================================= # SESSION STATE # ========================================================= if "messages" not in st.session_state: st.session_state.messages = [] if "vector_ready" not in st.session_state: st.session_state.vector_ready = False # ========================================================= # SIDEBAR (MINIMAL – LIKE CHATGPT) # ========================================================= with st.sidebar: st.markdown("### 💼 HRMS Chatbot") role = st.selectbox("Active Role", ["Employee", "HR", "Manager"]) st.markdown("---") uploaded = st.file_uploader("Upload HR Document", type=["pdf", "docx", "txt"]) if uploaded: with st.spinner("Indexing HR document..."): text = read_file(uploaded) init_vectorstore([text]) st.session_state.vector_ready = True st.success("HR document indexed") st.markdown("---") if st.button("🆕 New Chat"): st.session_state.messages = [] st.session_state.vector_ready = False st.rerun() if st.session_state.messages: def generate_pdf(): buffer = BytesIO() doc = SimpleDocTemplate(buffer, pagesize=A4) styles = getSampleStyleSheet() story = [] story.append(Paragraph("HRMS Chat Report", styles["Title"])) story.append(Spacer(1, 10)) story.append(Paragraph(f"Role: {role}", styles["Normal"])) story.append(Paragraph( f"Generated on: {datetime.now().strftime('%d %b %Y %H:%M')}", styles["Normal"] )) story.append(Spacer(1, 14)) for msg in st.session_state.messages: prefix = "Q:" if msg["role"] == "user" else "A:" story.append( Paragraph(f"{prefix} {msg['content']}", styles["Normal"]) ) story.append(Spacer(1, 8)) doc.build(story) buffer.seek(0) return buffer st.download_button( "📄 Download Chat (PDF)", data=generate_pdf(), file_name="hrms_chat_report.pdf", mime="application/pdf" ) # ========================================================= # HEADER # ========================================================= st.markdown("""

💼 HRMS Chatbot

Advanced HRMS assistant for Employees, HR, and Managers

""", unsafe_allow_html=True) st.markdown( f"**Active Role:** {role}", unsafe_allow_html=True ) # ========================================================= # CHAT HISTORY (CHATGPT STYLE) # ========================================================= for msg in st.session_state.messages: with st.chat_message(msg["role"]): st.markdown(msg["content"]) # ========================================================= # CHAT INPUT (ENTER = SEND | SHIFT+ENTER = NEW LINE) # ========================================================= question = st.chat_input( "Message HRMS Chatbot… (Enter to send • Shift+Enter for new line)" ) if question: # ---------------- USER ---------------- st.session_state.messages.append({"role": "user", "content": question}) with st.chat_message("user"): st.markdown(question) # ---------------- ASSISTANT (CHATGPT STYLE) ---------------- with st.chat_message("assistant"): response_container = st.empty() response_container.markdown("_Thinking…_") if not is_hrms_query(question) or not role_allowed(role, question): answer = hrms_refusal() response_container.markdown(answer) else: intent = detect_intent(question) retriever = get_retriever() context_docs = ( retriever.get_relevant_documents(intent) if retriever and st.session_state.vector_ready else [] ) chat_history = build_chat_history(st.session_state.messages) final_prompt = f""" {SYSTEM_PROMPT} ROLE CONTEXT: {ROLE_PROMPTS[role]} INTENT: {intent} CONVERSATION HISTORY: {chat_history} HR DOCUMENT CONTEXT: {context_docs} USER QUESTION: {question} RESPONSE GUIDELINES: - Use clear Markdown formatting - Use bullet points and headings when helpful - Be concise but complete - Stay strictly within HRMS scope """ stream_handler = StreamHandler(response_container) llm = get_llm(callbacks=[stream_handler]) llm.invoke(final_prompt) answer = stream_handler.text # ---------------- SAVE ASSISTANT MESSAGE ---------------- st.session_state.messages.append( {"role": "assistant", "content": answer} )