# Hugging Face Spaces chat demo (page-scrape header removed)
import time

import gradio as gr

# -------------------------
# Gradio function
# -------------------------
def generate_response_gradio(message, history):
    """
    Answer *message* with the ConversationalRetrievalChain, keep the Gradio
    chat history in sync, and append the response time to the reply.

    Parameters
    ----------
    message : str
        The user's new question.
    history : list[tuple[str, str]] | None
        Gradio chat history as (user, bot) pairs; None on the first turn.

    Returns
    -------
    tuple[list, list]
        The updated history twice: once for the Chatbot display and once
        for the history input of the next submit.
    """
    history = history or []  # Gradio passes None initially

    # Strip the "⏱ Response Time" suffix we append below so the timing
    # text is not fed back to the chain as part of the conversation.
    langchain_history = [(user, bot.split("\n⏱")[0]) for user, bot in history]

    # perf_counter() is monotonic, so the elapsed measurement cannot be
    # skewed by system clock adjustments (unlike time.time()).
    start_time = time.perf_counter()
    # Get answer from ConversationalRetrievalChain (defined elsewhere in
    # this file — presumably a LangChain chain; verify against setup code).
    out = qa_chain({"question": message, "chat_history": langchain_history})
    answer = out["answer"]
    elapsed_time = time.perf_counter() - start_time

    # Append to history in the (user, bot) tuple format Gradio expects.
    history.append((message, f"{answer}\n⏱ Response Time: {elapsed_time:.2f} sec"))
    return history, history
# -------------------------
# Gradio UI
# -------------------------
with gr.Blocks() as demo:
    gr.Markdown("## 🤖 Chat with HR Policies")

    # Conversation display plus a single text input; pressing Enter submits.
    chat_window = gr.Chatbot(label="Chat Window", height=400)
    user_box = gr.Textbox(
        placeholder="Type your message and press Enter",
        show_label=False,
        autofocus=True,
    )

    # First handler updates the conversation (the Chatbot component doubles
    # as the stored history); the second clears the textbox afterwards.
    user_box.submit(
        generate_response_gradio,
        [user_box, chat_window],
        [chat_window, chat_window],
    )
    user_box.submit(lambda: "", None, user_box)

demo.launch(inline=True, share=True)