# NOTE: "Spaces: Sleeping" banner text from the Hugging Face Spaces UI was
# captured with this source during extraction; it is page residue, not code.
import gradio as gr
import cohere
import difflib
import os

# Cohere client used for fallback answers when the FAQ has no match.
# SECURITY: never commit a real API key — set the COHERE_API_KEY environment
# variable instead. The placeholder string is kept only as a visible default
# so behavior is unchanged when the variable is absent.
co = cohere.Client(os.environ.get("COHERE_API_KEY", "YOUR_COHERE_API_KEY"))
# Hardcoded FAQ knowledge base: canonical question -> canned answer.
# get_faq_answer() fuzzy-matches user input against these keys with difflib
# before any call to the Cohere model is made.
faq_data = {
    "What does the eligibility verification agent (EVA) do?":
        "EVA automates the process of verifying a patient’s eligibility and benefits information in real-time, eliminating manual data entry errors and reducing claim rejections.",
    "What does the claims processing agent (CAM) do?":
        "CAM streamlines the submission and management of claims, improving accuracy, reducing manual intervention, and accelerating reimbursements.",
    "How does the payment posting agent (PHIL) work?":
        "PHIL automates the posting of payments to patient accounts, ensuring fast, accurate reconciliation of payments and reducing administrative burden.",
    "Tell me about Thoughtful AI's Agents.":
        "Thoughtful AI provides a suite of AI-powered automation agents designed to streamline healthcare processes. These include Eligibility Verification (EVA), Claims Processing (CAM), and Payment Posting (PHIL), among others.",
    "What are the benefits of using Thoughtful AI's agents?":
        "Using Thoughtful AI's Agents can significantly reduce administrative costs, improve operational efficiency, and reduce errors in critical processes like claims management and payment posting."
}

# Minimum difflib similarity ratio (0..1) a user question must reach against
# an FAQ key to count as a match; below this we fall back to the Cohere model.
SIMILARITY_THRESHOLD = 0.7
def get_faq_answer(user_input, faq=None, cutoff=None):
    """Return the canned answer whose FAQ question best matches *user_input*.

    Matching is fuzzy (``difflib.get_close_matches``) and case-insensitive,
    so e.g. "what does eva do?" can still hit the title-cased FAQ key —
    the original exact-case comparison silently missed such inputs.

    Args:
        user_input: Raw question typed by the user.
        faq: Mapping of canonical question -> answer. Defaults to the
            module-level ``faq_data``.
        cutoff: Minimum similarity ratio in [0, 1] for a match. Defaults to
            the module-level ``SIMILARITY_THRESHOLD``.

    Returns:
        The matched answer string, or ``None`` when nothing is close enough.
    """
    if faq is None:
        faq = faq_data
    if cutoff is None:
        cutoff = SIMILARITY_THRESHOLD
    # Compare casefolded text so capitalization cannot block a match, while
    # keeping a map back to the original key to look up the answer.
    lowered = {question.casefold(): question for question in faq}
    match = difflib.get_close_matches(
        user_input.casefold(), list(lowered), n=1, cutoff=cutoff
    )
    return faq[lowered[match[0]]] if match else None
def thoughtful_response(user_input, chat_history):
    """Handle one chat turn: answer from the FAQ if possible, else ask Cohere.

    Args:
        user_input: Text the user submitted from the textbox.
        chat_history: Accumulated list of ``{"role", "content"}`` dicts in
            the Gradio "messages" chatbot format; mutated in place.

    Returns:
        ``(chat_history, chat_history)`` — the same list twice, once for the
        Chatbot component and once for the State component.
    """
    # Ignore empty/whitespace-only submissions instead of appending a blank
    # message and burning an API call on it.
    if not user_input or not user_input.strip():
        return chat_history, chat_history

    # Append user message
    chat_history.append({"role": "user", "content": user_input})

    # Hardcoded knowledge base takes priority over the LLM fallback.
    faq_answer = get_faq_answer(user_input)
    if faq_answer:
        response = f"🧠 Thoughtful AI Agent:\n\n{faq_answer}"
    else:
        try:
            result = co.chat(
                message=user_input,
                model="command-r-plus",
                temperature=0.7,
                chat_history=[],  # You can optionally build chat history here
            )
            response = f"🤖 Cohere:\n\n{result.text}"
        except Exception as e:
            # Surface API failures in-chat rather than crashing the UI.
            response = f"⚠️ Error from Cohere: {e}"

    # Append assistant response
    chat_history.append({"role": "assistant", "content": response})
    return chat_history, chat_history
# Build Gradio UI
with gr.Blocks(title="Thoughtful AI Support Agent") as demo:
    gr.Markdown("## 💬 Thoughtful AI Support Agent\nAsk me about our automation agents. If I don’t know, I’ll ask my smart friend (Cohere)!")
    # type="messages": history is a list of {"role", "content"} dicts,
    # matching exactly what thoughtful_response() appends.
    chatbot = gr.Chatbot(label="Support Chat", type="messages")
    user_input = gr.Textbox(placeholder="Ask a question about Thoughtful AI...", lines=1)
    send_button = gr.Button("Send")
    # Shared conversation list, round-tripped through thoughtful_response and
    # written back to both the Chatbot display and this State.
    state = gr.State([])
    # Both the Send button and pressing Enter in the textbox trigger a turn.
    send_button.click(fn=thoughtful_response, inputs=[user_input, state], outputs=[chatbot, state])
    user_input.submit(fn=thoughtful_response, inputs=[user_input, state], outputs=[chatbot, state])

# Launch with shareable link
demo.launch(ssr_mode=False, share=True)