"""Gradio demo: a consulting-style chat agent that shows simulated
"thinking" steps while an async question-generation coroutine runs, then
emits the generated questions plus an editable copy in a side textbox."""

import gradio as gr
from gradio import ChatMessage
import time
import asyncio
from functools import partial
import random
import logging
import re

logging.basicConfig(level=logging.INFO)

# Chosen once at import time: every simulated "thinking" pause in this
# process uses the same 1-3 second delay.
sleep_time = random.randint(1, 3)

# Canned chain-of-thought lines, keyed by generator function name. They are
# cycled through on screen while the real coroutine runs in the background.
# NOTE(review): only three of these keys are ever selected by
# simulate_thinking_chat; the rest appear to be reserved for future routing.
thoughts = {
    "questioning_agent": [
        "Read the project document and function list.",
        "Determine if the project needs a chatbot, document extraction, or both.",
        "Map the requirements to the provided functions only.",
        "Classify the project as Chatbot, Document Extraction, or Hybrid.",
        "Output a JSON object with the configuration type and selected functions."
    ],
    "client_initial_question": [
        "Identify key topics like company background, industry, challenges, and workflows.",
        "List the specific client questions provided.",
        "Ensure each question aims to gather clear, measurable information.",
        "Format the questions with sample answers as specified.",
        "Return only the list of questions without extra commentary."
    ],
    "generate_client_follow_up": [
        "Review the initial client responses.",
        "Pinpoint areas needing further clarification, such as project vision and current processes.",
        "Develop follow-up questions to explore these areas in more detail.",
        "Incorporate sample answers to guide the client.",
        "Compile a numbered list of the top follow-up questions."
    ],
    "generate_engage_questions": [
        "Examine the client background and chatbot requirements.",
        "Focus on areas like business outcomes, conversational flow, and technical needs.",
        "Formulate context-aware questions to extract detailed insights.",
        "Include sample answers for clarity.",
        "Present a concise list of targeted questions."
    ],
    "generate_page_questions": [
        "Review the client information related to document processing.",
        "Focus on document types, input/output formats, quality, and workflow mapping.",
        "Develop clear and relevant questions for each area.",
        "Provide sample answers to guide responses.",
        "Return a formatted list of document-focused questions."
    ],
    "generate_hybrid_questions": [
        "Recognize that the project involves both chatbot and document processing needs.",
        "Separate the questions into two groups: one for documents and one for chatbots.",
        "Develop targeted questions for each group using the client context.",
        "Add sample answers to provide clarity.",
        "Combine both sets into one cohesive list."
    ],
    "generate_general_questions": [
        "Review the overall client background and project requirements.",
        "Identify key areas such as integration, performance, and security.",
        "Craft context-aware questions that are precise and actionable.",
        "Include sample answers to illustrate expected responses.",
        "Return a clear list of general questions."
    ],
    "generate_further_follow_up_questions": [
        "Examine the client background and previous responses in detail.",
        "Identify any gaps or unclear points needing further detail.",
        "Formulate direct follow-up questions using techniques like the 5 Whys.",
        "Reference prior responses to maintain context.",
        "List each follow-up question with sample answers for guidance."
    ]
}


async def client_initial_question():
    """Return the markdown template of initial client-intake questions.

    Returns:
        str: A markdown-formatted block of numbered questions.
    """
    return """
# Client Information Gathering Questions

### Company Background and Industry
1. Can you provide some background about your company?
2. Which industry do you operate in, and what is your company's niche or specialization?
3. Who are your primary customers?
4. What are the main objectives you want to achieve?
5. What key features or functionalities do you need?

### Current Challenges
6. What are the biggest challenges your firm is currently facing?
7. Can you describe your current processes?

### Workflow and System Impact
8. How will this solution benefit your firm as a whole?

### Existing Workflow or System
9. Can you describe your current workflow or system?

### Pain Point Identification
10. Where is your current system falling short or causing delays?
11. Are there any parts of the process that are particularly time-consuming/ prone to error?
"""


async def clean_sample_answers(text):
    """Clean up sample answers in the text by removing 'Sample:' and its content.

    Args:
        text (str): Input text containing sample answers.

    Returns:
        str: Cleaned text with 'Sample: "..."' replaced by 'Answer:'.
             On any error the original text is returned unchanged.
    """
    try:
        if not text:
            return text  # Return early if the input text is empty
        # Match 'Sample:' (case-insensitive via (?i)) and its double-quoted
        # content, replacing the whole match with a fresh 'Answer:' line.
        cleaned_text = re.sub(
            r'(?i)\s*Sample\s*:\s*"[^"]*"', '\nAnswer:', text, flags=re.MULTILINE
        )
        return cleaned_text
    except Exception as e:
        # Best-effort cleanup: never let a regex problem break the chat flow.
        logging.error(f"Error cleaning sample answers: {e}")
        return text  # Return original text as fallback if an error occurs


async def simulate_thinking_chat(message, history):
    """Chat handler: stream fake "thinking" steps, then the real questions.

    Args:
        message: The user's latest chat message (unused for routing; routing
            is based purely on conversation length).
        history: Prior chat messages; its length selects which question
            generator runs.

    Yields:
        Tuples of (ChatMessage or list of ChatMessage, str) — the second
        element feeds the editable textbox via ``additional_outputs``.
    """
    logging.info(f"Received message: {message}")
    logging.info(f"Initial history: {history}")
    start_time = time.time()
    response = ChatMessage(
        content="",
        metadata={"title": "_Processing_ step-by-step", "id": 0, "status": "pending"}
    )
    yield response, ""

    # Route on conversation length: 0 -> intake questions, 1-3 -> general
    # questions, 4-6 -> follow-ups, anything longer falls back to intake.
    if len(history) == 0:
        function_name = "client_initial_question"
        current_thoughts = thoughts["client_initial_question"]
        async_func = client_initial_question()
    elif len(history) <= 3:
        function_name = "generate_general_questions"
        current_thoughts = thoughts["generate_general_questions"]
        async_func = generate_general_questions()
    elif len(history) <= 6:
        function_name = "generate_further_follow_up_questions"
        current_thoughts = thoughts["generate_further_follow_up_questions"]
        async_func = generate_further_follow_up_questions()
    else:
        # Default to client initial questions if no other case matches.
        function_name = "client_initial_question"
        current_thoughts = thoughts["client_initial_question"]
        async_func = client_initial_question()

    # Run the selected coroutine concurrently with the "thinking" animation.
    response_task = asyncio.create_task(async_func)

    # Stream canned thoughts (cycling if the task outlives the list) until
    # the real generator finishes.
    accumulated_thoughts = ""
    thought_index = 0
    while not response_task.done():
        thought = current_thoughts[thought_index % len(current_thoughts)]
        thought_index += 1
        await asyncio.sleep(sleep_time)
        accumulated_thoughts += f"- {thought}\n\n"
        response.content = accumulated_thoughts.strip()
        yield response, ""

    # Get the result from the completed task.
    result = await response_task

    # Strip 'Sample: "..."' snippets so the edit area shows blank answers.
    user_input_template = await clean_sample_answers(result)

    response.metadata["status"] = "done"
    response.metadata["duration"] = time.time() - start_time
    yield response, ""

    response_list = [
        response,
        ChatMessage(content=result)
    ]
    logging.info(
        f"Function: {function_name}\nMessage: {message},\n"
        f"Len: {len(history)},\nHistory: {history}"
    )
    yield response_list, user_input_template


async def generate_general_questions():
    """Return general integration/deployment questions (simulated latency)."""
    await asyncio.sleep(10)  # Simulate a slow backend call.
    return """
# General Integration and Deployment Questions

1. What are your current system integrations?
Sample: "We use Salesforce for CRM and SAP for ERP"

2. What are your security requirements?
Sample: "We need SSO integration and data encryption at rest"

3. What is your expected deployment timeline?
Sample: "We aim to go live within 3 months"

4. Do you have any specific performance requirements?
Sample: "System should handle 1000 concurrent users"

5. What is your preferred hosting environment?
Sample: "We prefer AWS cloud hosting"
"""


async def generate_further_follow_up_questions():
    """Return follow-up questions for later turns (simulated latency)."""
    await asyncio.sleep(10)  # Simulate a slow backend call.
    return """
# Follow-up Questions Based on Previous Responses

1. Could you elaborate on your current workflow bottlenecks?
Sample: "Manual data entry takes 4 hours daily"

2. What specific metrics would indicate project success?
Sample: "50% reduction in processing time"

3. Have you identified any potential risks or challenges?
Sample: "Data migration from legacy systems"

4. What is your expected ROI timeframe?
Sample: "We expect to see returns within 6 months"

5. Are there any compliance requirements we should be aware of?
Sample: "We need to comply with GDPR and HIPAA"
"""


# --- UI wiring -------------------------------------------------------------

chatbot = gr.Chatbot(
    height=650,
    elem_classes=["chatbot-container"],
    label="Project Questions",
)

with gr.Blocks(fill_height=True) as demo:
    with gr.Row():
        with gr.Column(scale=1):
            # Editable copy of the latest question template (filled by the
            # chat handler through additional_outputs).
            current_question = gr.Textbox(label="Edit Area", lines=30, interactive=True)
        with gr.Column(scale=1):
            gr.ChatInterface(
                simulate_thinking_chat,
                chatbot=chatbot,
                type="messages",
                fill_height=True,
                additional_outputs=[current_question],
                flagging_mode="manual",
            )

demo.launch()