File size: 9,077 Bytes
aed81af
739f5c1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
import gradio as gr
from gradio import ChatMessage
import time
import asyncio
from functools import partial
import random
import logging

logging.basicConfig(level=logging.INFO)

# Delay (seconds) between streamed "thinking" lines in simulate_thinking_chat.
# NOTE(review): drawn ONCE at import time, so every step in a session uses the
# same 1-3 s pause — presumably intentional for this simulation; confirm.
sleep_time = random.randint(1, 3)

# Canned chain-of-thought lines shown while the matching async question
# generator runs. Keys mirror the generator/agent names; the list for the
# selected key is cycled through until the real response is ready.
thoughts = {
  "questioning_agent": [
    "Read the project document and function list.",
    "Determine if the project needs a chatbot, document extraction, or both.",
    "Map the requirements to the provided functions only.",
    "Classify the project as Chatbot, Document Extraction, or Hybrid.",
    "Output a JSON object with the configuration type and selected functions."
  ],
  "client_initial_question": [
    "Identify key topics like company background, industry, challenges, and workflows.",
    "List the specific client questions provided.",
    "Ensure each question aims to gather clear, measurable information.",
    "Format the questions with sample answers as specified.",
    "Return only the list of questions without extra commentary."
  ],
  "generate_client_follow_up": [
    "Review the initial client responses.",
    "Pinpoint areas needing further clarification, such as project vision and current processes.",
    "Develop follow-up questions to explore these areas in more detail.",
    "Incorporate sample answers to guide the client.",
    "Compile a numbered list of the top follow-up questions."
  ],
  "generate_engage_questions": [
    "Examine the client background and chatbot requirements.",
    "Focus on areas like business outcomes, conversational flow, and technical needs.",
    "Formulate context-aware questions to extract detailed insights.",
    "Include sample answers for clarity.",
    "Present a concise list of targeted questions."
  ],
  "generate_page_questions": [
    "Review the client information related to document processing.",
    "Focus on document types, input/output formats, quality, and workflow mapping.",
    "Develop clear and relevant questions for each area.",
    "Provide sample answers to guide responses.",
    "Return a formatted list of document-focused questions."
  ],
  "generate_hybrid_questions": [
    "Recognize that the project involves both chatbot and document processing needs.",
    "Separate the questions into two groups: one for documents and one for chatbots.",
    "Develop targeted questions for each group using the client context.",
    "Add sample answers to provide clarity.",
    "Combine both sets into one cohesive list."
  ],
  "generate_general_questions": [
    "Review the overall client background and project requirements.",
    "Identify key areas such as integration, performance, and security.",
    "Craft context-aware questions that are precise and actionable.",
    "Include sample answers to illustrate expected responses.",
    "Return a clear list of general questions."
  ],
  "generate_further_follow_up_questions": [
    "Examine the client background and previous responses in detail.",
    "Identify any gaps or unclear points needing further detail.",
    "Formulate direct follow-up questions using techniques like the 5 Whys.",
    "Reference prior responses to maintain context.",
    "List each follow-up question with sample answers for guidance."
  ]
}

async def client_initial_question():
    """Produce the markdown questionnaire for the first client contact.

    Returns:
        str: A fixed, markdown-formatted block of discovery questions
        covering company background, current challenges, workflow impact,
        the existing system, and pain points.
    """
    questionnaire = """
    # Client Information Gathering Questions
        
    ### Company Background and Industry
    1. Can you provide some background about your company?
    2. Which industry do you operate in, and what is your company's niche or specialization?
    3. Who are your primary customers?
    4. What are the main objectives you want to achieve?
    5. What key features or functionalities do you need?
        
    ### Current Challenges
    6. What are the biggest challenges your firm is currently facing?
    7. Can you describe your current processes?
        
    ### Workflow and System Impact
    8. How will this solution benefit your firm as a whole?
        
    ### Existing Workflow or System
    9. Can you describe your current workflow or system?
        
    ### Pain Point Identification
    10. Where is your current system falling short or causing delays?
    11. Are there any parts of the process that are particularly time-consuming/ prone to error?
    """
    return questionnaire

async def simulate_thinking_chat(message, history):
    """Stream a simulated "thinking" trace, then the generated questions.

    Gradio ChatInterface handler. Selects a question generator based on
    conversation length, runs it as a background task, and while it is
    pending streams canned thought lines from the module-level `thoughts`
    dict into a single pending ChatMessage. Each yield is a
    (message(s), editor_text) pair; the final yield carries the generator's
    result both as a chat message and as the value for the
    `additional_outputs` textbox.

    Args:
        message: Latest user message (only logged/printed here).
        history: Prior chat messages; len(history) picks the generator.

    Yields:
        tuple: (ChatMessage or list[ChatMessage], str).
    """
    logging.info(f"Received message: {message}")
    logging.info(f"Initial history: {history}")
    start_time = time.time()
    # Placeholder "thinking" bubble; mutated in place as thoughts accumulate.
    response = ChatMessage(
        content="",
        metadata={"title": "_Processing_ step-by-step", "id": 0, "status": "pending"}
    )
    yield response, ""

    # Route by conversation length: first turn -> initial questions,
    # turns 1-3 -> general questions, turns 4-6 -> further follow-ups,
    # anything longer falls back to the initial questions.
    if len(history) == 0:
        function_name = "client_initial_question"
        current_thoughts = thoughts["client_initial_question"]
        async_func = client_initial_question()
    elif len(history) <= 3:
        function_name = "generate_general_questions"
        current_thoughts = thoughts["generate_general_questions"]
        async_func = generate_general_questions()  # defined later in this module
    elif len(history) <= 6:
        function_name = "generate_further_follow_up_questions"
        current_thoughts = thoughts["generate_further_follow_up_questions"]
        async_func = generate_further_follow_up_questions()  # defined later in this module
    else:
        # Default to client initial questions if no other case matches
        function_name = "client_initial_question"
        current_thoughts = thoughts["client_initial_question"]
        async_func = client_initial_question()

    # Schedule the coroutine concurrently so thoughts can stream while it runs.
    response_task = asyncio.create_task(async_func)
    
    # Emit one canned thought per `sleep_time` tick until the task finishes;
    # the modulo lets the thought list repeat if the task outlasts it.
    accumulated_thoughts = ""
    thought_index = 0
    while not response_task.done():
        thought = current_thoughts[thought_index % len(current_thoughts)]
        thought_index += 1
        
        await asyncio.sleep(sleep_time)
        accumulated_thoughts += f"- {thought}\n\n"
        response.content = accumulated_thoughts.strip()
        yield response, ""

    # Get the result from the completed task
    result = await response_task

    # Close out the thinking bubble and record elapsed wall-clock time.
    response.metadata["status"] = "done"
    response.metadata["duration"] = time.time() - start_time
    yield response, ""

    # Final update: thinking bubble plus the actual answer message, and the
    # raw result for the editable textbox wired via additional_outputs.
    response_list = [
        response,
        ChatMessage(content=result)
    ]
    print(f"Function: {function_name}\nMessage: {message},\nLen: {len(history)},\nHistory: {history}")

    yield response_list, result

# Add new async functions for the additional question types
async def generate_general_questions(delay: float = 10.0):
    """Simulate generating general integration/deployment questions.

    Args:
        delay: Seconds to sleep before returning, simulating model latency.
            Defaults to 10 to preserve the original behavior; existing
            callers pass no argument.

    Returns:
        str: Markdown-formatted list of general questions with sample answers.
    """
    await asyncio.sleep(delay)  # simulated work; configurable for tests
    return """
    # General Integration and Deployment Questions
    
    1. What are your current system integrations?
       Sample: "We use Salesforce for CRM and SAP for ERP"
    
    2. What are your security requirements?
       Sample: "We need SSO integration and data encryption at rest"
    
    3. What is your expected deployment timeline?
       Sample: "We aim to go live within 3 months"
    
    4. Do you have any specific performance requirements?
       Sample: "System should handle 1000 concurrent users"
    
    5. What is your preferred hosting environment?
       Sample: "We prefer AWS cloud hosting"
    """

async def generate_further_follow_up_questions(delay: float = 10.0):
    """Simulate generating follow-up questions from earlier responses.

    Args:
        delay: Seconds to sleep before returning, simulating model latency.
            Defaults to 10 to preserve the original behavior; existing
            callers pass no argument.

    Returns:
        str: Markdown-formatted list of follow-up questions with sample answers.
    """
    await asyncio.sleep(delay)  # simulated work; configurable for tests
    return """
    # Follow-up Questions Based on Previous Responses
    
    1. Could you elaborate on your current workflow bottlenecks?
       Sample: "Manual data entry takes 4 hours daily"
    
    2. What specific metrics would indicate project success?
       Sample: "50% reduction in processing time"
    
    3. Have you identified any potential risks or challenges?
       Sample: "Data migration from legacy systems"
    
    4. What is your expected ROI timeframe?
       Sample: "We expect to see returns within 6 months"
    
    5. Are there any compliance requirements we should be aware of?
       Sample: "We need to comply with GDPR and HIPAA"
    """

# Chat widget built up front so it can be handed to gr.ChatInterface below.
chatbot = gr.Chatbot(height=650 ,elem_classes=["chatbot-container"], label="Project Questions")

# Two-pane layout: left is an editable textbox that mirrors the latest
# generated questions; right is the streaming chat driven by
# simulate_thinking_chat, whose second yielded value fills the textbox
# via additional_outputs.
with gr.Blocks(fill_height=True) as demo:
    with gr.Row():
        with gr.Column(scale=1):
            # output = gr.Textbox(label="Output")
            current_question = gr.Textbox(label="Edit Area", lines=30, interactive=True)
            # submit_btn = gr.Button("Submit")
            # clear_btn = gr.Button("Clear Chat")
        with gr.Column(scale=1):
            gr.ChatInterface(
                simulate_thinking_chat,
                chatbot= chatbot,
                type="messages",
                fill_height=True,
                additional_outputs= [current_question],
                flagging_mode= "manual"
                # show_progress= 'minimal',
                # save_history= True
            )

# Start the Gradio server (blocking call) when the module is run.
demo.launch()