ICAS03 committed on
Commit
217d90f
·
1 Parent(s): 47258ec

- Fix the dynamic sequence

Browse files

- changed the prompt for generate_engage_follow_up
- removed some old prompts

Files changed (4) hide show
  1. Project.py +81 -70
  2. app.py +6 -63
  3. event_handler.py +75 -5
  4. prompt_configs.py +90 -238
Project.py CHANGED
@@ -186,6 +186,7 @@ class Project:
186
  }
187
  )
188
 
 
189
  def gather_project_input(self):
190
  """Generate context-aware questions to gather project requirements"""
191
  return self.execute_prompt(
@@ -204,79 +205,89 @@ class Project:
204
  }
205
  )
206
 
 
207
  ## Generate PRD and components from project details ##
208
  def generate_prd_and_components(self, progress=gr.Progress()):
209
- """Generate PRD and components from project details, streaming results"""
210
- results = set()
211
-
212
- # Generate PRD
213
- yield "Generating PRD...", results
214
- prd_response = self.execute_prompt(
215
- "generate_prd",
216
- {
217
- "project_detail": self.get_project_detail()
218
- }
219
- )
220
-
221
- # Parse the PRD JSON response
222
- try:
223
- prd_json = json.loads(prd_response)
224
- self.generated_prd = prd_json.get("detailed_breakdown", "")
225
- except json.JSONDecodeError:
226
- print("Warning: Could not parse PRD as JSON, using raw response")
227
- self.generated_prd = prd_response
228
-
229
- # Add PRD to results and yield update
230
- results.add(("generate_prd", self.generated_prd))
231
- yield "PRD generation complete", results
232
-
233
- try:
234
- yield "Analyzing configuration with component agent...", results
235
- configuration_output = self.execute_prompt(
236
- "component_agent",
237
- {
238
- "generated_prd": self.generated_prd
239
- }
240
- )
241
-
242
- results.add(("component_agent", configuration_output))
243
- yield "Component analysis complete", results
244
-
245
- # Parse configuration output
246
- try:
247
- cleaned_output = configuration_output
248
- if "```json" in cleaned_output:
249
- cleaned_output = cleaned_output.split("```json")[1].split("```")[0].strip()
250
- elif "```" in cleaned_output:
251
- cleaned_output = cleaned_output.split("```")[1].split("```")[0].strip()
252
-
253
- config = json.loads(cleaned_output)
254
- selected_functions = config[0]["selected_functions"]
255
- yield f"Selected {len(selected_functions)} components to generate", results
256
-
257
- except (json.JSONDecodeError, KeyError, IndexError) as e:
258
- yield f"Warning: Could not parse configuration output ({str(e)})", results
259
- return
260
-
261
- except Exception as e:
262
- yield f"Error in analyzing configuration: {str(e)}", results
263
- return
264
-
265
- # Execute each function and stream results
266
- for i, function_name in enumerate(selected_functions, 1):
267
- try:
268
- yield f"Generating component {i}/{len(selected_functions)}: {function_name}...", results
269
- result = self.execute_prompt(function_name)
270
- results.add((function_name, result))
271
- yield f"Successfully generated {function_name}", results
272
-
273
- except Exception as e:
274
- yield f"Error executing {function_name}: {str(e)}", results
275
- continue
276
-
277
- yield "All components generated successfully!", results
278
-
 
 
 
 
 
 
 
 
279
 
280
 
 
281
 
282
 
 
186
  }
187
  )
188
 
189
+ #TODO: To change
190
  def gather_project_input(self):
191
  """Generate context-aware questions to gather project requirements"""
192
  return self.execute_prompt(
 
205
  }
206
  )
207
 
208
+ ##########################################################
209
  ## Generate PRD and components from project details ##
210
  def generate_prd_and_components(self, progress=gr.Progress()):
211
+ """Generate PRD and components from project details, streaming results"""
212
+ results = [] # Use a list instead of a set to maintain order
213
+
214
+ # Generate PRD
215
+ yield "Generating PRD...", results
216
+ prd_response = self.execute_prompt(
217
+ "generate_prd",
218
+ {
219
+ "project_detail": self.get_project_detail()
220
+ }
221
+ )
222
+
223
+ # Parse the PRD JSON response
224
+ try:
225
+ prd_json = json.loads(prd_response)
226
+ self.generated_prd = prd_json.get("detailed_breakdown", "")
227
+ except json.JSONDecodeError:
228
+ print("Warning: Could not parse PRD as JSON, using raw response")
229
+ self.generated_prd = prd_response
230
+
231
+ # Add PRD to results and yield update
232
+ results.append(("generate_prd", self.generated_prd))
233
+ yield "PRD generation complete", results
234
+
235
+ try:
236
+ yield "Analyzing configuration with component agent...", results
237
+ configuration_output = self.execute_prompt(
238
+ "component_agent",
239
+ {
240
+ "generated_prd": self.generated_prd
241
+ }
242
+ )
243
+
244
+ results.append(("component_agent", configuration_output)) # Use append
245
+ yield "Component analysis complete", results
246
+
247
+ # Parse configuration output
248
+ try:
249
+ cleaned_output = configuration_output
250
+ if "```json" in cleaned_output:
251
+ cleaned_output = cleaned_output.split("```json")[1].split("```")[0].strip()
252
+ elif "```" in cleaned_output:
253
+ cleaned_output = cleaned_output.split("```")[1].split("```")[0].strip()
254
+
255
+ config = json.loads(cleaned_output)
256
+ selected_functions = config[0]["selected_functions"]
257
+ yield f"Selected {len(selected_functions)} components to generate", results
258
+
259
+ except (json.JSONDecodeError, KeyError, IndexError) as e:
260
+ yield f"Warning: Could not parse configuration output ({str(e)})", results
261
+ return
262
+
263
+ except Exception as e:
264
+ yield f"Error in analyzing configuration: {str(e)}", results
265
+ return
266
+
267
+ # Execute each function and stream results
268
+ for i, function_name in enumerate(selected_functions, 1):
269
+ try:
270
+ yield f"Generating component {i}/{len(selected_functions)}: {function_name}...", results
271
+ result = self.execute_prompt(function_name)
272
+ results.append((function_name, result)) # Use append
273
+ yield f"Successfully generated {function_name}", results
274
+
275
+ except Exception as e:
276
+ yield f"Error executing {function_name}: {str(e)}", results
277
+ continue
278
+
279
+ yield "All components generated successfully!", results
280
+
281
+ def generate_mandays(self, progress=gr.Progress()):
282
+ """Generate PRD and components from project details, streaming results"""
283
+ results = [] # Use a list instead of a set to maintain order
284
+
285
+ # Generate PRD
286
+ yield "Generating Mandays...", results
287
+
288
+
289
 
290
 
291
+
292
 
293
 
app.py CHANGED
@@ -2,7 +2,7 @@ from typing import Tuple
2
  import gradio as gr
3
  from Project import *
4
  from common_functions_v4 import *
5
- from event_handler import generate_content, setup_all_handlers
6
  from google_drive import *
7
  from notion import *
8
  from state import state
@@ -168,7 +168,7 @@ def create_quotation_generator_section():
168
  page_notes_box = None
169
  page_save_quotation_btn = None
170
  project_name = None
171
- generation_results = gr.State(set())
172
 
173
  with gr.Tab(label="Quotation Generator"):
174
  with gr.Row():
@@ -215,7 +215,6 @@ def create_quotation_generator_section():
215
  # Create main step accordions
216
  for step_name, sub_steps in step_outputs.items():
217
  with gr.Accordion(step_name, open=False):
218
- # Create button for this step
219
  try:
220
  if ' : ' in step_name:
221
  button_label = step_name.split(' : ')[1]
@@ -225,66 +224,10 @@ def create_quotation_generator_section():
225
  button_label = step_name
226
 
227
  step_buttons[step_name] = gr.Button(f"✅ Generate {button_label}")
228
-
229
- # Create a unique results container for this step
230
- results_container = gr.Column(visible=False)
231
- with results_container:
232
- gr.Markdown("## Generated Results")
233
-
234
- def create_render_results(step):
235
- @gr.render(inputs=[generation_results])
236
- def render_results(results):
237
- if not results:
238
- return gr.Markdown("No results generated yet.")
239
-
240
- components = []
241
- relevant_results = [
242
- (fname, res) for fname, res in results
243
- if PROMPTS.get(fname) and PROMPTS[fname].step == step
244
- ]
245
-
246
- if not relevant_results:
247
- return gr.Markdown("No results generated for this step.")
248
-
249
- for function_name, result in relevant_results:
250
- prompt_config = PROMPTS.get(function_name)
251
-
252
- components.extend([
253
- gr.Group([
254
- gr.Markdown(f"### {prompt_config.description if prompt_config else function_name}"),
255
- gr.Row([
256
- gr.Column([
257
- gr.Textbox(
258
- value=result,
259
- label=f"{function_name} Text Output",
260
- lines=10,
261
- interactive=True
262
- )
263
- ], scale=1),
264
- gr.Column([
265
- gr.Markdown(
266
- value=result,
267
- show_copy_button=True,
268
- elem_classes=["scrollable-markdown"]
269
- )
270
- ], scale=1)
271
- ])
272
- ])
273
- ])
274
-
275
- return components
276
-
277
- return render_results
278
-
279
- step_buttons[str(step_name)].click(
280
- fn=generate_content,
281
- outputs=[page_progress_update, generation_results],
282
- queue=True
283
- ).then(
284
- fn=create_render_results(step_name),
285
- inputs=[generation_results]
286
- )
287
-
288
  with gr.Column(scale=1):
289
  with gr.Row():
290
  with gr.Column(scale=2):
 
2
  import gradio as gr
3
  from Project import *
4
  from common_functions_v4 import *
5
+ from event_handler import create_render_results, setup_all_handlers
6
  from google_drive import *
7
  from notion import *
8
  from state import state
 
168
  page_notes_box = None
169
  page_save_quotation_btn = None
170
  project_name = None
171
+ generation_results = gr.State([])
172
 
173
  with gr.Tab(label="Quotation Generator"):
174
  with gr.Row():
 
215
  # Create main step accordions
216
  for step_name, sub_steps in step_outputs.items():
217
  with gr.Accordion(step_name, open=False):
 
218
  try:
219
  if ' : ' in step_name:
220
  button_label = step_name.split(' : ')[1]
 
224
  button_label = step_name
225
 
226
  step_buttons[step_name] = gr.Button(f"✅ Generate {button_label}")
227
+
228
+ # Create the render function for this specific step
229
+ create_render_results(step_name, generation_results)
230
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
231
  with gr.Column(scale=1):
232
  with gr.Row():
233
  with gr.Column(scale=2):
event_handler.py CHANGED
@@ -7,20 +7,90 @@ import json
7
  from prompt_configs import PROMPTS
8
  from typing import Generator, Tuple
9
 
10
- def generate_content(progress=gr.Progress()) -> Generator[Tuple[str, set] , Any , None]:
 
11
  """Generate content using the Project instance"""
12
  try:
13
  # Generate PRD and components, yielding results as they're generated
14
  for status_msg, result in state.quotation_project.generate_prd_and_components(progress):
15
- # Yield both the status message and current results
16
  yield status_msg, result
17
 
18
  except Exception as e:
19
- print(f"Error during generation: {str(e)}") # Add debug print
20
- yield f"Error during generation: {str(e)}", set()
 
 
 
 
 
 
 
21
 
 
 
 
 
22
  def setup_all_handlers(step_buttons, all_components, progress_update, quotation_cost=None, recalc_btn=None, upload_drive_btn=None, upload_notion_btn=None, project_name=None, generation_results=None):
23
  """Set up all step handlers with the provided UI components"""
24
  # Modified button click chain
 
 
 
 
 
 
 
 
 
 
 
 
25
  return generation_results
26
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
  from prompt_configs import PROMPTS
8
  from typing import Generator, Tuple
9
 
10
+
11
def generate_step_1(progress=gr.Progress()) -> Generator[Tuple[str, list], Any, None]:
    """Stream step-1 (PRD + components) generation from the shared Project.

    Re-yields each (status_message, results) pair produced by
    ``state.quotation_project.generate_prd_and_components``.
    """
    try:
        generator = state.quotation_project.generate_prd_and_components(progress)
        for status_msg, payload in generator:
            yield status_msg, payload
    except Exception as e:
        # Surface the failure both to the console and to the UI stream.
        print(f"Error during generation: {str(e)}")
        yield f"Error during generation: {str(e)}", []
21
+
22
def generate_step_2(progress=gr.Progress()) -> Generator[Tuple[str, list], Any, None]:
    """Stream step-2 generation from the shared Project instance.

    NOTE(review): this currently duplicates generate_step_1 and still calls
    generate_prd_and_components — it likely should call the new
    Project.generate_mandays once that flow is implemented; confirm intent.
    """
    try:
        generator = state.quotation_project.generate_prd_and_components(progress)
        for status_msg, payload in generator:
            yield status_msg, payload
    except Exception as e:
        # Surface the failure both to the console and to the UI stream.
        print(f"Error during generation: {str(e)}")
        yield f"Error during generation: {str(e)}", []
32
+
33
def setup_all_handlers(step_buttons, all_components, progress_update, quotation_cost=None, recalc_btn=None, upload_drive_btn=None, upload_notion_btn=None, project_name=None, generation_results=None):
    """Set up all step handlers with the provided UI components.

    Wires each step button to its streaming generator so that progress
    messages and accumulated results flow into ``progress_update`` and
    ``generation_results`` respectively. Returns ``generation_results``
    unchanged so callers can chain further handlers onto it.
    """
    # Step 1 streams PRD + component generation progress into the UI.
    step_buttons['Step 1 : Scope & Components'].click(
        fn=generate_step_1,
        outputs=[progress_update, generation_results],
        queue=True
    )

    # BUG FIX: this button was wired to generate_step_1, leaving the newly
    # added generate_step_2 handler dead code. Route it to the step-2
    # generator so the mandays flow can diverge from step 1.
    step_buttons['Step 2 : Mandays & Quotation'].click(
        fn=generate_step_2,
        outputs=[progress_update, generation_results],
        queue=True
    )

    return generation_results
49
+
50
def create_render_results(step, generation_results):
    """Register a @gr.render callback showing results that belong to *step*.

    Filters the shared ``generation_results`` state down to entries whose
    PromptConfig declares this step, and renders each as an accordion with
    an editable text view and a copyable markdown view side by side.
    """
    @gr.render(inputs=[generation_results])
    def render_results(results):
        # Nothing generated at all yet.
        if not results:
            return [gr.Markdown("No results generated yet.")]

        # Keep only outputs whose prompt config is tagged with this step.
        relevant_results = [
            (fname, res) for fname, res in results
            if PROMPTS.get(fname) and PROMPTS[fname].step == step
        ]
        if not relevant_results:
            return [gr.Markdown("No results generated for this step.")]

        result_components = []
        for fname, output in relevant_results:
            cfg = PROMPTS.get(fname)
            description = cfg.description if cfg else fname

            # NOTE(review): children are passed positionally to gr.Row /
            # gr.Column instead of via `with` context managers — confirm
            # the installed Gradio version supports this calling style.
            with gr.Accordion(f"{description}", open=False):
                result_components.extend([
                    gr.Row([
                        gr.Column(
                            gr.Textbox(
                                value=output,
                                label="Text Output",
                                lines=10,
                                interactive=True
                            ),
                            scale=1
                        ),
                        gr.Column(
                            gr.Markdown(
                                value=output,
                                show_copy_button=True,
                                elem_classes=["scrollable-markdown"]
                            ),
                            scale=1
                        )
                    ])
                ])

        return result_components

    return render_results
prompt_configs.py CHANGED
@@ -227,28 +227,56 @@ PROMPTS = {
227
  "generate_engage_follow_up_questions": PromptConfig(
228
  prompt=
229
  """
230
- You are a Software Development project manager responsible for gathering detailed requirements in order to generate an accurate and comprehensive quotation for a client's Software Development project.
231
- Your task is to ask good follow up questions based on what you already know
232
- Consider the following inputs:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
233
 
234
- 1. **Requirements Rubric:** Use this as a baseline list of initial questions of the Software Development project
235
- 2. **Project Details:** Background information on the client, industry, and high-level project requirements.
236
-
237
- ### Instructions:
238
- - Analyze the rubric and project details to generate clear, concise, succinct questions
239
- - If the project requirement already includes an answer to a question in the rubric, do not generate that question.
240
- - If the project requirement had not already include the answer to a question , then DO generate the question based on the requirements rubric following the section name and criteria.
241
- - Ensure your generated questions are highly relevant and specific to the client's business context. Avoid general or vague questions; tailor each question to the client's specific operations.
242
- - Include realistic sample answers relevant to the client's context
243
-
244
-
245
- ### Output Format:
246
- Give your output as string in the following format:
247
- <index><question>(sample answers)
248
-
249
- Just return the string text and NOTHING else, omit code guards.
250
  """,
251
- inputs=["project_detail", "requirements_rubric"],
252
  outputs=["generated_engage_follow_up_questions"],
253
  model=ModelType.O1_MINI,
254
  description="Generate Engage Context-Aware List Of Questions",
@@ -326,13 +354,18 @@ PROMPTS = {
326
  }
327
  ),
328
 
329
- "generate_engage_further_follow_up_questions": PromptConfig(
330
  prompt=
331
  """
332
- You are a Chatbot Development Expert specializing in creating intelligent, user-friendly, and scalable AI chatbot solutions for startups. You will be provided with client background information.
333
 
334
- Your task is identify gaps in chatbot project requirements and generate highly specific and actionable follow-up questions to clarify underlying needs. Leverage frameworks such as the 5 Whys and root cause analysis for deeper exploration.
335
- Ensure questions are tailored, referencing prior context or statements for precision.
 
 
 
 
 
336
 
337
  Requirements:
338
  You need to FULLY read the input which is given below client background information.
@@ -360,36 +393,6 @@ PROMPTS = {
360
  )
361
  }
362
  ),
363
-
364
- "generate_page_further_follow_up_questions": PromptConfig(
365
- prompt=
366
- """
367
- You are a Web Development Expert with extensive experience designing scalable, secure, and robust document processing applications. Your task is to review a detailed Q&A about a proposed document processing web application and identify gaps or ambiguities. Leverage frameworks such as the 5 Whys and root cause analysis for deeper exploration.
368
- Ensure questions are tailored, referencing prior context or statements for precision.
369
- Requirements:
370
- You need to FULLY read the input which is given below client background information.
371
- Generate follow-up questions to identify missing details or ambiguities.
372
- Use specific references to prior responses for continuity. For example: "You mentioned [context]. Can you elaborate on [specific aspect]?"
373
- Apply the 5 Whys to delve deeper where necessary. For example: " You mentioned [specific pain point]. Why does this occur, and what are the downstream impacts?"
374
- Highlight systemic issues where patterns emerge (e.g., manual processes across multiple challenges).
375
- # Output Format:
376
- <index><question>(sample answers)
377
- Just return the formatted list as string and nothing else.
378
- """,
379
- inputs=['project_detail'],
380
- outputs=['generated_page_further_follow_up_questions'],
381
- model=ModelType.O1_MINI,
382
- description="Generate Page Further Follow Up Questions",
383
- step="Chatbot Prompt Editors",
384
- ui={
385
- "page_further_follow_up_prompt_editor": UIConfig(
386
- component_type=UIComponentType.TEXTBOX,
387
- label="Page Further Follow Up Questions Prompt",
388
- lines=20,
389
- interactive=True
390
- )
391
- }
392
- ),
393
 
394
  #########################################################################################
395
  "generate_prd": PromptConfig(
@@ -506,7 +509,7 @@ PROMPTS = {
506
 
507
  Tech Stack:
508
  Backend: FastAPI, Python
509
- Chatbot: Chatbot Builder , COZE , Yellow.ai
510
  Infrastructure: AWS, PostgreSQL, Redis, Docker, Alembic
511
 
512
  Output Format:
@@ -539,61 +542,6 @@ PROMPTS = {
539
  )
540
  }
541
  ),
542
-
543
- #IGNORE FIRST
544
- "generate_engage_plan_test_components": PromptConfig(
545
- prompt=
546
- """
547
- You are an expert in chatbot project planning and testing. Your task is to create a highly detailed, actionable, and project-specific Component List for a chatbot project, focusing exclusively on the Planning and Testing phases. excluding Development phase. The response must align with the project's goals, technical stack, and compliance requirements, ensuring granularity, specificity, and adherence to the provided Project Requirement Document (PRD).
548
-
549
- Instructions:
550
- Break the project into the following phases:
551
- 1. Planning Phase ( Focus on: Project initiation ,Requirement gathering,Technical architecture design, Integration planning)
552
- 2. Testing Phase (Focus on: Integration testing
553
- System testing
554
- User Acceptance Testing (UAT) )
555
-
556
- Components:
557
- For each phase, include project-specific components that align with the goal of developing the chatbot Project. Break down each phase into granular sub-components and tasks, ensuring specificity and alignment with the PRD.
558
- Use the PRD to extract relevant, granular components that reflect the tasks and deliverables unique to this project.
559
- Ensure components are actionable and tied to the technical stack
560
-
561
- Tech Stack:
562
- Backend: FastAPI, Python
563
- Chatbot: Chatbot Builder , COZE , Yellow.ai
564
- Infrastructure: AWS, PostgreSQL, Redis, Docker, Alembic
565
-
566
- Output Format:
567
- Return the final tables and nothing else. Do not provide a summary at the end.
568
- Use bullet points for clarity and ensure each component is concise yet descriptive.
569
- Include sub-bullets for tasks or subcomponents where necessary to provide additional detail.
570
- """,
571
- inputs=["generated_prd"],
572
- outputs=["generated_plan_test_components"],
573
- model=ModelType.O1_MINI,
574
- description="Generate planning and testing components",
575
- step="Step 1 : Scope & Components",
576
- ui={
577
- "plan_test_prompt_editor": UIConfig(
578
- component_type=UIComponentType.TEXTBOX,
579
- label="Plan & Test Component Prompt",
580
- lines=20,
581
- interactive=True
582
- ),
583
- "generated_plan_test_components_text": UIConfig(
584
- component_type=UIComponentType.TEXTBOX,
585
- label="Plan Test Components",
586
- lines=20,
587
- visible=True
588
- ),
589
- "generated_plan_test_components_markdown": UIConfig(
590
- component_type=UIComponentType.MARKDOWN,
591
- label="Plan Test Components",
592
- visible=True,
593
- show_copy_button=True
594
- )
595
- }
596
- ),
597
 
598
  "generate_page_dev_components": PromptConfig(
599
  prompt=
@@ -900,6 +848,7 @@ PROMPTS = {
900
  )
901
  }
902
  ),
 
903
  "reformat_hybrid_dev_components": PromptConfig(
904
  prompt=
905
  """
@@ -980,151 +929,54 @@ PROMPTS = {
980
  }
981
  ),
982
 
983
- "generate_page_plan_test_mandays": PromptConfig(
984
  prompt=
985
  """
986
- You are an experienced project manager tasked to create a detailed task breakdown for a project based on the planning and testing component list.
987
 
988
  Objective:
989
- Generate a structured CSV output with manday estimates for each planning and testing component.
990
 
991
  Instructions:
992
- 1. Use the planning and testing component list to identify all components
993
- 2. For each component:
994
- - Estimate mandays between 0.2 and 5 days based on real-world complexity
995
- - Provide a clear description of deliverables and outcomes
996
- - Ensure estimates account for potential delays or complications
997
-
998
- Output Format Requirements:
999
- - Generate a CSV with EXACTLY these column headers: "component,mandays,description"
1000
- - Each row must have all three columns filled
1001
- - Numeric values should not be quoted
1002
- - Text values must be enclosed in double quotes
1003
- - No empty rows or missing values
1004
-
1005
- Example Output:
1006
- component,mandays,description
1007
- "Project Planning",2.5,"Detailed project planning including timeline and resource allocation"
1008
- "Requirements Analysis",1.5,"Analysis and documentation of system requirements"
1009
-
1010
- Return only the CSV content, no code blocks or additional text.
1011
- """,
1012
- inputs=["generated_plan_test_components"],
1013
- outputs=["generated_plan_test_mandays"],
1014
- model=ModelType.O1_MINI,
1015
- description="Step 2.1 : Generate planning and testing mandays",
1016
- step="Step 2 : Mandays & Quotation",
1017
- sub_step="Step 2.1 : Generate Mandays",
1018
- ui={
1019
- "plan_test_mandays_prompt_editor": UIConfig(
1020
- component_type=UIComponentType.TEXTBOX,
1021
- label="Planning & Testing Mandays System Prompt",
1022
- lines=20,
1023
- interactive=True
1024
- ),
1025
- "generated_plan_test_mandays_dataframe": UIConfig(
1026
- component_type=UIComponentType.DATAFRAME,
1027
- label="Plan & Test Mandays",
1028
- interactive=True
1029
- )
1030
- }
1031
- ),
1032
-
1033
- "generate_engage_plan_test_mandays": PromptConfig(
1034
- prompt=
1035
- """
1036
- You are an experienced project manager tasked to create a detailed task breakdown for a project based on the planning and testing component list and the development component list.
1037
-
1038
- Objective:
1039
- Generate a structured output organized by component. Provide a table with the following columns:
1040
- - Component: The name of component, as defined in the component list. ()
1041
- - Manday: The estimated effort required for a one-person team to complete the task, based on real-world complexity and scope.
1042
- - Description: A detailed explanation of the task, including deliverables or outcomes, as defined in the component list.
1043
-
1044
- Instruction:
1045
  1. Input:
1046
- - Use the planning and testing component list identify all components and subcomponents. The hierarchy of the document is Phase -> Component -> Subcomponent -> Task
 
1047
 
1048
  2. Manday Estimation:
1049
- Assign a manday estimate for each component based on the complexity and effort required, ensuring it falls between 0.2 and 5 days.
1050
- Ensure estimates are based on real-world complexity and scope while accounting for potential delays or complications.
1051
-
1052
- **Output Format**:
1053
- Create a CSV file with the following columns:
1054
- "component",,"subcomponent","mandays","description"
1055
- Just return the csv text and NOTHING else, omit the ``` code guards.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1056
  """,
1057
  inputs=["generated_plan_test_components"],
1058
  outputs=["generated_plan_test_mandays"],
1059
  model=ModelType.O1_MINI,
1060
- description="Step 2.1 : Generate Plan Test Mandays",
1061
  step="Step 2 : Mandays & Quotation",
1062
  sub_step="Step 2.1 : Generate Mandays",
1063
- ui={
1064
- "plan_test_mandays_prompt_editor": UIConfig(
1065
- component_type=UIComponentType.TEXTBOX,
1066
- label="Manday Estimator Prompt",
1067
- lines=20,
1068
- interactive=True
1069
- ),
1070
- "generated_plan_test_mandays_dataframe": UIConfig(
1071
- component_type=UIComponentType.DATAFRAME,
1072
- label="Plan Test Mandays",
1073
- interactive=True,
1074
- visible=True
1075
- )
1076
- }
1077
  ),
1078
 
1079
- "generate_page_plan_test_mandays": PromptConfig(
1080
- prompt=
1081
- """
1082
- You are an experienced project manager tasked to create a detailed task breakdown for a project based on the planning and testing component list.
1083
-
1084
- Objective:
1085
- Generate a structured CSV output with manday estimates for each planning and testing component.
1086
-
1087
- Instructions:
1088
- 1. Use the planning and testing component list to identify all components
1089
- 2. For each component:
1090
- - Estimate mandays between 0.2 and 5 days based on real-world complexity
1091
- - Provide a clear description of deliverables and outcomes
1092
- - Ensure estimates account for potential delays or complications
1093
-
1094
- Output Format Requirements:
1095
- - Generate a CSV with EXACTLY these column headers: "component,mandays,description"
1096
- - Each row must have all three columns filled
1097
- - Numeric values should not be quoted
1098
- - Text values must be enclosed in double quotes
1099
- - No empty rows or missing values
1100
-
1101
- Example Output:
1102
- component,mandays,description
1103
- "Project Planning",2.5,"Detailed project planning including timeline and resource allocation"
1104
- "Requirements Analysis",1.5,"Analysis and documentation of system requirements"
1105
-
1106
- Return only the CSV content, no code blocks or additional text.
1107
- """,
1108
- inputs=["generated_plan_test_components"],
1109
- outputs=["generated_plan_test_mandays"],
1110
- model=ModelType.O1_MINI,
1111
- description="Step 2.1 : Generate planning and testing mandays",
1112
- step="Step 2 : Mandays & Quotation",
1113
- sub_step="Step 2.1 : Generate Mandays",
1114
- ui={
1115
- "plan_test_mandays_prompt_editor": UIConfig(
1116
- component_type=UIComponentType.TEXTBOX,
1117
- label="Planning & Testing Mandays System Prompt",
1118
- lines=20,
1119
- interactive=True
1120
- ),
1121
- "generated_plan_test_mandays_dataframe": UIConfig(
1122
- component_type=UIComponentType.DATAFRAME,
1123
- label="Plan & Test Mandays",
1124
- interactive=True
1125
- )
1126
- }
1127
- ),
1128
  # Jess : Combine both into one
1129
  "generate_dev_mandays": PromptConfig(
1130
  prompt=
 
227
  "generate_engage_follow_up_questions": PromptConfig(
228
  prompt=
229
  """
230
+ **You are a Software Development Expert specializing in scalable, secure, and robust chatbot systems.
231
+ You will be provided with client background information and a requirements rubric.
232
+ Your task is to create a dynamic, context-aware list of questions to collaboratively gather client requirements for a chatbot application.
233
+ Use the requirements rubric as a baseline. Generate additional, relevant questions on top of this baseline where appropriate.
234
+ Use the client's context to add clarity or relevance to the question.
235
+ Each question should provide actionable insights to uncover critical details about client needs and include sample answers as guidance to the client.
236
+
237
+ Areas to Cover:
238
+ Business Requirements:
239
+ <client context>, What specific business outcomes should this chatbot achieve? (e.g., providing information, assisting users, facilitating specific tasks like event-related inquiries)
240
+ <client context>, Which current customer service challenges should the chatbot address? How are these challenges currently being addressed? Should this chatbot replace or complement existing customer service channels? Can you give more details? (e.g., long response times, lack of 24/7 support)
241
+ Conversational Design:
242
+ <client context>, Should the chatbot handle tasks that require only one piece of information, like retrieving an order ID? (e.g., yes, for order status checks)
243
+ <client context>, Can you provide 2 end-to-end expected conversation flows? (e.g., user asks for order status, chatbot obtains order ID, checks status, and notifies user)
244
+ <client context>, What are the most frequently asked questions by customers currently? Can you provide an expected number of questions in this FAQ or some sample questions? Where are the answers to these questions stored right now? (e.g., business hours, promotions)
245
+ <client context>, How should the chatbot handle multiple failed attempts? When should the chatbot escalate to a human agent after failed attempts? (e.g., after 3 failed attempts, escalate to a live agent)
246
+ <client context>, Will the chatbot handle complex tasks that involve multiple steps, such as booking appointments? If so, can you provide an expected range of questions that fall under this category and some sample scenarios? (e.g., booking a doctor’s appointment)
247
+ Technical Integration:
248
+ <client context>, Where would you prefer the chatbot to be hosted? On your own servers, in the cloud, or a mix of both? (e.g., cloud-based hosting for scalability)
249
+ <client context>, Do you need the chatbot to be deployed in a specific geographical region for data residency or compliance reasons? (e.g., EU region for GDPR compliance)
250
+ <client context>, Which platforms would you like the chatbot to be available on (e.g., website, mobile app, social media)? Which platform should it be prioritized? (e.g. website and mobile app as priority)
251
+ <client context>, Are there any existing tools or systems (e.g., CRM, ERP, customer service software) the chatbot should connect to? If so, what are the software names? (e.g., Salesforce, Zendesk)
252
+ <client context>, Do these systems provide APIs for integration? (e.g., yes, Salesforce and Zendesk provide APIs)
253
+ <client context>, How do you envision the chatbot interacting with these tools or systems? Will it need to fetch data, trigger actions, or both? (e.g., fetch customer data and create support tickets)
254
+ Data Requirements:
255
+ <client context>, What type of data formats will you expect the chatbot to process and handle? Will they be in text, files, images, audio, or perhaps video form? (e.g., text and images for product inquiries)
256
+ <client context>, Should the chatbot have the ability to update or modify data in these systems (e.g., creating tickets, updating customer information)? (e.g., yes, update customer profiles)
257
+ <client context>, How does your company store data? Is it in databases, spreadsheets, or cloud storage? If so, provide the name of the database. (e.g., MySQL database, Google Sheets)
258
+ <client context>, How often does the data in your system update? Is it updated in real-time, daily, or weekly? (e.g., real-time updates for inventory levels)
259
+ <client context>, Does the chatbot need to retrieve and provide real-time updates on things like prices, stock levels, or delivery status? How are these real-time data being stored or accessed? (e.g., yes, via API integration with inventory management system)
260
+ Performance and Scalability:
261
+ <client context>, What are the expected peak hours and maximum volume during these periods? Include timezone considerations and seasonal peaks. (e.g., 10 AM–2 PM, 500 concurrent users during holiday season)
262
+ <client context>, What is the expected number of simultaneous users accessing the chatbot? What is the required capacity for parallel conversations? (e.g., 200 concurrent users, 500 parallel conversations)
263
+ <client context>, What are the required response times and reliability expectations for the chatbot? How should we define server capacity and API limits to maintain optimal performance, even during peak traffic? (e.g., response time under 2 seconds, 99.9% uptime)
264
+ Security and Compliance:
265
+ <client context>, Will the chatbot handle any sensitive data? Are there industry-specific regulations to follow? (e.g., yes, GDPR compliance for customer data)
266
+ <client context>, What security measures are required to protect the data processed by the chatbot? (e.g., encryption for data in transit and at rest)
267
+ User Experience:
268
+ <client context>, Who are the key user personas for the chatbot? What are their primary needs (e.g., language differences)? (e.g., customers, support agents, multilingual support)
269
+ <client context>, What languages should the chatbot support? What tone should the chatbot use (e.g., friendly, formal)? (e.g., English and Spanish, friendly tone)
270
+ <client context>, What kind of metrics do you want to collect from the user, e.g., customer satisfaction for the chatbot? How do you envision these metrics being collected (every interaction/random)? Do you want a dashboard to monitor performance? (e.g., CSAT scores after every interaction, real-time dashboard)
271
+ System Reliability:
272
+ <client context>, What actions should be taken if the system fails or experiences downtime (e.g., notifying users, providing estimated response times)? How can we ensure minimal disruption to the user experience? (e.g., notify users of downtime and provide estimated resolution time)
273
+ <client context>, Would you like to set up automated monitoring and alerting for critical issues, such as system downtime or API failures? (e.g., yes, with email and SMS alerts)
274
 
275
+ Instructions:
276
+ Replace <client context> with relevant information derived from the provided client background.
277
+ Only provide the list of formatted questions without any additional introduction or summary.
 
 
 
 
 
 
 
 
 
 
 
 
 
278
  """,
279
+ inputs=["project_detail"],
280
  outputs=["generated_engage_follow_up_questions"],
281
  model=ModelType.O1_MINI,
282
  description="Generate Engage Context-Aware List Of Questions",
 
354
  }
355
  ),
356
 
357
+ "generate_further_follow_up_questions": PromptConfig(
358
  prompt=
359
  """
360
+ You are an AI Solution Expert with extensive experience in developing both intelligent chatbots and robust document extraction systems for startups. You will be provided with client background information pertaining to their project requirements, which may include a chatbot solution, a document extraction solution, or both.
361
 
362
+ Your task is to:
363
+ 1. Determine the Project Scope:
364
+ Identify whether the project involves only a chatbot solution, only a document extraction solution, or a hybrid of both.
365
+ 2. Identify Gaps and Clarify Requirements:
366
+ - Generate highly specific and actionable follow-up questions to clarify underlying needs.
367
+ - Utilize frameworks such as the 5 Whys and root cause analysis for deeper exploration.
368
+ - Ensure questions are tailored to the identified project scope (Chatbot, Document Extraction, or both).
369
 
370
  Requirements:
371
  You need to FULLY read the input, which is the client background information given below.
 
393
  )
394
  }
395
  ),
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
396
 
397
  #########################################################################################
398
  "generate_prd": PromptConfig(
 
509
 
510
  Tech Stack:
511
  Backend: FastAPI, Python
512
+ Chatbot (ONLY applicable if project involves chatbot): Chatbot Builder, COZE, Yellow.ai
513
  Infrastructure: AWS, PostgreSQL, Redis, Docker, Alembic
514
 
515
  Output Format:
 
542
  )
543
  }
544
  ),
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
545
 
546
  "generate_page_dev_components": PromptConfig(
547
  prompt=
 
848
  )
849
  }
850
  ),
851
+
852
  "reformat_hybrid_dev_components": PromptConfig(
853
  prompt=
854
  """
 
929
  }
930
  ),
931
 
932
+ "generate_plan_test_mandays": PromptConfig(
933
  prompt=
934
  """
935
+ You are an experienced project manager tasked to create a detailed task breakdown for a project based on the planning and testing component list. The project may involve Document Extraction only, Chatbot only, or a Hybrid (Document Extraction + Chatbot).
936
 
937
  Objective:
938
+ Generate structured CSV outputs with manday estimates for each planning and testing component, tailored to the type of project.
939
 
940
  Instructions:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
941
  1. Input:
942
+ Use the planning and testing component list to identify all components and subcomponents.
943
+ The hierarchy of the document is: Phase -> Component -> Subcomponent -> Task.
944
 
945
  2. Manday Estimation:
946
+ Assign manday estimates for each component or subcomponent based on complexity and effort required, ranging from 0.2 to 5 days.
947
+ Ensure estimates account for real-world complexity, potential delays, and complications.
948
+
949
+ 3. Output Format Requirements:
950
+ Document Extraction only:
951
+ - Generate a CSV with columns: "component, mandays, description".
952
+ - Description must include deliverables and outcomes.
953
+
954
+ Chatbot only:
955
+ - Generate a CSV with columns: "component, subcomponent, mandays, description".
956
+ - Subcomponents must be clearly listed under each component.
957
+ - Description must include deliverables and outcomes.
958
+
959
+ Hybrid (Document Extraction + Chatbot):
960
+ - Generate TWO separate CSV files as per the above formats, one for Document Extraction and one for Chatbot.
961
+ - Clearly indicate Section Breaks between the two files by including:
962
+ ----Section Break----
963
+
964
+ 4. Output:
965
+ - Return **ONLY** the CSV content, no code blocks or additional text.
966
+ - Ensure all rows have all columns filled.
967
+ - Numeric values should not be quoted.
968
+ - Text values must be enclosed in double quotes.
969
+ - No empty rows or missing values.
970
+ - Omit the ``` code fences
971
  """,
972
  inputs=["generated_plan_test_components"],
973
  outputs=["generated_plan_test_mandays"],
974
  model=ModelType.O1_MINI,
975
+ description="Step 2.1 : Generate Planning & Testing Mandays",
976
  step="Step 2 : Mandays & Quotation",
977
  sub_step="Step 2.1 : Generate Mandays",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
978
  ),
979
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
980
  # Jess : Combine both into one
981
  "generate_dev_mandays": PromptConfig(
982
  prompt=