ICAS03 commited on
Commit
7a1f1e8
·
1 Parent(s): f17e9e7

prompt editing without json file (to be tested on deployment)

Browse files
Files changed (3) hide show
  1. app.py +109 -57
  2. prompt_configs.json +1 -1
  3. prompt_configs.py +1149 -48
app.py CHANGED
@@ -12,47 +12,15 @@ import pandas as pd
12
  from io import StringIO
13
  import importlib
14
  import prompt_configs
15
- import json
16
 
17
- SYSTEM_PROMPTS = load_prompts()
18
-
19
- def load_prompts() -> Dict[str, Any]:
20
  try:
21
- with open("prompt_configs.json", "r", encoding="utf-8") as f:
22
- return json.load(f)
23
- except Exception as e:
24
- print(f"Error loading JSON: {e}")
25
- return {}
26
-
27
- def create_prompt_config(key: str, data: Dict[str, Any]) -> PromptConfig:
28
- try:
29
- return PromptConfig(
30
- prompt=data['prompt'],
31
- inputs=data.get('inputs', []),
32
- outputs=data.get('outputs', []),
33
- model=ModelType[data.get('model', 'O1_MINI')], # Default to O1_MINI if not specified
34
- description=data.get('description', ''),
35
- step=data.get('step', None),
36
- sub_step=data.get('sub_step', None),
37
- ui={k: UIConfig(**v) for k, v in data.get('ui', {}).items()}
38
- )
39
  except Exception as e:
40
- print(f"Error creating PromptConfig for {key}: {e}")
41
- return None
42
-
43
- def reload_prompts():
44
- global SYSTEM_PROMPTS, PROMPTS
45
- SYSTEM_PROMPTS = load_prompts()
46
-
47
- # Debug: Print SYSTEM_PROMPTS to verify content
48
- print("Loaded SYSTEM_PROMPTS:", SYSTEM_PROMPTS)
49
-
50
- # Dynamically create PROMPTS from SYSTEM_PROMPTS
51
- PROMPTS = {
52
- key: create_prompt_config(key, data)
53
- for key, data in SYSTEM_PROMPTS.items()
54
- if create_prompt_config(key, data) is not None
55
- }
56
 
57
  def create_ui_component(config: UIConfig, prompt: str = None) -> Any:
58
  if config.component_type == UIComponentType.TEXTBOX:
@@ -63,6 +31,20 @@ def create_ui_component(config: UIConfig, prompt: str = None) -> Any:
63
  visible=config.visible,
64
  value=prompt
65
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
66
  return None
67
 
68
  def create_prompt_editor_components(prompt_config: PromptConfig) -> Dict[str, Any]:
@@ -72,9 +54,6 @@ def create_prompt_editor_components(prompt_config: PromptConfig) -> Dict[str, An
72
  with gr.Column(scale=1):
73
  editor_key = next((k for k in prompt_config.ui.keys() if k.endswith('_prompt_editor')), None)
74
  if editor_key:
75
- # Set the prompt_key based on the current prompt being edited
76
- prompt_key = editor_key.replace('_prompt_editor', '')
77
-
78
  lines = prompt_config.prompt.split('\n')
79
  if lines:
80
  min_indent = min((len(line) - len(line.lstrip())
@@ -93,44 +72,117 @@ def create_prompt_editor_components(prompt_config: PromptConfig) -> Dict[str, An
93
  prompt_config.ui[editor_key],
94
  formatted_prompt
95
  )
 
 
 
96
  # Add save status display
97
  save_status = gr.Markdown("Ready to save changes...", visible=True)
98
 
99
  def save_prompt_changes(new_prompt: str, prompt_key: str) -> str:
 
100
  try:
 
 
 
 
 
 
 
 
 
 
 
 
101
 
102
- # Debug: Print the prompt_key to ensure it's correct
103
- print(f"Attempting to update prompt for key: {prompt_key}")
104
-
105
  # Read current file content
106
- with open("prompt_configs.json", "r", encoding='utf-8') as f:
107
- prompts = json.load(f)
108
 
109
- # Ensure the correct prompt_key is used
110
- if prompt_key in prompts:
111
- prompts[prompt_key]['prompt'] = new_prompt.strip()
112
- else:
113
- return f"❌ Error: Prompt key '{prompt_key}' not found"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
114
 
115
  # Write back to file
116
- with open("prompt_configs.json", "w", encoding='utf-8') as f:
117
- json.dump(prompts, f, indent=4)
 
118
 
119
- reload_prompts()
120
-
121
  return "✅ Prompt updated successfully"
122
 
123
  except Exception as e:
124
  return f"❌ Error updating prompt: {str(e)}"
125
 
126
- # Add change handler
 
 
 
 
 
 
 
127
  editor.change(
128
- fn=lambda new_prompt: save_prompt_changes(new_prompt, prompt_key),
129
  inputs=[editor],
130
  outputs=[save_status]
131
  )
132
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
133
  components[editor_key] = editor
 
134
  components[f"{editor_key}_save_status"] = save_status
135
 
136
  return components
 
12
  from io import StringIO
13
  import importlib
14
  import prompt_configs
 
15
 
16
+ def update_prompts():
 
 
17
  try:
18
+ importlib.reload(prompt_configs)
19
+ global PROMPTS
20
+ PROMPTS = prompt_configs.PROMPTS
21
+ print("PROMPTS updated successfully.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
22
  except Exception as e:
23
+ print(f"Error updating PROMPTS: {str(e)}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
24
 
25
  def create_ui_component(config: UIConfig, prompt: str = None) -> Any:
26
  if config.component_type == UIComponentType.TEXTBOX:
 
31
  visible=config.visible,
32
  value=prompt
33
  )
34
+ elif config.component_type == UIComponentType.MARKDOWN:
35
+ return gr.Markdown(
36
+ label=config.label,
37
+ visible=config.visible,
38
+ show_copy_button=config.show_copy_button,
39
+ value=prompt,
40
+ elem_classes=["scrollable-markdown"]
41
+ )
42
+ elif config.component_type == UIComponentType.DATAFRAME:
43
+ return gr.Dataframe(
44
+ label=config.label,
45
+ interactive=config.interactive,
46
+ visible=config.visible
47
+ )
48
  return None
49
 
50
  def create_prompt_editor_components(prompt_config: PromptConfig) -> Dict[str, Any]:
 
54
  with gr.Column(scale=1):
55
  editor_key = next((k for k in prompt_config.ui.keys() if k.endswith('_prompt_editor')), None)
56
  if editor_key:
 
 
 
57
  lines = prompt_config.prompt.split('\n')
58
  if lines:
59
  min_indent = min((len(line) - len(line.lstrip())
 
72
  prompt_config.ui[editor_key],
73
  formatted_prompt
74
  )
75
+
76
+ save_button = gr.Button("Save Changes")
77
+
78
  # Add save status display
79
  save_status = gr.Markdown("Ready to save changes...", visible=True)
80
 
81
  def save_prompt_changes(new_prompt: str, prompt_key: str) -> str:
82
+ global PROMPTS
83
  try:
84
+ # Find the correct prompt key
85
+ prompt_key = None
86
+ for key, config in PROMPTS.items():
87
+ if editor_key in config.ui:
88
+ prompt_key = key
89
+ break
90
+
91
+ if not prompt_key:
92
+ return "❌ Error: Could not find matching prompt key"
93
+
94
+ # Update in-memory config
95
+ PROMPTS[prompt_key].prompt = new_prompt.strip()
96
 
 
 
 
97
  # Read current file content
98
+ with open("prompt_configs.py", "r", encoding='utf-8') as f:
99
+ content = f.read()
100
 
101
+ # Find the start of the prompt section
102
+ section_start = f' "{prompt_key}": PromptConfig('
103
+ start_idx = content.find(section_start)
104
+
105
+ if start_idx == -1:
106
+ return "❌ Error: Could not locate prompt section in config file"
107
+
108
+ # Find the prompt= part
109
+ prompt_start = content.find('prompt=', start_idx)
110
+ if prompt_start == -1:
111
+ return "❌ Error: Could not locate prompt parameter"
112
+
113
+ # Find the triple quote after prompt=
114
+ first_quote = content.find('"""', prompt_start)
115
+ if first_quote == -1:
116
+ return "❌ Error: Could not locate start of prompt content"
117
+
118
+ # Find the closing triple quote
119
+ end_quote = content.find('"""', first_quote + 3)
120
+ if end_quote == -1:
121
+ return "❌ Error: Could not locate end of prompt content"
122
+
123
+ # Format the new prompt with proper indentation
124
+ indented_lines = []
125
+ for line in new_prompt.strip().split('\n'):
126
+ if line.strip():
127
+ indented_lines.append(f' {line.strip()}')
128
+ else:
129
+ indented_lines.append(' ')
130
+
131
+ # Create the new section with proper formatting
132
+ new_section = (
133
+ 'prompt=\n'
134
+ ' """\n' +
135
+ '\n'.join(indented_lines) +
136
+ '\n """'
137
+ )
138
+
139
+ # Replace just the prompt content
140
+ new_content = (
141
+ content[:prompt_start] +
142
+ new_section +
143
+ content[end_quote + 3:]
144
+ )
145
 
146
  # Write back to file
147
+ with open("prompt_configs.py", "w", encoding='utf-8') as f:
148
+ f.write(new_content)
149
+
150
 
 
 
151
  return "✅ Prompt updated successfully"
152
 
153
  except Exception as e:
154
  return f"❌ Error updating prompt: {str(e)}"
155
 
156
+
157
+ temp_changes = {}
158
+
159
+ def store_changes(new_prompt: str):
160
+ # Update the temporary changes dictionary
161
+ temp_changes[prompt_key] = new_prompt
162
+ return "Changes stored. Click 'Save All' to apply."
163
+
164
  editor.change(
165
+ fn=store_changes,
166
  inputs=[editor],
167
  outputs=[save_status]
168
  )
169
 
170
+ def save_all_changes():
171
+ # Iterate over all stored changes and save them
172
+ for key, new_prompt in temp_changes.items():
173
+ save_prompt_changes(new_prompt, key)
174
+ update_prompts()
175
+ return "✅ All changes saved and prompts reloaded."
176
+
177
+
178
+ save_button.click(
179
+ fn=save_all_changes,
180
+ inputs=[],
181
+ outputs=[save_status]
182
+ )
183
+
184
  components[editor_key] = editor
185
+ components[f"{editor_key}_save_button"] = save_button
186
  components[f"{editor_key}_save_status"] = save_status
187
 
188
  return components
prompt_configs.json CHANGED
@@ -18,7 +18,7 @@
18
  "prompt": "**You are an AI Solution Expert with extensive experience in developing both intelligent chatbots and robust document extraction systems for startups.\nYou will be provided with client background information pertaining to their project requirements, which may include a chatbot solution, a document extraction solution, or both.\n\nYour task is to:\n1. Determine the Project Scope:\nIdentify whether the project involves only a chatbot solution, only a document extraction solution, or a hybrid of both.\n2. Identify Gaps and Clarify Requirements:\n- Generate highly specific and actionable follow-up questions to clarify underlying needs.\n- Utilize frameworks such as the 5 Whys and root cause analysis for deeper exploration.\n- Ensure questions are tailored to the identified project scope (Chatbot, Document Extraction, or both).\n\nRequirements:\nYou need to FULLY read the input which is given below client background information.\nGenerate follow-up questions to identify missing details or ambiguities.\nUse specific references to prior responses for continuity. For example: \"You mentioned [context]. Can you elaborate on [specific aspect]?\"\nApply the 5 Whys to delve deeper where necessary. For example: \"Why do call centers become overloaded during month-end? Are there specific processes causing bottlenecks?\"\nHighlight systemic issues where patterns emerge (e.g., manual processes across multiple challenges).\n\n# Output Format:\n# <index><question>(sample answers)\n\nJust return the generated list of follow up questions as string and nothing else."
19
  },
20
  "generate_prd": {
21
- "prompt": "Rewrite this for clarity while keeping all specific details, metrics, and constraints.\nPlease take note of the time constraint to build the MVP.\nDo not include context or assumptions beyond the input provided.\nDo not also exclude any input provided.\nStructure the document to ensure clarity and logical flow.\nMake sure the title is \"Project Details\"."
22
  },
23
  "generate_intent_list": {
24
  "prompt": "You are an solution architect and project manager with 20+ years of experience in building chatbots and AI-powered systems.\nYour task is to analyze the provided project requirement document and help me understand the complexity of the project by defining ALL possible intents and workflows for each requirement.\nAdditionally, anticipate and suggest realistic, real-life intents that might have been overlooked by the client but are critical for a robust and user-friendly chatbot. You can reference **how other chatbots in the same industry are functioning** and suggest intents that are commonly provided by competitors but may not have been explicitly mentioned by the client. List down these suggestion in one seperate table.\n\nInstruction:\n1. **Break Down Requirements**: Identify all the client requirements from the document.\n2. **Define ALL Possible Intents and Workflows**: For each requirement, list ALL possible intents and their corresponding workflows. - Use \"\u2192\" to represent the flow between steps.\n- Provide your analysis in a clear, highly readable and structured table format. The intents to consider are:\n- Simple\n- Complex\n- Multi-Step\n- Single-Step\n- Fallback.Focus on business-centric fallback (e.g., \"Order Management Fallback\" should address specific order-related issues like invalid SKU, payment failure, or inventory unavailability)\n- Others (if applicable)\n\nOutput Format:\nPresent the analysis in a tabular format with the following structure:\n- Each requirement as the title of a separate table.\n- **Columns**:\n- Intent Type (e.g., Simple, Complex, Multi-Step, etc.)\n- Intent (e.g., Order Status Inquiry, Track Order Progress, etc.)\n- Workflow (describe the technical/logical steps using \"\u2192\" and numbered steps, e.g., \"1. Extract user input \u2192 2. Query database \u2192 3. Return result\")."
 
18
  "prompt": "**You are an AI Solution Expert with extensive experience in developing both intelligent chatbots and robust document extraction systems for startups.\nYou will be provided with client background information pertaining to their project requirements, which may include a chatbot solution, a document extraction solution, or both.\n\nYour task is to:\n1. Determine the Project Scope:\nIdentify whether the project involves only a chatbot solution, only a document extraction solution, or a hybrid of both.\n2. Identify Gaps and Clarify Requirements:\n- Generate highly specific and actionable follow-up questions to clarify underlying needs.\n- Utilize frameworks such as the 5 Whys and root cause analysis for deeper exploration.\n- Ensure questions are tailored to the identified project scope (Chatbot, Document Extraction, or both).\n\nRequirements:\nYou need to FULLY read the input which is given below client background information.\nGenerate follow-up questions to identify missing details or ambiguities.\nUse specific references to prior responses for continuity. For example: \"You mentioned [context]. Can you elaborate on [specific aspect]?\"\nApply the 5 Whys to delve deeper where necessary. For example: \"Why do call centers become overloaded during month-end? Are there specific processes causing bottlenecks?\"\nHighlight systemic issues where patterns emerge (e.g., manual processes across multiple challenges).\n\n# Output Format:\n# <index><question>(sample answers)\n\nJust return the generated list of follow up questions as string and nothing else."
19
  },
20
  "generate_prd": {
21
+ "prompt": "Rewrite this for clarity while keeping all specific details, metrics, and constraints.\nPlease take note of the time constraint to build the MVP.\nDo not include context or assumptions beyond the input provided.\nDo not also exclude any input provided.\nStructure the document to ensure clarity and logical flow.\nMake sure the title is \"Project Requirements\"."
22
  },
23
  "generate_intent_list": {
24
  "prompt": "You are an solution architect and project manager with 20+ years of experience in building chatbots and AI-powered systems.\nYour task is to analyze the provided project requirement document and help me understand the complexity of the project by defining ALL possible intents and workflows for each requirement.\nAdditionally, anticipate and suggest realistic, real-life intents that might have been overlooked by the client but are critical for a robust and user-friendly chatbot. You can reference **how other chatbots in the same industry are functioning** and suggest intents that are commonly provided by competitors but may not have been explicitly mentioned by the client. List down these suggestion in one seperate table.\n\nInstruction:\n1. **Break Down Requirements**: Identify all the client requirements from the document.\n2. **Define ALL Possible Intents and Workflows**: For each requirement, list ALL possible intents and their corresponding workflows. - Use \"\u2192\" to represent the flow between steps.\n- Provide your analysis in a clear, highly readable and structured table format. The intents to consider are:\n- Simple\n- Complex\n- Multi-Step\n- Single-Step\n- Fallback.Focus on business-centric fallback (e.g., \"Order Management Fallback\" should address specific order-related issues like invalid SKU, payment failure, or inventory unavailability)\n- Others (if applicable)\n\nOutput Format:\nPresent the analysis in a tabular format with the following structure:\n- Each requirement as the title of a separate table.\n- **Columns**:\n- Intent Type (e.g., Simple, Complex, Multi-Step, etc.)\n- Intent (e.g., Order Status Inquiry, Track Order Progress, etc.)\n- Workflow (describe the technical/logical steps using \"\u2192\" and numbered steps, e.g., \"1. Extract user input \u2192 2. Query database \u2192 3. Return result\")."
prompt_configs.py CHANGED
@@ -1,15 +1,6 @@
1
  from dataclasses import dataclass
2
- from dataclasses import dataclass
3
  from enum import Enum
4
  from typing import List, Dict, Any, Optional
5
- import json
6
- from typing import Dict, Any
7
-
8
- def load_prompts() -> Dict[str, Any]:
9
- with open("prompt_configs.json", "r", encoding="utf-8") as f:
10
- return json.load(f)
11
-
12
- SYSTEM_PROMPTS = load_prompts()
13
 
14
  class ModelType(Enum):
15
  O1_MINI = "o1-mini"
@@ -45,9 +36,105 @@ class PromptConfig:
45
  ui: Dict[str, UIConfig] = None
46
 
47
  PROMPTS = {
48
-
49
  "component_agent": PromptConfig(
50
- prompt=SYSTEM_PROMPTS['component_agent']['prompt'],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
51
  inputs=['generated_prd'],
52
  outputs=["configuration_type"],
53
  step="Step 1 : Scope & Components",
@@ -77,7 +164,31 @@ PROMPTS = {
77
  ),
78
 
79
  "client_initial_question": PromptConfig(
80
- prompt=SYSTEM_PROMPTS['client_initial_question']['prompt'],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
81
  inputs=[],
82
  outputs=[],
83
  model=ModelType.O1_MINI,
@@ -94,7 +205,37 @@ PROMPTS = {
94
  ),
95
 
96
  "generate_client_follow_up": PromptConfig(
97
- prompt=SYSTEM_PROMPTS['generate_client_follow_up']['prompt'],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
98
  inputs=["project_detail"],
99
  outputs=["follow_up_questions"],
100
  model=ModelType.O1_MINI,
@@ -111,7 +252,57 @@ PROMPTS = {
111
  ),
112
 
113
  "generate_engage_follow_up_questions": PromptConfig(
114
- prompt=SYSTEM_PROMPTS['generate_engage_follow_up_questions']['prompt'],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
115
  inputs=["project_detail"],
116
  outputs=["generated_engage_follow_up_questions"],
117
  model=ModelType.O1_MINI,
@@ -128,7 +319,53 @@ PROMPTS = {
128
  ),
129
 
130
  "generate_page_follow_up_questions": PromptConfig(
131
- prompt=SYSTEM_PROMPTS['generate_page_follow_up_questions']['prompt'],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
132
  inputs=['project_detail'],
133
  outputs=['generated_page_follow_up_questions'],
134
  model=ModelType.O1_MINI,
@@ -145,16 +382,39 @@ PROMPTS = {
145
  ),
146
 
147
  "generate_further_follow_up_questions": PromptConfig(
148
- prompt=SYSTEM_PROMPTS['generate_further_follow_up_questions']['prompt'],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
149
  inputs=["project_detail"],
150
  outputs=["generated_engage_further_follow_up_questions"],
151
  model=ModelType.O1_MINI,
152
- description="Generate Further Follow Up Questions",
153
  step="Chatbot Prompt Editors",
154
  ui={
155
  "generate_further_follow_up_questions_prompt_editor": UIConfig(
156
  component_type=UIComponentType.TEXTBOX,
157
- label="Further Follow Up Questions Prompt",
158
  lines=20,
159
  interactive=True
160
  )
@@ -162,7 +422,15 @@ PROMPTS = {
162
  ),
163
 
164
  "generate_prd": PromptConfig(
165
- prompt=SYSTEM_PROMPTS['generate_prd']['prompt'],
 
 
 
 
 
 
 
 
166
  inputs=["project_detail"],
167
  outputs=["generated_prd"],
168
  model=ModelType.O1_MINI,
@@ -191,7 +459,31 @@ PROMPTS = {
191
  ),
192
 
193
  "generate_intent_list": PromptConfig(
194
- prompt=SYSTEM_PROMPTS['generate_intent_list']['prompt'],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
195
  inputs=["generated_prd"],
196
  outputs=["generated_intent_list"],
197
  model=ModelType.O1_MINI,
@@ -220,11 +512,33 @@ PROMPTS = {
220
  ),
221
 
222
  "generate_plan_test_components": PromptConfig(
223
- prompt=SYSTEM_PROMPTS['generate_plan_test_components']['prompt'],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
224
  inputs=["generated_prd"],
225
  outputs=["generated_plan_test_components"],
226
  model=ModelType.O1_MINI,
227
- description="Generate Page Planning and Testing Components",
228
  step="Step 1 : Scope & Components",
229
  ui={
230
  "generate_plan_test_components_prompt_editor": UIConfig(
@@ -249,7 +563,46 @@ PROMPTS = {
249
  ),
250
 
251
  "generate_page_dev_components": PromptConfig(
252
- prompt=SYSTEM_PROMPTS['generate_page_dev_components']['prompt'],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
253
  inputs=["generated_prd"],
254
  outputs=["generated_page_dev_components"],
255
  model=ModelType.O1_MINI,
@@ -278,7 +631,70 @@ PROMPTS = {
278
  ),
279
 
280
  "generate_engage_dev_components": PromptConfig(
281
- prompt=SYSTEM_PROMPTS['generate_engage_dev_components']['prompt'],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
282
  inputs=["generated_prd" , "generated_intent_list"],
283
  outputs=["generated_engage_dev_components"],
284
  model=ModelType.O1_MINI,
@@ -307,7 +723,44 @@ PROMPTS = {
307
  ),
308
 
309
  "reformat_page_dev_components": PromptConfig(
310
- prompt=SYSTEM_PROMPTS['reformat_page_dev_components']['prompt'],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
311
  inputs=["generated_page_dev_components"],
312
  outputs=["reformatted_dev_components"],
313
  model=ModelType.O1_MINI,
@@ -336,7 +789,58 @@ PROMPTS = {
336
  ),
337
 
338
  "reformat_engage_dev_components": PromptConfig(
339
- prompt=SYSTEM_PROMPTS['reformat_engage_dev_components']['prompt'],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
340
  inputs=["generated_engage_dev_components"],
341
  outputs=["reformatted_dev_components"],
342
  model=ModelType.O1_MINI,
@@ -365,11 +869,29 @@ PROMPTS = {
365
  ),
366
 
367
  "reformat_hybrid_dev_components": PromptConfig(
368
- prompt=SYSTEM_PROMPTS['reformat_hybrid_dev_components']['prompt'],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
369
  inputs=["generated_engage_dev_components","generated_page_dev_components"],
370
  outputs=["reformatted_dev_components"],
371
  model=ModelType.O1_MINI,
372
- description="Reformat Hybird Development Components",
373
  step="Step 1 : Scope & Components",
374
  ui={
375
  "reformat_hybrid_dev_components_prompt_editor": UIConfig(
@@ -394,7 +916,15 @@ PROMPTS = {
394
  ),
395
 
396
  "generate_intents_csv": PromptConfig(
397
- prompt=SYSTEM_PROMPTS['generate_intents_csv']['prompt'],
 
 
 
 
 
 
 
 
398
  inputs=["generated_intent_list"],
399
  outputs=["generated_intents_csv"],
400
  model=ModelType.O1_MINI,
@@ -418,11 +948,38 @@ PROMPTS = {
418
  ),
419
 
420
  "generate_page_plan_test_mandays": PromptConfig(
421
- prompt=SYSTEM_PROMPTS['generate_page_plan_test_mandays']['prompt'],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
422
  inputs=["generated_plan_test_components"],
423
  outputs=["generated_plan_test_mandays"],
424
  model=ModelType.O1_MINI,
425
- description="Generate page planning and testing mandays",
426
  step="Step 2 : Mandays & Quotation",
427
  sub_step="Step 2.1 : Generate Mandays",
428
  ui={
@@ -441,11 +998,33 @@ PROMPTS = {
441
  ),
442
 
443
  "generate_engage_plan_test_mandays": PromptConfig(
444
- prompt=SYSTEM_PROMPTS['generate_engage_plan_test_mandays']['prompt'],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
445
  inputs=["generated_plan_test_components"],
446
  outputs=["generated_plan_test_mandays"],
447
  model=ModelType.O1_MINI,
448
- description="Generate engage planning and testing mandays",
449
  step="Step 2 : Mandays & Quotation",
450
  sub_step="Step 2.1 : Generate Mandays",
451
  ui={
@@ -464,11 +1043,46 @@ PROMPTS = {
464
  ),
465
 
466
  "generate_hybrid_plan_test_mandays": PromptConfig(
467
- prompt=SYSTEM_PROMPTS['generate_hybrid_plan_test_mandays']['prompt'],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
468
  inputs=["generated_plan_test_components"],
469
  outputs=["generated_plan_test_mandays"],
470
  model=ModelType.O1_MINI,
471
- description="Generate hybrid planning and testing mandays",
472
  step="Step 2 : Mandays & Quotation",
473
  sub_step="Step 2.1 : Generate Mandays",
474
  ui={
@@ -487,7 +1101,33 @@ PROMPTS = {
487
  ),
488
 
489
  "generate_dev_mandays": PromptConfig(
490
- prompt=SYSTEM_PROMPTS['generate_dev_mandays']['prompt'],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
491
  inputs=["reformatted_dev_components"],
492
  outputs=["generated_dev_mandays"],
493
  model=ModelType.O1_MINI,
@@ -511,7 +1151,28 @@ PROMPTS = {
511
  ),
512
 
513
  "analyze_planning_testing_mandays": PromptConfig(
514
- prompt=SYSTEM_PROMPTS['analyze_planning_testing_mandays']['prompt'],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
515
  inputs=["generated_plan_test_mandays"],
516
  outputs=["identified_planning_testing_components"],
517
  model=ModelType.O1_MINI,
@@ -535,7 +1196,33 @@ PROMPTS = {
535
  ),
536
 
537
  "analyze_development_mandays": PromptConfig(
538
- prompt=SYSTEM_PROMPTS['analyze_development_mandays']['prompt'],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
539
  inputs=["generated_dev_mandays"],
540
  outputs=["identified_development_components"],
541
  model=ModelType.O1_MINI,
@@ -559,7 +1246,33 @@ PROMPTS = {
559
  ),
560
 
561
  "analyze_MVP_intents": PromptConfig(
562
- prompt=SYSTEM_PROMPTS['analyze_MVP_intents']['prompt'],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
563
  inputs=["generated_intent_list"],
564
  outputs=["identified_mvp_intents"],
565
  model=ModelType.O1_MINI,
@@ -583,7 +1296,45 @@ PROMPTS = {
583
  ),
584
 
585
  "recalculate_page_MVP_mandays": PromptConfig(
586
- prompt=SYSTEM_PROMPTS['recalculate_page_MVP_mandays']['prompt'],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
587
  inputs=["identified_planning_testing_components", "identified_development_components" , "generated_prd"],
588
  outputs=["revised_mandays_estimates"],
589
  model=ModelType.O1_MINI,
@@ -613,7 +1364,65 @@ PROMPTS = {
613
  ),
614
 
615
  "recalculate_engage_MVP_mandays": PromptConfig(
616
- prompt=SYSTEM_PROMPTS['recalculate_engage_MVP_mandays']['prompt'],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
617
  inputs=["identified_planning_testing_components", "identified_development_components", "identified_mvp_intents", "generated_prd"],
618
  outputs=["revised_mandays_estimates"],
619
  model=ModelType.O1_MINI,
@@ -643,7 +1452,32 @@ PROMPTS = {
643
  ),
644
 
645
  "generate_page_MVP_mandays": PromptConfig(
646
- prompt=SYSTEM_PROMPTS['generate_page_MVP_mandays']['prompt'],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
647
  inputs=["revised_mandays_estimates"],
648
  outputs=["generated_MVP_mandays"],
649
  model=ModelType.O1_MINI,
@@ -673,7 +1507,36 @@ PROMPTS = {
673
  ),
674
 
675
  "generate_engage_MVP_mandays": PromptConfig(
676
- prompt=SYSTEM_PROMPTS['generate_engage_MVP_mandays']['prompt'],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
677
  inputs=["revised_mandays_estimates"],
678
  outputs=["generated_MVP_mandays"],
679
  model=ModelType.O1_MINI,
@@ -709,7 +1572,22 @@ PROMPTS = {
709
  ),
710
 
711
  "generate_page_MVP_prd": PromptConfig(
712
- prompt=SYSTEM_PROMPTS['generate_page_MVP_prd']['prompt'],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
713
  inputs=["generated_prd" , "generated_MVP_mandays"],
714
  outputs=["generated_mvp_prd"],
715
  model=ModelType.O1_MINI,
@@ -738,7 +1616,22 @@ PROMPTS = {
738
  ),
739
 
740
  "generate_engage_MVP_prd": PromptConfig(
741
- prompt=SYSTEM_PROMPTS['generate_engage_MVP_prd']['prompt'],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
742
  inputs=["generated_prd" , "generated_MVP_mandays"],
743
  outputs=["generated_mvp_prd"],
744
  model=ModelType.O1_MINI,
@@ -767,7 +1660,87 @@ PROMPTS = {
767
  ),
768
 
769
  "generate_page_BD_SOW": PromptConfig(
770
- prompt=SYSTEM_PROMPTS['generate_page_BD_SOW']['prompt'],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
771
  inputs=["generated_prd" , "generated_plan_test_components" , "reformatted_dev_components" , "combined_cost_summary"],
772
  outputs=["generated_BD_SOW"],
773
  model=ModelType.O1_MINI,
@@ -796,7 +1769,87 @@ PROMPTS = {
796
  ),
797
 
798
  "generate_engage_BD_SOW": PromptConfig(
799
- prompt=SYSTEM_PROMPTS['generate_engage_BD_SOW']['prompt'],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
800
  inputs=["generated_prd" , "generated_plan_test_components" , "reformatted_dev_components" , "generated_intent_list" , "combined_cost_summary" ],
801
  outputs=["generated_BD_SOW"],
802
  model=ModelType.O1_MINI,
@@ -825,7 +1878,55 @@ PROMPTS = {
825
  ),
826
 
827
  "generate_Tech_SOW": PromptConfig(
828
- prompt=SYSTEM_PROMPTS['generate_Tech_SOW']['prompt'],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
829
  inputs=["generated_plan_test_components","reformatted_dev_components","generated_MVP_mandays"],
830
  outputs=["generated_Tech_SOW"],
831
  model=ModelType.O1_MINI,
 
1
  from dataclasses import dataclass
 
2
  from enum import Enum
3
  from typing import List, Dict, Any, Optional
 
 
 
 
 
 
 
 
4
 
5
  class ModelType(Enum):
6
  O1_MINI = "o1-mini"
 
36
  ui: Dict[str, UIConfig] = None
37
 
38
  PROMPTS = {
 
39
  "component_agent": PromptConfig(
40
+ prompt=
41
+ """
42
+ You are an AI that analyzes a software project’s requirements and determines all possible valid and unique configurations based on the provided constraints.
43
+
44
+ Input Data:
45
+
46
+ - Project Requirement Document (PRD) – Describes the project’s goals and scope.
47
+ - List of Functions – A predefined set of function names and descriptions. STRICTLY use these ONLY (do not create new functions).
48
+
49
+ Objective:
50
+ Identify and classify valid project configurations into one of the following categories:
51
+
52
+ - Hybrid Configurations – If the project requires both chatbot and document extraction functionalities.
53
+ - Chatbot-Only Configurations – If the project is focused solely on chatbot functionality.
54
+ - Document Extraction-Only Configurations – If the project involves document extraction but does not require chatbot features.
55
+
56
+ Rules for Ensuring Uniqueness:
57
+
58
+ - Each configuration must belong to only one category (Chatbot, Document Extraction, or Hybrid) and be sorted by best match to the project requirements.
59
+ - Avoid redundant permutations – Ensure no duplicate function combinations under different names.
60
+ - Do NOT create new functions – Use only those from the provided function list.
61
+
62
+ Expected Output Format:
63
+ - For Hybrid Configurations , the configuration_type should be "Basic Chatbot and Document Extraction".
64
+ - Output EXACTLY ONE configuration (never multiple)
65
+ - Never combine configurations (hybrid replaces standalone types)
66
+ - Never create new functions or modify existing function lists
67
+ - A structured JSON object with no duplicate function combinations.
68
+ - DO NOT include any code guards or placeholders.
69
+
70
+ Example 1:
71
+ Project Requirement:
72
+ "The system must provide a chatbot to handle customer queries. It should support intent recognition and predefined workflows. The chatbot must work across web, mobile, and messaging platforms and escalate complex queries to human agents when needed."
73
+
74
+ OUTPUT:
75
+ [
76
+ {
77
+ "configuration_type": "Basic Chatbot",
78
+ "selected_functions": [
79
+ "generate_plan_test_components",
80
+ "generate_intent_list",
81
+ "generate_engage_dev_components"
82
+ ]
83
+ }
84
+ ]
85
+
86
+ Example 2:
87
+ Project Requirement:
88
+ "The system must extract structured data from PDFs. It should identify key fields, validate extracted data, support batch processing, and integrate with our existing software."
89
+
90
+ OUTPUT:
91
+ [
92
+ {
93
+ "configuration_type": "Basic Document Extraction",
94
+ "selected_functions": [
95
+ "generate_plan_test_components",
96
+ "generate_page_dev_components",
97
+ "reformat_page_dev_components"
98
+ ]
99
+ }
100
+ ]
101
+
102
+ Example 3:
103
+ Project Requirement:
104
+ "The system must integrate a chatbot with document extraction. The chatbot should accept user-uploaded documents, extract relevant data, and respond based on extracted content."
105
+
106
+ OUTPUT:
107
+ [
108
+ {
109
+ "configuration_type": "Basic Chatbot and Document Extraction",
110
+ "selected_functions": [
111
+ "generate_plan_test_components",
112
+ "generate_intent_list",
113
+ "generate_page_dev_components",
114
+ "generate_engage_dev_components",
115
+ "reformat_hybrid_dev_components"
116
+ ]
117
+ }
118
+ ]
119
+
120
+ <List of Functions: ONLY use these>
121
+ generate_plan_test_components: For ALL projects, generates a granular component list for the Planning and Testing phases,while explicitly excluding development-related tasks.
122
+
123
+ generate_page_dev_components: For document extraction projects, produces a structured development component breakdown, categorizing frontend, backend, and integration tasks, while addressing edge cases and special considerations.
124
+
125
+ generate_intent_list: For chatbot projects, analyzes a project’s requirements to define all possible chatbot intents and workflows, categorizing them by complexity (simple, multi-step, fallback, etc.) while also identifying overlooked but industry-relevant intents based on competitor comparisons.
126
+
127
+ generate_engage_dev_components: For chatbot projects, extracts comprehensive development components from the Project Requirement Document (PRD) and Chatbot Intent List (CIL), translating intents, workflows, and fallback scenarios into specific technical deliverables for structured implementation.
128
+
129
+ reformat_page_dev_components: Reformat the generated development components for document extraction projects to ensure consistent naming conventions, clarity, and logical grouping (e.g., Frontend, Backend, Integration).
130
+
131
+ reformat_engage_dev_components: Standardize and harmonize naming conventions for chatbot development components by cross-checking against a reference list, retaining all unique elements, and ensuring proper categorization of subcomponents.
132
+
133
+ reformat_hybrid_dev_components:Merge the chatbot and document extraction component lists into a unified, optimized list by consolidating overlapping elements, eliminating redundancies, and structuring the final output into clearly defined component categories.
134
+
135
+
136
+ <Requirements>
137
+ """,
138
  inputs=['generated_prd'],
139
  outputs=["configuration_type"],
140
  step="Step 1 : Scope & Components",
 
164
  ),
165
 
166
  "client_initial_question": PromptConfig(
167
+ prompt=
168
+ """
169
+ # Client Information Gathering Questions
170
+
171
+ ### Company Background and Industry
172
+ 1. Can you provide some background about your company?
173
+ 2. Which industry do you operate in, and what is your company's niche or specialization?
174
+ 3. Who are your primary customers?
175
+ 4. What are the main objectives you want to achieve?
176
+ 5. What key features or functionalities do you need?
177
+
178
+ ### Current Challenges
179
+ 6. What are the biggest challenges your firm is currently facing?
180
+ 7. Can you describe your current processes?
181
+
182
+ ### Workflow and System Impact
183
+ 8. How will this solution benefit your firm as a whole?
184
+
185
+ ### Existing Workflow or System
186
+ 9. Can you describe your current workflow or system?
187
+
188
+ ### Pain Point Identification
189
+ 10. Where is your current system falling short or causing delays?
190
+ 11. Are there any parts of the process that are particularly time-consuming/ prone to error?
191
+ """,
192
  inputs=[],
193
  outputs=[],
194
  model=ModelType.O1_MINI,
 
205
  ),
206
 
207
  "generate_client_follow_up": PromptConfig(
208
+ prompt=
209
+ """
210
+ Based on the initial list of questions and the client's provided answers, generate **insightful and targeted follow-up questions** that will help deepen my understanding of the following critical aspects:
211
+
212
+ 1. **Client Overview**
213
+ **Objective:** ask relevant questions that will directly contribute to better project requirements gathering. (ie: department team that the project is meant for ..etc)
214
+
215
+ 2. **Project Vision and Value**
216
+ **Objective:** Clarify the intended impact of the project on the client's business. Understand how it will improve their processes, solve key challenges, and deliver measurable benefits.
217
+ **Focus:** Investigate specific outcomes, immediate expected goals, and how success will be defined.
218
+
219
+ 3. **Existing System or Workflow Description**
220
+ **Objective:** Delve deeper into the client's current tools, workflows, and processes to uncover pain points, integration requirements, and opportunities for optimization.
221
+ **Focus:** Identify inefficiencies, technical limitations, or gaps that the project will address.
222
+
223
+ 4. **Budget and Resource Constraints**
224
+ **Objective:** Clearly define any limitations or constraints—financial, resource-based, or time-related—that could impact project success.
225
+ **Focus:** Understand the flexibility of the budget, timeline expectations, and resource availability.
226
+
227
+ Instructions:
228
+ Each question should:
229
+ Build on provided client information
230
+ Be non-repetitive and unique; avoid asking similar questions.
231
+ Include realistic sample answers relevant to the client's context
232
+ Focus on gathering quantifiable or specific information
233
+
234
+
235
+ Output top 10 questions in the following format:
236
+ <question>(sample answers)
237
+ Just return the text and NOTHING else. Do not overexplain, omit code guards.
238
+ """,
239
  inputs=["project_detail"],
240
  outputs=["follow_up_questions"],
241
  model=ModelType.O1_MINI,
 
252
  ),
253
 
254
  "generate_engage_follow_up_questions": PromptConfig(
255
+ prompt=
256
+ """
257
+ **You are a Software Development Expert specializing in scalable, secure, and robust chatbot systems.
258
+ You will be provided with client background information and a requirements rubric.
259
+ Your task is to create a dynamic, context-aware list of questions to collaboratively gather client requirements for a chatbot application.
260
+ Use the requirements rubric as a baseline. Generate additional, relevant questions on top of this baseline where appropriate.
261
+ Use the client's context to add clarity or relevance to the question.
262
+ Each question should provide actionable insights to uncover critical details about client needs and include sample answers as guidance to the client.
263
+
264
+ Areas to Cover:
265
+ Business Requirements:
266
+ <client context>, What specific business outcomes should this chatbot achieve? (e.g., providing information, assisting users, facilitating specific tasks like event-related inquiries)
267
+ <client context>, Which current customer service challenges should the chatbot address? How are these challenges currently being addressed? Should this chatbot replace or complement existing customer service channels? Can you give more details? (e.g., long response times, lack of 24/7 support)
268
+ Conversational Design:
269
+ <client context>, Should the chatbot handle tasks that require only one piece of information, like retrieving an order ID? (e.g., yes, for order status checks)
270
+ <client context>, Can you provide 2 end-to-end expected conversation flows? (e.g., user asks for order status, chatbot obtains order ID, checks status, and notifies user)
271
+ <client context>, What are the most frequently asked questions by customers currently? Can you provide an expected number of questions in this FAQ or some sample questions? Where are the answers to these questions stored right now? (e.g., business hours, promotions)
272
+ <client context>, How should the chatbot handle multiple failed attempts? When should the chatbot escalate to a human agent after failed attempts? (e.g., after 3 failed attempts, escalate to a live agent)
273
+ <client context>, Will the chatbot handle complex tasks that involve multiple steps, such as booking appointments? If so, can you provide an expected range of questions that fall under this category and some sample scenarios? (e.g., booking a doctor’s appointment)
274
+ Technical Integration:
275
+ <client context>, Where would you prefer the chatbot to be hosted? On your own servers, in the cloud, or a mix of both? (e.g., cloud-based hosting for scalability)
276
+ <client context>, Do you need the chatbot to be deployed in a specific geographical region for data residency or compliance reasons? (e.g., EU region for GDPR compliance)
277
+ <client context>, Which platforms would you like the chatbot to be available on (e.g., website, mobile app, social media)? Which platform should it be prioritized? (e.g. website and mobile app as priority)
278
+ <client context>, Are there any existing tools or systems (e.g., CRM, ERP, customer service software) the chatbot should connect to? If so, what are the software names? (e.g., Salesforce, Zendesk)
279
+ <client context>, Do these systems provide APIs for integration? (e.g., yes, Salesforce and Zendesk provide APIs)
280
+ <client context>, How do you envision the chatbot interacting with these tools or systems? Will it need to fetch data, trigger actions, or both? (e.g., fetch customer data and create support tickets)
281
+ Data Requirements:
282
+ <client context>, What type of data formats will you expect the chatbot to process and handle? Will they be in text, files, images, audio, or perhaps video form? (e.g., text and images for product inquiries)
283
+ <client context>, Should the chatbot have the ability to update or modify data in these systems (e.g., creating tickets, updating customer information)? (e.g., yes, update customer profiles)
284
+ <client context>, How does your company store data? Is it in databases, spreadsheets, or cloud storage? If so, provide the name of the database. (e.g., MySQL database, Google Sheets)
285
+ <client context>, How often does the data in your system update? Is it updated in real-time, daily, or weekly? (e.g., real-time updates for inventory levels)
286
+ <client context>, Does the chatbot need to retrieve and provide real-time updates on things like prices, stock levels, or delivery status? How are these real-time data being stored or accessed? (e.g., yes, via API integration with inventory management system)
287
+ Performance and Scalability:
288
+ <client context>, What are the expected peak hours and maximum volume during these periods? Include timezone considerations and seasonal peaks. (e.g., 10 AM–2 PM, 500 concurrent users during holiday season)
289
+ <client context>, What is the expected number of simultaneous users accessing the chatbot? What is the required capacity for parallel conversations? (e.g., 200 concurrent users, 500 parallel conversations)
290
+ <client context>, What are the required response times and reliability expectations for the chatbot? How should we define server capacity and API limits to maintain optimal performance, even during peak traffic? (e.g., response time under 2 seconds, 99.9% uptime)
291
+ Security and Compliance:
292
+ <client context>, Will the chatbot handle any sensitive data? Are there industry-specific regulations to follow? (e.g., yes, GDPR compliance for customer data)
293
+ <client context>, What security measures are required to protect the data processed by the chatbot? (e.g., encryption for data in transit and at rest)
294
+ User Experience:
295
+ <client context>, Who are the key user personas for the chatbot? What are their primary needs (e.g., language differences)? (e.g., customers, support agents, multilingual support)
296
+ <client context>, What languages should the chatbot support? What tone should the chatbot use (e.g., friendly, formal)? (e.g., English and Spanish, friendly tone)
297
+ <client context>, What kind of metrics do you want to collect from the user, e.g., customer satisfaction for the chatbot? How do you envision these metrics being collected (every interaction/random)? Do you want a dashboard to monitor performance? (e.g., CSAT scores after every interaction, real-time dashboard)
298
+ System Reliability:
299
+ <client context>, What actions should be taken if the system fails or experiences downtime (e.g., notifying users, providing estimated response times)? How can we ensure minimal disruption to the user experience? (e.g., notify users of downtime and provide estimated resolution time)
300
+ <client context>, Would you like to set up automated monitoring and alerting for critical issues, such as system downtime or API failures? (e.g., yes, with email and SMS alerts)
301
+
302
+ Instructions:
303
+ Replace <client context> with relevant information derived from the provided client background.
304
+ Only provide the list of formatted questions without any additional introduction or summary.
305
+ """,
306
  inputs=["project_detail"],
307
  outputs=["generated_engage_follow_up_questions"],
308
  model=ModelType.O1_MINI,
 
319
  ),
320
 
321
  "generate_page_follow_up_questions": PromptConfig(
322
+ prompt=
323
+ """
324
+ **You are a Software Development Expert specializing in scalable, secure, and robust document processing systems.
325
+ You will be provided with client background information.
326
+ Your task is to create a dynamic, context-aware list of questions to collaboratively gather client requirements for a document processing application.
327
+ Use the list below as a baseline. Generate additional, relevant questions on top of this baseline where appropriate.
328
+ Use the client's context to add clarity or relevance to the question.
329
+ Each question should provide actionable insights to uncover critical details about client needs and Sample Answers as guidance to the client to answer the questions.
330
+ ### Areas to Cover:
331
+ ---
332
+ Document Types:
333
+ <client context>, What specific types of documents will the application need to process (e.g., invoices, legal contracts, ID forms)?
334
+ <client context>, Are there specific complexities or variations in these documents that we should account for (e.g., multi-page documents, handwritten content)?
335
+ Inputs and Outputs:
336
+ <client context>, What are the expected input formats (e.g., PDFs, images, scanned documents)?
337
+ <client context>, What should the processed outputs look like (e.g., structured data, summaries, reports)?
338
+ <client context>, Are there any specific formatting requirements for outputs?
339
+ Document Quality:
340
+ <client context>, Are the documents typically clean and structured, or will the application need preprocessing capabilities (e.g., OCR, noise reduction)?
341
+ <client context>, Do you foresee any challenges with document quality, such as low resolution or inconsistent formatting?
342
+ Workflow Mapping:
343
+ <client context>, Can you describe your current document processing workflow?
344
+ <client context>, What are the major pain points or inefficiencies in the current process?
345
+ <client context>, Which parts of the workflow involve manual interventions, and how would you like to streamline them?
346
+ Integration Points:
347
+ <client context>, What existing systems or third-party tools does the application need to integrate with (e.g., CRMs, ERPs, cloud storage, OCR tools)?
348
+ <client context>, Are there specific APIs, databases, or platforms already in use that we need to consider?
349
+ Security and Compliance:
350
+ <client context>, What security measures are required to protect the data processed by the application?
351
+ <client context>, Are there any industry-specific compliance standards the application must adhere to (e.g., GDPR, HIPAA)?
352
+ Scalability and Performance:
353
+ <client context>, What is the expected volume of documents the application should handle on a daily/weekly/monthly basis?
354
+ <client context>, Are there performance benchmarks or response time requirements that the application must meet?
355
+ User Management and Access Control:
356
+ <client context>, What user roles and permissions will be needed within the application?
357
+ <client context>, How should user authentication and authorization be managed?
358
+ Reporting and Analytics:
359
+ <client context>, What types of analytics or reporting capabilities do you require within the application?
360
+ <client context>, Do you need real-time reporting, or are batch processes sufficient?
361
+ Deployment and Maintenance:
362
+ <client context>, Do you have any preferences for the deployment environment (e.g., cloud-based, on-premises)?
363
+ <client context>, What are your requirements for application maintenance and support post-deployment?
364
+
365
+ Instructions:
366
+ Replace <client context> with relevant information derived from the provided client background.
367
+ Only provide the list of formatted questions without any additional introduction or summary.
368
+ """,
369
  inputs=['project_detail'],
370
  outputs=['generated_page_follow_up_questions'],
371
  model=ModelType.O1_MINI,
 
382
  ),
383
 
384
  "generate_further_follow_up_questions": PromptConfig(
385
+ prompt=
386
+ """
387
+ You are an AI Solution Expert with extensive experience in developing both intelligent chatbots and robust document extraction systems for startups. You will be provided with client background information pertaining to their project requirements, which may include a chatbot solution, a document extraction solution, or both.
388
+
389
+ Your task is to:
390
+ 1. Determine the Project Scope:
391
+ Identify whether the project involves only a chatbot solution, only a document extraction solution, or a hybrid of both.
392
+ 2.Identify Gaps and Clarify Requirements:
393
+ - Generate highly specific and actionable follow-up questions to clarify underlying needs.
394
+ - Utilize frameworks such as the 5 Whys and root cause analysis for deeper exploration.
395
+ - Ensure questions are tailored to the identified project scope (Chatbot, Document Extraction, or both).
396
+
397
+ Requirements:
398
+ You need to FULLY read the input, which is the client background information given below.
399
+ Generate follow-up questions to identify missing details or ambiguities.
400
+ Use specific references to prior responses for continuity. For example: "You mentioned [context]. Can you elaborate on [specific aspect]?"
401
+ Apply the 5 Whys to delve deeper where necessary. For example: "Why do call centers become overloaded during month-end? Are there specific processes causing bottlenecks?"
402
+ Highlight systemic issues where patterns emerge (e.g., manual processes across multiple challenges).
403
+
404
+ # Output Format:
405
+ # <index><question>(sample answers)
406
+
407
+ Just return the generated list of follow up questions as string and nothing else.
408
+ """,
409
  inputs=["project_detail"],
410
  outputs=["generated_engage_further_follow_up_questions"],
411
  model=ModelType.O1_MINI,
412
+ description="Generate Engage Further Follow Up Questions",
413
  step="Chatbot Prompt Editors",
414
  ui={
415
  "generate_further_follow_up_questions_prompt_editor": UIConfig(
416
  component_type=UIComponentType.TEXTBOX,
417
+ label="Engage Further Follow Up Questions Prompt",
418
  lines=20,
419
  interactive=True
420
  )
 
422
  ),
423
 
424
  "generate_prd": PromptConfig(
425
+ prompt=
426
+ """
427
+ Rewrite this for clarity while keeping all specific details, metrics, and constraints.
428
+ Please take note of the time constraint to build the MVP.
429
+ Do not include context or assumptions beyond the input provided.
430
+ Do not also exclude any input provided.
431
+ Structure the document to ensure clarity and logical flow.
432
+ Make sure the title is "Project Requirements".
433
+ """,
434
  inputs=["project_detail"],
435
  outputs=["generated_prd"],
436
  model=ModelType.O1_MINI,
 
459
  ),
460
 
461
  "generate_intent_list": PromptConfig(
462
+ prompt=
463
+ """
464
+ You are a solution architect and project manager with 20+ years of experience in building chatbots and AI-powered systems.
465
+ Your task is to analyze the provided project requirement document and help me understand the complexity of the project by defining ALL possible intents and workflows for each requirement.
466
+ Additionally, anticipate and suggest realistic, real-life intents that might have been overlooked by the client but are critical for a robust and user-friendly chatbot. You can reference **how other chatbots in the same industry are functioning** and suggest intents that are commonly provided by competitors but may not have been explicitly mentioned by the client. List down these suggestions in one separate table.
467
+
468
+ Instruction:
469
+ 1. **Break Down Requirements**: Identify all the client requirements from the document.
470
+ 2. **Define ALL Possible Intents and Workflows**: For each requirement, list ALL possible intents and their corresponding workflows. - Use "→" to represent the flow between steps.
471
+ - Provide your analysis in a clear, highly readable and structured table format. The intents to consider are:
472
+ - Simple
473
+ - Complex
474
+ - Multi-Step
475
+ - Single-Step
476
+ - Fallback. Focus on business-centric fallbacks (e.g., "Order Management Fallback" should address specific order-related issues like invalid SKU, payment failure, or inventory unavailability)
477
+ - Others (if applicable)
478
+
479
+ Output Format:
480
+ Present the analysis in a tabular format with the following structure:
481
+ - Each requirement as the title of a separate table.
482
+ - **Columns**:
483
+ - Intent Type (e.g., Simple, Complex, Multi-Step, etc.)
484
+ - Intent (e.g., Order Status Inquiry, Track Order Progress, etc.)
485
+ - Workflow (describe the technical/logical steps using "→" and numbered steps, e.g., "1. Extract user input → 2. Query database → 3. Return result").
486
+ """,
487
  inputs=["generated_prd"],
488
  outputs=["generated_intent_list"],
489
  model=ModelType.O1_MINI,
 
512
  ),
513
 
514
  "generate_plan_test_components": PromptConfig(
515
+ prompt=
516
+ """
517
+ Context:
518
+ You are an expert in software project planning and testing. Your task is to create a highly detailed, actionable, and project-specific Component List for a software project, focusing exclusively on the Planning and Testing phases, excluding the Development phase. The response must align with the project's goals, technical stack, and compliance requirements, ensuring granularity, specificity, and adherence to the provided Project Requirement Document (PRD).
519
+
520
+ Instructions:
521
+ Break the project into the following phases:
522
+ 1. Planning Phase ( Potention Focus: Requirement gathering,Technical architecture design, Resource Allocation.. etc)
523
+ 2. Testing Phase ( Potention Focus: Integration testing, System testing, User Acceptance Testing (UAT) )
524
+
525
+ Components:
526
+ For each phase, include project-specific components that align with the goal of developing the software Project. Break down each phase into granular sub-components, ensuring specificity and alignment with the PRD.
527
+ Use the PRD to extract relevant, granular components that reflect deliverables unique to this project.
528
+
529
+ Tech Stack:
530
+ Backend: FastAPI, Python
531
+ Chatbot: Chatbot Builder , COZE , Yellow.ai
532
+ Infrastructure: AWS, PostgreSQL, Redis, Docker, Alembic
533
+
534
+ Output Format:
535
+ Use bullet points for clarity and ensure each component is concise yet descriptive.
536
+ Include sub-bullets for tasks or subcomponents where necessary to provide additional detail.
537
+ """,
538
  inputs=["generated_prd"],
539
  outputs=["generated_plan_test_components"],
540
  model=ModelType.O1_MINI,
541
+ description="Generate Planning and Testing Components",
542
  step="Step 1 : Scope & Components",
543
  ui={
544
  "generate_plan_test_components_prompt_editor": UIConfig(
 
563
  ),
564
 
565
  "generate_page_dev_components": PromptConfig(
566
+ prompt=
567
+ """
568
+ as a Senior Software Architect with 20+ years of experience, you are tasked to generate a detailed, actionable, and project-specific component list exclusively for the development phase of the project. The list must align with the project's goals, technical stack, and compliance requirements as outlined in the provided Project Requirement Document (PRD), ensuring granularity and specificity.
569
+
570
+ **Specific Requirements:**
571
+ 1. Extract relevant **granular components** from the PRD that reflect the tasks and deliverables unique to this project.
572
+ 2. Components should directly reference functionalities or deliverables related to the project.
573
+ 3. Prioritize components that provide high business value, such as core functionality.
574
+ 4. Organize components into logical categories such as Frontend, Backend, Integration Development, etc., with further subdivisions as needed. Avoid Testing, Support and Maintenance, Documentation, and Training components.
575
+ 5. Ensure deliverables focus entirely on outcomes and deliverables, and consider edge cases, avoiding unnecessary instructions (i.e., avoid filler words).
576
+ 6. Identify key business logic and special edge cases that need to be considered during development, which could impact the system's robustness or functionality.
577
+ 7. **Include a separate section titled "Special Edge Cases Considerations"** to explicitly address unhappy paths. The deliverables in this section should focus solely on development outcomes to address these scenarios.
578
+
579
+ **Output Format:**
580
+ - Title: Component Name as Table Title (e.g., "Frontend Components")
581
+ - Table Structure:
582
+ - **Subcomponent**: Subcomponent name (e.g., "User Authentication and Authorization").
583
+ - **Task**: Task name (e.g., "Login Interface").
584
+ - **Deliverables**: Lists of technical outcomes or deliverables (e.g., "1. Deliverable 1 2. Deliverable 2 3. Deliverable ..."), as much as possible.
585
+ - **Special Edge Cases Considerations**: A separate table to address unhappy paths, with the same structure as above.
586
+
587
+ **Example:**
588
+ Frontend Component
589
+ | **Component** | **Task** | **Deliverables** |
590
+ |--------------------------------|------------------------------|----------------------------------------------------------------------------------------------------------|
591
+ | Document Upload Interface | Implement File Upload | 1. React-based file upload component<br>2. Drag-and-drop functionality<br>3. Progress bar for uploads |
592
+
593
+ **Special Edge Cases Considerations**
594
+ | **Component** | **Task** | **Deliverables** |
595
+ |--------------------------------|------------------------------|----------------------------------------------------------------------------------------------------------|
596
+ | Input Validation | Handle Invalid Inputs | 1. File format validation logic<br>2. File size validation logic<br>3. Error messaging UI for invalid inputs |
597
+
598
+ **Tech Stack:**
599
+ - Backend: FastAPI, Python
600
+ - Frontend: React
601
+ - Infrastructure: AWS, PostgreSQL, Redis, Docker, Alembic
602
+
603
+ **Objective:**
604
+ The final output should deliver a **clear, actionable, and project-specific list of components**, with a separate section for handling unhappy paths. The goal is to provide a foundation for developing granular subcomponents and tasks, ensuring alignment with the unique requirements of this project. A logical grouping and a clean structure that enhance the table's clarity and technical usability is expected.
605
+ """,
606
  inputs=["generated_prd"],
607
  outputs=["generated_page_dev_components"],
608
  model=ModelType.O1_MINI,
 
631
  ),
632
 
633
  "generate_engage_dev_components": PromptConfig(
634
+ prompt=
635
+ """
636
+ **Role**:
637
+ You are a technical project manager and software architect specializing in chatbot development.
638
+ Your task is to create a granular "Development Component List" by extracting components from the **Project Requirement Document (PRD)** and the **Chatbot Intent List (CIL)**.
639
+ The output should align with the **project's specific goals** and focus on **technical implementation**.
640
+
641
+ ### **Objectives**:
642
+ 1. **Complements the CIL**: Ensure the components directly support the intents and workflows defined in the CIL.
643
+ 2. **Aligns with the PRD**: Incorporate project-specific goals, business logic, and technical requirements from the PRD.
644
+ 3. **Focus on Technical Implementation**: Provide actionable development tasks and deliverables.
645
+ 4. **Project-Specific**: Avoid generic components; extract details directly from the PRD and CIL.
646
+ 5. **Granular and Readable**: Maintain a balance between granularity and readability for actionable outputs.
647
+
648
+ **Key Focus**:
649
+ - Translate the **intents, workflows, fallbacks, and unhappy paths** from the CIL into **specific development deliverables** or tasks.
650
+ - Ensure that the deliverables are **actionable** and **context-aware**, aligning with the project's technical stack and business requirements.
651
+
652
+ ### **Inputs**:
653
+ 1. **PRD**: Contains functional, non-functional, technical, and compliance requirements for the chatbot project.
654
+ 2. **CIL**: Lists intents the chatbot will handle, including **fallbacks** and **unhappy paths** with their corresponding workflows.
655
+
656
+ ---
657
+
658
+ ### **Process**:
659
+ 1. **Read and Analyze the PRD and CIL**:
660
+ - Carefully review the PRD to understand the **project's specific goals, business logic, and technical requirements**.
661
+ - Review the CIL to identify the intents, workflows, fallbacks, and unhappy paths the chatbot will handle.
662
+
663
+ 2. **Extract Granular Components**:
664
+ - Identify *ALL components* in the PRD and the CIL.
665
+ - Components should directly reference functionalities or deliverables related to the project.
666
+ - Identify key business logic.
667
+ - Prioritize components that provide high business value, such as core functionality.
668
+ - Organize components into logical categories. Avoid Testing, Support and Maintenance, Documentation and Training components.
669
+
670
+ 3. **Identify Unhappy Paths and Fallbacks**:
671
+ - For each intent including the suggested or additional intents, identify unhappy paths or fallbacks (e.g., errors, exceptions, or edge cases) provided in the CIL. Define **specific development deliverables** or tasks. Examples include:
672
+ - **Order Creation**: Implement SKU validation logic and inventory availability checks.
673
+ - **Payment Processing**: Integrate payment retry mechanisms and notify users of payment status.
674
+ - **Document Extraction**: Add field validation logic and manual upload options for failed extractions.
675
+ - **Input Validation**: Implement file size and type checks, and provide fallback instructions for unsupported inputs.
676
+ - Ensure that the deliverables are **actionable** and **context-aware**.
677
+
678
+ ---
679
+
680
+ ### **Tech Stack**:
681
+ - **Backend**: FastAPI, Python
682
+ - **Chatbot Tools**: Chatbot Builder, COZE, Yellow.ai
683
+ - **Infrastructure**: AWS, PostgreSQL, Redis, Docker, Alembic
684
+
685
+ ---
686
+
687
+ ### **Output**:
688
+ - Compile a list of components from the PRD and CIL.
689
+ - Use a clear, tabular format with **project-specific context** for each component:
690
+ - Avoid using "Handle" for the Subcomponent naming.
691
+
692
+ | **Component** | **Subcomponent** | **Deliverables** |
693
+ |---------------------|------------------------|--------------------------------|
694
+ | **<Component 1>** | **<Subcomponent 1>** | 1. <Deliverable 1> <br> 2. <Deliverable 2> <br> 3. <Deliverable 3> |
695
+
696
+ Return the final tables and nothing else. Do not provide a summary at the end.
697
+ """,
698
  inputs=["generated_prd" , "generated_intent_list"],
699
  outputs=["generated_engage_dev_components"],
700
  model=ModelType.O1_MINI,
 
723
  ),
724
 
725
  "reformat_page_dev_components": PromptConfig(
726
+ prompt=
727
+ """
728
+ As a Senior Software Architect with 20+ years of experience, you are tasked to reformat the generated development components for a document extraction project to ensure consistent and standardized naming conventions. The reformatted components must align with the project's goals, technical stack, and compliance requirements as outlined in the provided Project Requirement Document (PRD).
729
+
730
+ **Specific Requirements:**
731
+ 1. **Standardize Naming Conventions**:
732
+ - Use **descriptive and intuitive names** for components, subcomponents, tasks, and deliverables.
733
+ - Ensure naming is **consistent across all tables** (Frontend, Backend, Integration, Infrastructure, Database, and Special Edge Cases).
734
+ - Avoid overly technical jargon unless necessary for clarity.
735
+ - Use **action-oriented language** for tasks and deliverables (e.g., "Implement file upload functionality" instead of "File upload functionality").
736
+
737
+ 2. **Common Web Development Document Extraction Components**:
738
+ - Include standard components such as:
739
+ - **Document Upload Interface**
740
+ - **Document Processing Engine**
741
+ - **Data Validation Module**
742
+ - **OCR Integration**
743
+ - **Data Reconciliation Module**
744
+ - **Report Generation Interface**
745
+ - **User Authentication and Authorization**
746
+ - **Notification and Alert System**
747
+ - **API Integrations**
748
+ - **Database Schema Design**
749
+ - **Error Handling and Logging**
750
+ - **Scalability and Load Balancing**
751
+
752
+ 3. **Reformat the Generated Components**:
753
+ - Ensure all components, subcomponents, tasks, and deliverables follow the standardized naming conventions.
754
+ - Group related tasks and deliverables under the appropriate subcomponents.
755
+ - Maintain a clean and logical structure for readability and technical usability.
756
+
757
+ **Output Format:**
758
+ - Title: Component Name as Table Title (e.g., "Frontend Components")
759
+ - Table Structure:
760
+ - **Subcomponent**: Subcomponent name (e.g., "Document Upload Interface").
761
+ - **Task**: Task name (e.g., "Implement file upload functionality").
762
+ - **Deliverables**: Lists of technical outcomes or deliverables
763
+ """,
764
  inputs=["generated_page_dev_components"],
765
  outputs=["reformatted_dev_components"],
766
  model=ModelType.O1_MINI,
 
789
  ),
790
 
791
  "reformat_engage_dev_components": PromptConfig(
792
+ prompt=
793
+ """
794
+ **Role**:
795
+ You are a technical project manager and software architect responsible for ensuring consistent naming conventions in chatbot development. Your task is to standardize the naming of common chatbot components and subcomponents while preserving all existing components and subcomponents, including unique, domain-specific components.
796
+
797
+ ### **Process**:
798
+ 1. **Analyze the Development Component List (DCL)**:
799
+ - Identify components and subcomponents that are common across chatbot projects (e.g., "Chatbot Engine" vs. "Chatbot Interface").
800
+ - Identify domain-specific components that should remain unchanged.
801
+ - Ensure that technical terminology and categorization are consistent across components, particularly for components of similar concepts, to maintain uniformity.
802
+
803
+ 2. **Standardize Common Chatbot Components**:
804
+ - Cross-check common components against a standardized reference list (see below).
805
+ - Rename only the common chatbot components to their standardized equivalent while keeping the intended meaning intact.
806
+ - Ensure all subcomponents are retained and placed under the correct standardized component.
807
+
808
+ 3. **Preserve All Existing Components and Subcomponents**:
809
+ - Ensure that no components or subcomponents are removed during standardization
810
+ - If a component is highly specific to a particular use case, do not modify it
811
+ - Retain any unique features that are tailored to the project
812
+ - Verify that each component has appropriate subcomponents and deliverables
813
+
814
+ ### **Standardized Naming Conventions (for Common Chatbot Components)**:
815
+ | **Standard Component** | **Common Subcomponents** |
816
+ |------------------------------|--------------------------------|
817
+ | Core Chatbot Engine | Intent Recognition, NER, Context Management, Dialog Flow, Response Generation, Multi-turn Handling |
818
+ | LLM Processing Pipeline | Text Preprocessing, Entity Extraction |
819
+ | Integration Layer | API Gateway, CRM/ERP Integration, Knowledge Base Integration, Third-party Services |
820
+ | Data Management | ETL Pipelines, Database Operations, Data Versioning, Cache Management |
821
+ | Security and Compliance | Authentication, Authorization, Data Encryption, Audit Logging, GDPR/PDPA Compliance |
822
+ | Infrastructure | AWS/Cloud Setup, Docker Containerization, CI/CD Pipeline, Environment Management |
823
+ | Error Handling | Error Logging, Exception Management, Fallback Responses, Escalation Workflows |
824
+ | User Management | User Authentication, Profile Management, Session Handling, Role-based Access |
825
+ | Monitoring & Analytics | Performance Metrics, Usage Analytics, Error Tracking, Real-time Dashboards |
826
+ | Feedback & Learning | User Feedback Collection, Model Performance Tracking, A/B Testing, Continuous Learning |
827
+ | Conversation Management | Context Retention, Topic Switching, Language Support, Flow Control |
828
+ | Channel Management | Web Integration, Mobile Support, Social Media Platforms |
829
+ | AI/ML Components | LLM Integration, RAG Implementation, Fine-tuning Pipeline, Vector Store Management |
830
+ | Business Logic Layer | Custom Rules Engine, Workflow Automation, Business Process Integration |
831
+ | Knowledge Management | Content Management, FAQ Updates, Knowledge Graph, Document Processing |
832
+ | Human Handoff | Agent Routing, Queue Management, Conversation History Transfer, Live Chat Support |
833
+ | Performance & Scaling | Load Balancing, Auto-scaling, Performance Optimization, Resource Management |
834
+ | Development Tools | Testing Framework, Debugging Tools, Development Environment, Documentation |
835
+
836
+ ### **Output**:
837
+ - Return the Updated Component List in a clear, structured format.
838
+ - Maintain the tabular format while applying consistent naming conventions.
839
+ - Each component should be distinct and well-defined.
840
+ - No repeated components or overlapping functionality.
841
+ - Ensure that all components and subcomponents are included in the output.
842
+ - Provide only the corrected table and nothing else.
843
+ """,
844
  inputs=["generated_engage_dev_components"],
845
  outputs=["reformatted_dev_components"],
846
  model=ModelType.O1_MINI,
 
869
  ),
870
 
871
  "reformat_hybrid_dev_components": PromptConfig(
872
+ prompt=
873
+ """
874
+ You are an expert in software architecture and systems integration. Your task is to merge two component lists: (1) a chatbot development component list and (2) a general/document-related component list. The goal is to create a unified list that consolidates overlapping elements while eliminating redundancies.
875
+
876
+ Follow these steps:
877
+
878
+ Identify Similarities: Compare both lists and merge similar components without losing granularity. Retain specific steps and detailed tasks.
879
+ Remove Redundancies: Eliminate duplicate entries by consolidating their tasks and deliverables while preserving the essential details of each task. Ensure that no components are removed
880
+ Structure Logically: Organize the final list into major categories (e.g., Frontend, Backend, Document Handling.)
881
+
882
+ Output Format: Present the final list in a structured tabular format with the following columns:
883
+ Component: The high-level functionality grouping.
884
+ Subcomponent: A more granular breakdown of each function.
885
+ Deliverables: A list of specific tasks associated with the subcomponent. Avoid Filler words("Enable", "Support", "Implement")
886
+
887
+ Output:
888
+ A refined, logically structured, and optimized merged component list in tabular format.
889
+
890
+ """,
891
  inputs=["generated_engage_dev_components","generated_page_dev_components"],
892
  outputs=["reformatted_dev_components"],
893
  model=ModelType.O1_MINI,
894
+ description="Reformat Hybrid Development Components",
895
  step="Step 1 : Scope & Components",
896
  ui={
897
  "reformat_hybrid_dev_components_prompt_editor": UIConfig(
 
916
  ),
917
 
918
  "generate_intents_csv": PromptConfig(
919
+ prompt=
920
+ """
921
+ You will be provided with an intent list. Your task is to convert the entire list into a CSV string, ensuring that all rows from the list are included without omission. The CSV should contain the following columns: "intent_type", "intent", and "workflow". Follow these rules:
922
+
923
+ Enclose all text values, including the column headers, in double quotes ("").
924
+ Retain numeric values as-is, without any quotes.
925
+ Ensure the CSV string is formatted correctly, with commas separating the values and each row represented on a new line.
926
+ Your response must include every row and correctly reflect the structure and content of the intent list. Just return the CSV text and nothing else. Omit any code guards ```
927
+ """,
928
  inputs=["generated_intent_list"],
929
  outputs=["generated_intents_csv"],
930
  model=ModelType.O1_MINI,
 
948
  ),
949
 
950
  "generate_page_plan_test_mandays": PromptConfig(
951
+ prompt=
952
+ """
953
+ You are an experienced project manager tasked to create a detailed task breakdown for a project based on the planning and testing component list.
954
+
955
+ Objective:
956
+ Generate a structured CSV output with manday estimates for each planning and testing component.
957
+
958
+ Instructions:
959
+ 1. Use the planning and testing component list to identify all components
960
+ 2. For each component:
961
+ - Estimate mandays between 0.2 and 5 days based on real-world complexity
962
+ - Provide a clear description of deliverables and outcomes
963
+ - Ensure estimates account for potential delays or complications
964
+
965
+ Output Format Requirements:
966
+ - Generate a CSV with EXACTLY these column headers: "component,mandays,description"
967
+ - Each row must have all three columns filled
968
+ - Numeric values should not be quoted
969
+ - Text values must be enclosed in double quotes
970
+ - No empty rows or missing values
971
+
972
+ Example Output:
973
+ component,mandays,description
974
+ "Project Planning",2.5,"Detailed project planning including timeline and resource allocation"
975
+ "Requirements Analysis",1.5,"Analysis and documentation of system requirements"
976
+
977
+ Return only the CSV content, no code blocks or additional text.
978
+ """,
979
  inputs=["generated_plan_test_components"],
980
  outputs=["generated_plan_test_mandays"],
981
  model=ModelType.O1_MINI,
982
+ description="Generate Page planning and testing mandays",
983
  step="Step 2 : Mandays & Quotation",
984
  sub_step="Step 2.1 : Generate Mandays",
985
  ui={
 
998
  ),
999
 
1000
  "generate_engage_plan_test_mandays": PromptConfig(
1001
+ prompt=
1002
+ """
1003
+ You are an experienced project manager tasked to create a detailed task breakdown for a project based on the planning and testing component list and the development component list.
1004
+
1005
+ Objective:
1006
+ Generate a structured output organized by component. Provide a table with the following columns:
1007
+ - Component: The name of the component, as defined in the component list.
1008
+ - Manday: The estimated effort required for a one-person team to complete the task, based on real-world complexity and scope.
1009
+ - Description: A detailed explanation of the task, including deliverables or outcomes, as defined in the component list.
1010
+
1011
+ Instruction:
1012
+ 1. Input:
1013
+ - Use the planning and testing component list identify all components and subcomponents. The hierarchy of the document is Phase -> Component -> Subcomponent -> Task
1014
+
1015
+ 2. Manday Estimation:
1016
+ Assign a manday estimate for each component based on the complexity and effort required, ensuring it falls between 0.2 and 5 days.
1017
+ Ensure estimates are based on real-world complexity and scope while accounting for potential delays or complications.
1018
+
1019
+ **Output Format**:
1020
+ Create a CSV file with the following columns:
1021
+ "component",,"subcomponent","mandays","description"
1022
+ Just return the csv text and NOTHING else, omit the ``` code guards.
1023
+ """,
1024
  inputs=["generated_plan_test_components"],
1025
  outputs=["generated_plan_test_mandays"],
1026
  model=ModelType.O1_MINI,
1027
+ description="Generate Engage planning and testing mandays",
1028
  step="Step 2 : Mandays & Quotation",
1029
  sub_step="Step 2.1 : Generate Mandays",
1030
  ui={
 
1043
  ),
1044
 
1045
  "generate_hybrid_plan_test_mandays": PromptConfig(
1046
+ prompt=
1047
+ """
1048
+ You are an experienced project manager tasked to create a detailed task breakdown for a Hybrid project involving both Document Extraction and Chatbot functionalities. The project involves planning and testing components related to document processing, extraction, user interactions, and conversational workflows.
1049
+
1050
+ Objective:
1051
+ Generate structured CSV outputs with manday estimates for each planning and testing component, separated into Document Extraction and Chatbot sections.
1052
+
1053
+ Instructions:
1054
+ 1. Input:
1055
+ - Use the planning and testing component list to identify all components and subcomponents.
1056
+ - The hierarchy of the document is: Phase -> Component -> Subcomponent -> Task.
1057
+
1058
+ 2. Manday Estimation:
1059
+ - Assign manday estimates for each component or subcomponent based on complexity and effort required, ranging from 0.2 to 5 days.
1060
+ - Ensure estimates account for real-world complexity, potential delays, and complications.
1061
+
1062
+ 3. Output Format Requirements:
1063
+ - Generate TWO separate CSV files:
1064
+ - First, for the Document Extraction section:
1065
+ - Columns: "component, mandays, description".
1066
+ - Description must include deliverables and outcomes.
1067
+ - Second, for the Chatbot section:
1068
+ - Columns: "component, subcomponent, mandays, description".
1069
+ - Subcomponents must be clearly listed under each component.
1070
+ - Description must include deliverables and outcomes.
1071
+ - Clearly indicate Section Breaks between the two files by including:
1072
+ ----SECTION BREAK----
1073
+
1074
+ 4. Output:
1075
+ - Return **ONLY** the CSV content, no code blocks or additional text.
1076
+ - Ensure all rows have all columns filled.
1077
+ - Numeric values should not be quoted.
1078
+ - Text values must be enclosed in double quotes.
1079
+ - No empty rows or missing values.
1080
+ - Omit the ``` code guards.
1081
+ """,
1082
  inputs=["generated_plan_test_components"],
1083
  outputs=["generated_plan_test_mandays"],
1084
  model=ModelType.O1_MINI,
1085
+ description="Generate Hybrid planning and testing mandays",
1086
  step="Step 2 : Mandays & Quotation",
1087
  sub_step="Step 2.1 : Generate Mandays",
1088
  ui={
 
1101
  ),
1102
 
1103
  "generate_dev_mandays": PromptConfig(
1104
+ prompt=
1105
+ """
1106
+ You are an experienced project manager tasked to create a detailed task breakdown for a project based on the development component list.
1107
+
1108
+ Objective:
1109
+ Generate a structured output organized by component. Provide a table with the following columns:
1110
+ - Component: The name of component, as defined in the component list.
1111
+ - Subcomponent: The subcategory or module within the component, as defined in the component list.
1112
+ - Manday: The estimated effort required for a one-person team to complete the task, based on real-world complexity and scope.
1113
+ - Description: A detailed explanation of the task, including deliverables or outcomes, as defined in the component list.
1114
+
1115
+ Instruction:
1116
+ 1. Input:
1117
+ - Use the planning and testing component list and the development component list to identify all components, subcomponents, tasks and subtasks.
1118
+
1119
+ 2. Manday Estimation:
1120
+ For each subcomponent, estimate the manday based on the effort required for a single person to complete it, ensuring it falls between 0.5 and 7 days.
1121
+ - Ensure estimates are based on real-world complexity and scope while accounting for potential delays or complications.
1122
+
1123
+ **Output Format**:
1124
+ Create a CSV file with the following columns:
1125
+ "component","subcomponent","mandays","description"
1126
+ Make sure you encapsulate all TEXT column in "", and keep numeric columns as is.
1127
+ Just return the csv text and NOTHING else, omit the ``` code guards.
1128
+ Numeric values should not be quoted
1129
+ Text values must be enclosed in double quotes
1130
+ """,
1131
  inputs=["reformatted_dev_components"],
1132
  outputs=["generated_dev_mandays"],
1133
  model=ModelType.O1_MINI,
 
1151
  ),
1152
 
1153
  "analyze_planning_testing_mandays": PromptConfig(
1154
+ prompt=
1155
+ """
1156
+ You are an experienced project manager tasked with analyzing the provided planning and testing mandays estimates. Your goal is to identify the highest priority components that must be completed to build the MVP. Focus on components that deliver **immediate value to users** and are **critical for the core functionality** of the product.
1157
+
1158
+ Objective:
1159
+ Identify the highest priority planning and testing components that are critical for the MVP's core functionality and deliver immediate value to the business. Exclude all non-critical components that do not directly contribute to the MVP's primary functionality.
1160
+
1161
+ Key Guidelines:
1162
+ - Focus on Core Functionality: Only include components that are essential for the MVP to function and deliver immediate value to users.
1163
+ - Exclude Non-Critical Components: Do not include components related to advanced security, compliance, scalability, authentication, fallback mechanisms, or any feature that is not absolutely necessary for the MVP.
1164
+ - Prioritize Business Value: Ensure the selected components align with the business's core objectives and deliver measurable value.
1165
+ - Minimalistic Approach: Focus on the least effort required to deliver the most value.
1166
+
1167
+ Output Format Requirements:
1168
+ - Generate a CSV with EXACTLY these column headers: "component,mandays,description"
1169
+ - Each row must have all three columns filled
1170
+ - Numeric values should not be quoted
1171
+ - Text values must be enclosed in double quotes
1172
+ - No empty rows or missing values
1173
+
1174
+ Return only the CSV content, no code blocks or additional text.
1175
+ """,
1176
  inputs=["generated_plan_test_mandays"],
1177
  outputs=["identified_planning_testing_components"],
1178
  model=ModelType.O1_MINI,
 
1196
  ),
1197
 
1198
  "analyze_development_mandays": PromptConfig(
1199
+ prompt=
1200
+ """
1201
+ You are an experienced project manager tasked with analyzing the provided development mandays estimates. Your goal is to assign a priority level to each development component based on its importance to the MVP's core functionality and business value. Focus exclusively on components that deliver immediate value to users and are critical for the core functionality of the product.
1202
+
1203
+ Key Guidelines:
1204
+ - **Focus on Core Functionality:** Assign priority levels based on how essential each component is for the MVP to function and deliver immediate value to users.
1205
+ - **Exclude Non-Critical Considerations:** Do not label components related to advanced security, compliance, scalability, authentication, fallback mechanisms, or any feature that is **NOT** absolutely necessary for the MVP as "high" priority.
1206
+ - **Prioritize Business Value:** Ensure the priority levels align with the business's core objectives and deliver measurable value.
1207
+ - **Minimalistic Approach:** Focus on the least effort required to deliver the most value when assigning priority levels.
1208
+ - **Assign Priority Levels:** Label each development component as "high," "medium," or "low" priority based on its importance to the MVP's core functionality and business value.
1209
+
1210
+ Objective:
1211
+ Assign a priority level ("high," "medium," or "low") to each development component, ensuring the output reflects the importance of each component to the MVP's core functionality and business value. Retain all original components in the list.
1212
+
1213
+ Important Notes:
1214
+ - If a component is not directly tied to the core functionality or can be deferred to a later phase, assign it a lower priority ("medium" or "low").
1215
+ - Do not exclude any components from the list, even if they are low priority.
1216
+ - Only assign "high" priority to components that are absolutely necessary for the MVP to function.
1217
+
1218
+ Output Format Requirements:
1219
+ Convert the entire list into a CSV string, ensuring that all rows from the list are included without omission. The CSV should contain the following columns: "component","subcomponent","mandays","description". Follow these rules:
1220
+
1221
+ Enclose all text values, including the column headers, in double quotes ("").
1222
+ Retain numeric values as-is, without any quotes.
1223
+ Ensure the CSV string is formatted correctly, with commas separating the values and each row represented on a new line.
1224
+ Your response must include every row and correctly reflect the structure and content of the component list. Just return the CSV text and nothing else. Omit any code guards ```.
1225
+ """,
1226
  inputs=["generated_dev_mandays"],
1227
  outputs=["identified_development_components"],
1228
  model=ModelType.O1_MINI,
 
1246
  ),
1247
 
1248
  "analyze_MVP_intents": PromptConfig(
1249
+ prompt=
1250
+ """
1251
+ You are an experienced project manager tasked with analyzing the provided intent mandays estimates. Your goal is to assign a priority level to each intent based on its importance to the MVP's core functionality and business value. Focus exclusively on intents that deliver immediate value to users and are critical for the core functionality of the product.
1252
+
1253
+ Key Guidelines:
1254
+ - **Focus on Core Functionality:** Assign priority levels based on how essential each intent is for the MVP to function and deliver immediate value to users.
1255
+ - **Exclude Non-Critical Considerations:** Do not label intents related to advanced security, compliance, scalability, authentication, fallback mechanisms, or any feature that is **NOT** absolutely necessary for the MVP as "high" priority.
1256
+ - **Prioritize Business Value:** Ensure the priority levels align with the business's core objectives and deliver measurable value.
1257
+ - **Minimalistic Approach:** Focus on the least effort required to deliver the most value when assigning priority levels.
1258
+ - **Assign Priority Levels:** Label each intent as "high," "medium," or "low" priority based on its importance to the MVP's core functionality and business value.
1259
+
1260
+ Objective:
1261
+ Assign a priority level ("high," "medium," or "low") to each development intents , ensuring the output reflects the importance to the MVP's core functionality and business value. Retain all original components in the list.
1262
+
1263
+ Important Notes:
1264
+ - If an intent is not directly tied to the core functionality or can be deferred to a later phase, assign it a lower priority ("medium" or "low").
1265
+ - Do not exclude any intents from the list, even if they are low priority.
1266
+ - Only assign "high" priority to intents that are absolutely necessary for the MVP to function.
1267
+
1268
+ Output Format Requirements:
1269
+ Convert the entire list into a CSV string, ensuring that all rows from the list are included without omission. The CSV should contain the following columns: "intent_type", "intent", and "workflow". Follow these rules:
1270
+
1271
+ Enclose all text values, including the column headers, in double quotes ("").
1272
+ Retain numeric values as-is, without any quotes.
1273
+ Ensure the CSV string is formatted correctly, with commas separating the values and each row represented on a new line.
1274
+ Your response must include every row and correctly reflect the structure and content of the intent list. Just return the CSV text and nothing else. Omit any code guards ```.
1275
+ """,
1276
  inputs=["generated_intent_list"],
1277
  outputs=["identified_mvp_intents"],
1278
  model=ModelType.O1_MINI,
 
1296
  ),
1297
 
1298
  "recalculate_page_MVP_mandays": PromptConfig(
1299
+ prompt=
1300
+ """
1301
+ Based on the identified priority components from the previous analysis, your task is to recalculate the mandays estimates to ensure they fit within the time given (e.g., days or weeks) in building the MVP mentioned in the Project Requirement Document (PRD).
1302
+
1303
+ Objective:
1304
+ Reread the entire PRD to identify the time limit of the MVP, then recalculate the manday estimates for the identified priority development components to ensure they fit within the timeline specified in the Project Requirement Document (PRD) to build the MVP. Adjust development mandays while maintaining feasibility and ensuring core functionality delivery.
1305
+
1306
+ Key Guidelines:
1307
+ 1. Convert Time Estimates: Convert all time estimates into raw mandays:
1308
+ - 1 week = 7 mandays
1309
+ - 1 month = 28-31 mandays
1310
+ - If input is in days, use as is.
1311
+ - If input is a range (e.g., 1-2 weeks), use the average (1.5 weeks = 7.5 mandays).
1312
+ 2. **Preserve All Components:** Do not remove any components from the list, even if they are low priority.
1313
+ 3. **Adjust Mandays Based on Priority:**
1314
+ - For **high priority components**, allocate the majority of mandays (e.g., 1-3) to ensure critical functionalities are minimally viable. However, components such as authentication, security, etc., can be set to 0.
1315
+ - For **medium priority components**, allocate minimal mandays (e.g., 1-2) or set them to 0, as these can be deferred without impacting core functionalities.
1316
+ - For **low priority components**, set mandays to 0, as these are not essential for the MVP.
1317
+ 4. **Retain Planning/Testing Components:** All identified planning and testing components must be retained in the list but have their mandays set to 0. Do not remove them.
1318
+ 5. **Iterative Adjustment:** If the total mandays for the development components still exceed the MVP timeline after the first adjustment, further reduce or set the mandays to 0 on high and medium priority components if necessary until the total fits within the timeline.
1319
+ - **MVP Timeline Constraint:** Ensure the final total mandays do not exceed the MVP timeline.
1320
+ 6. **Justify Adjustments:** Provide a brief explanation for any adjustments made to the mandays, ensuring they align with the timeline and maintain the MVP's core functionality.
1321
+
1322
+ Output Format Requirements:
1323
+ - **Separate Planning/Testing Components and Development Components:**
1324
+ - Planning/Testing Components:
1325
+ component,mandays,description,priority
1326
+ "Requirements Analysis",0,"Critical user requirements and system specifications","High"
1327
+ - Development Components:
1328
+ component,subcomponent,mandays,description,priority
1329
+ "Frontend","Document Upload Interface",3,"Essential for core functionality of automated document processing","High"
1330
+ - Include the "Priority" column for all components.
1331
+ - Include the total mandays for the revised plan and confirm whether it fits within the MVP timeline.
1332
+ - Ensure the output is concise and actionable.
1333
+
1334
+ Important Notes:
1335
+ - If the total mandays still exceed the timeline after adjustments, prioritize further reductions in medium priority components while keeping high priority components as intact as possible.
1336
+ - Do not remove any components from the list, even if their mandays are reduced to 0.
1337
+ """,
1338
  inputs=["identified_planning_testing_components", "identified_development_components" , "generated_prd"],
1339
  outputs=["revised_mandays_estimates"],
1340
  model=ModelType.O1_MINI,
 
1364
  ),
1365
 
1366
  "recalculate_engage_MVP_mandays": PromptConfig(
1367
+ prompt=
1368
+ """
1369
+ Based on the identified priority components and the identified priority intent list from the previous analysis , your task is to recalculate the mandays estimates to ensure they fit within the time given (e.g., days or weeks) in building the MVP mentioned in the Project Requirement Document (PRD).
1370
+
1371
+ Objective:
1372
+ Read the entire PRD and identify the time constraint; recalculate the manday estimates for both development components and intents to ensure the total combined mandays fit within the MVP timeline specified in the Project Requirement Document (PRD). Adjust mandays while maintaining feasibility and ensuring core functionality delivery. Retain all components and intents in the list, even if their mandays are set to 0.
1373
+
1374
+ Key Guidelines:
1375
+ 1. Total Combined Mandays Constraint:
1376
+ The combined total mandays for development components and intents must not exceed MVP timeline.
1377
+ Ensure the total is calculated as:
1378
+ Total Combined Mandays = Development Components Mandays + Intents Mandays ≤ MVP timeline.
1379
+ Convert Time Estimates:
1380
+ Convert all time estimates into raw mandays:
1381
+ - 1 week = 7 mandays
1382
+ - 1 month = 28-31 mandays
1383
+ - If input is in days, use as is.
1384
+ - If input is a range (e.g., 1-2 weeks), use the average (1.5 weeks = 7.5 mandays).
1385
+
1386
+ 2. Preserve All Components and Intents:
1387
+
1388
+ 3. Do not remove any components or intents from the list, even if their mandays are set to 0.
1389
+
1390
+ 4. Retain all planning and testing components but set their mandays to 0.
1391
+
1392
+ 5. Adjust Mandays Based on Priority:
1393
+ - For high priority, allocate the majority of mandays to ensure critical functionalities are minimally viable. Significant reductions may be necessary for non-core features (e.g., authentication, security).
1394
+ - For medium priority, allocate minimal mandays or set them to 0, as these can be deferred without impacting core functionalities.
1395
+ - For low priority, set mandays to 0, as these are not essential for the MVP.
1396
+
1397
+ 6. Iterative Adjustment:
1398
+ - If the total combined mandays still exceed the MVP timeline after the first adjustment, further reduce mandays for both high and medium priority components and intents until the total fits within the timeline.
1399
+ - Set mandays to 0 for non-essential components and intents if necessary, but do not remove them from the list.
1400
+
1401
+ 7. Strict Calculation for Intents:
1402
+ Properly calculate the total mandays for development components and MVP intents. Ensure the total combined mandays for both development components and MVP intents DO NOT exceed the MVP timeline.
1403
+
1404
+ 8. Justify Adjustments:
1405
+ - Provide a brief explanation for any adjustments made to the mandays, ensuring they align with the timeline and maintain the MVP's core functionality.
1406
+
1407
+ Output Format Requirements:
1408
+ - **Separate Planning/Testing Components , Development Components and MVP Intents:**
1409
+ - Planning/Testing Components:
1410
+ component,mandays,description,priority
1411
+ "Requirements Analysis",0,"Critical user requirements and system specifications","High"
1412
+ - Development Components:
1413
+ component,subcomponent,mandays,description,priority
1414
+ "Frontend","Document Upload Interface",3,"Essential for core functionality of automated document processing","High"
1415
+ - MVP Intents:
1416
+ intent_type,intent,workflow,mandays
1417
+ "Single-Step","Create New Order","1. User initiates order creation → 2. Extract order details from user input → 3. Save order to database → 4. Confirm creation to user",0.3
1418
+ - Include the "Priority" column for all components.
1419
+ - Include the total mandays for the revised plan and confirm whether it fits within the MVP timeline.
1420
+ - Ensure the output is concise and actionable.
1421
+
1422
+ Important Notes:
1423
+ - If the total mandays still exceed the timeline after adjustments, prioritize further reductions in medium priority components and intents while keeping high priority components as intact as possible.
1424
+ - Do not remove any components or intents from the list, even if their mandays are reduced to 0.
1425
+ """,
1426
  inputs=["identified_planning_testing_components", "identified_development_components", "identified_mvp_intents", "generated_prd"],
1427
  outputs=["revised_mandays_estimates"],
1428
  model=ModelType.O1_MINI,
 
1452
  ),
1453
 
1454
  "generate_page_MVP_mandays": PromptConfig(
1455
+ prompt=
1456
+ """
1457
+ Using the revised mandays estimates from the previous step, format the output into two clearly separated CSV sections: one for Planning/Testing Components and another for Development Components.
1458
+
1459
+ Objective:
1460
+ Structure the output to clearly delineate the two sections while maintaining the original column structure. Ensure that there are no duplicate components or subcomponents in the output.
1461
+
1462
+ Output Format Requirements:
1463
+ - First Section - Planning & Testing Components:
1464
+ component,mandays,description
1465
+ "Requirements Analysis",0,"Critical user requirements and system specifications"
1466
+
1467
+ - Second Section - Development Components:
1468
+ component,subcomponent,mandays,description
1469
+ "Backend","Core API Development",3,"Essential API endpoints for basic functionality"
1470
+
1471
+ Important:
1472
+ - Each section must maintain its original column structure.
1473
+ - Sections MUST be separated by the marker: "----SECTION BREAK----".
1474
+ - Include ALL components but ensure there are no duplicates. If a component or subcomponent appears in both sections, include it only in the most relevant section.
1475
+ - No empty rows or missing values.
1476
+ - Text values must be in double quotes.
1477
+ - Numbers must not be in quotes.
1478
+
1479
+ Return only the CSV content with the section break, no additional text or explanations.
1480
+ """,
1481
  inputs=["revised_mandays_estimates"],
1482
  outputs=["generated_MVP_mandays"],
1483
  model=ModelType.O1_MINI,
 
1507
  ),
1508
 
1509
  "generate_engage_MVP_mandays": PromptConfig(
1510
+ prompt=
1511
+ """
1512
+ Using the revised mandays estimates from the previous step, format the output into three clearly separated CSV sections: one for Planning/Testing Components, one for Development Components, and another for MVP Intents.
1513
+
1514
+ Objective:
1515
+ Structure the output to clearly delineate the three sections while maintaining the original column structure. Ensure that there are no duplicate components or subcomponents in the output.
1516
+
1517
+ Output Format Requirements:
1518
+ - First Section - Planning & Testing Components:
1519
+ component,mandays,description
1520
+ "Requirements Analysis",0,"Critical user requirements and system specifications"
1521
+
1522
+ - Second Section - Development Components:
1523
+ component,subcomponent,mandays,description
1524
+ "Backend","Core API Development",3,"Essential API endpoints for basic functionality"
1525
+
1526
+ - Third Section - MVP Intents:
1527
+ intent_type,intent,workflow,mandays
1528
+ "Single-Step","Create New Order","1. User initiates order creation → 2. Extract order details from user input → 3. Save order to database → 4. Confirm creation to user",0.3
1529
+
1530
+ Important:
1531
+ - Each section must maintain its original column structure.
1532
+ - Sections MUST be separated by the marker: "----SECTION BREAK----".
1533
+ - Include ALL the components and intents.
1534
+ - No empty rows or missing values.
1535
+ - Text values must be in double quotes.
1536
+ - Numbers must not be in quotes.
1537
+
1538
+ Return only the CSV content with the section break, no additional text or explanations.
1539
+ """,
1540
  inputs=["revised_mandays_estimates"],
1541
  outputs=["generated_MVP_mandays"],
1542
  model=ModelType.O1_MINI,
 
1572
  ),
1573
 
1574
  "generate_page_MVP_prd": PromptConfig(
1575
+ prompt=
1576
+ """
1577
+ Generate a comprehensive and structured Project Requirement Document (PRD) for a Minimum Viable Product (MVP) using the following inputs:
1578
+ 1. **General PRD Guidelines**: Use the provided general PRD as a framework for structure, tone, and level of detail. This includes sections like Introduction, Scope, Functional Requirements, Non-Functional Requirements, Technical Architecture, Workflow, Testing, Deployment, and Appendices.
1579
+ 2. **Revised Mandays Estimates**: Incorporate the specific MVP components, including their descriptions, mandays estimates, and functionalities, into the relevant sections of the PRD. Ensure all details are accurately reflected.
1580
+
1581
+ Follow these instructions:
1582
+ - Retain ALL specific details, metrics, and constraints from both the general PRD and MVP components.
1583
+ - Pay special attention to the time constraint for building the MVP, as outlined in the inputs.
1584
+ - Do not add any context or assumptions beyond the provided inputs.
1585
+ - Do not exclude any details from the inputs.
1586
+ - Structure the document to ensure clarity, logical flow, and readability and use tabular format whenever possible.
1587
+ - Use the title "Project Requirements" for the document.
1588
+
1589
+ The output should be a well-organized PRD that combines the general PRD guidelines with the specific MVP components, ensuring alignment with the project's goals and constraints.
1590
+ """,
1591
  inputs=["generated_prd" , "generated_MVP_mandays"],
1592
  outputs=["generated_mvp_prd"],
1593
  model=ModelType.O1_MINI,
 
1616
  ),
1617
 
1618
  "generate_engage_MVP_prd": PromptConfig(
1619
+ prompt=
1620
+ """
1621
+ Generate a comprehensive and structured Project Requirement Document (PRD) for a Minimum Viable Product (MVP) using the following inputs:
1622
+ 1. **General PRD Guidelines**: Use the provided general PRD as a framework for structure, tone, and level of detail. This includes sections like Introduction, Scope, Functional Requirements, Non-Functional Requirements, Technical Architecture, Workflow, Testing, Deployment, and Appendices.
1623
+ 2. **Revised Mandays Estimates**: Incorporate the specific MVP components & intents, including their descriptions, mandays estimates, and functionalities, into the relevant sections of the PRD. Ensure all details are accurately reflected.
1624
+
1625
+ Follow these instructions:
1626
+ - Retain ALL specific details, metrics, and constraints from both the general PRD and MVP components.
1627
+ - Pay special attention to the time constraint for building the MVP, as outlined in the inputs.
1628
+ - Do not add any context or assumptions beyond the provided inputs.
1629
+ - Do not exclude any details from the inputs.
1630
+ - Structure the document to ensure clarity, logical flow, and readability and structure tabular format if necessary.
1631
+ - Use the title "Project Requirements" for the document.
1632
+
1633
+ The output should be a well-organized PRD that combines the general PRD guidelines with the specific MVP components, ensuring alignment with the project's goals and constraints.
1634
+ """,
1635
  inputs=["generated_prd" , "generated_MVP_mandays"],
1636
  outputs=["generated_mvp_prd"],
1637
  model=ModelType.O1_MINI,
 
1660
  ),
1661
 
1662
  "generate_page_BD_SOW": PromptConfig(
1663
+ prompt=
1664
+ """
1665
+ As a project manager with 20+ years of experience, you are tasked with creating a detailed Scope of Work (SOW) document. Analyze the provided project component list and scope document to generate the following sections. Follow the guidelines below to ensure a professional, structured, and client-ready output:
1666
+
1667
+ ### **Scope of Work (SOW)**
1668
+
1669
+ #### **1. Project Background**
1670
+ - Provide a brief overview of the project, including the context, problem statement, and why the project is being initiated.
1671
+ - Break down key challenges (in bullet points) the company is currently facing, quantifying the impacts where possible (e.g., lost revenue, downtime).
1672
+ - Close this section with industry trends or other relevant background information, emphasizing risks of inaction leading to the project, in 2-3 sentences.
1673
+
1674
+ #### **2. Project Objective**
1675
+ - Clearly define the project's primary goals and what constitutes success, using the following structure:
1676
+ - Goals: List specific, measurable goals (e.g., reduce processing time by 20%).
1677
+ - Outcomes: Describe tangible deliverables and metrics for success.
1678
+
1679
+ #### **3. Project Buyers & Stakeholders**
1680
+ - List key stakeholders involved in the project, e.g. buyers, end-users, and decision-makers.
1681
+ - Identify their name and roles in the project, using a table.
1682
+
1683
+ #### **4. System Flow**
1684
+ - Provide description of how the system components interact, describing what each module does and how it works.
1685
+ - Use one of the following:
1686
+ - Visual Representation: Diagram illustrating workflows or data flows between modules.
1687
+ - Textual Description: Detailed explanation of the processes and transitions
1688
+ - Use bullet point, ensure simplicity and avoid overly technical language.
1689
+
1690
+ #### **5. Modules and Functional Requirements Breakdown**
1691
+ - LEAVE THIS BLANK
1692
+
1693
+ #### **6. Acceptance Criteria**
1694
+ - Define conditions to be met, including specific, measurable criteria for project completion:
1695
+ - Link each deliverable/module to its validation or testing process (e.g., UAT).
1696
+ - Table format with the following columns:
1697
+ - Deliverable (e.g. Field Matching Automation module)
1698
+ - Criteria, starting with "able to" (e.g. able to extract, match, and change the case status accordingly)
1699
+
1700
+ #### **7. Assumptions and Pre-requisites**
1701
+ - List all planning-phase assumptions and pre-requisites, grouped into:
1702
+ - Assumptions: Detailed, scenario-based assumptions that the project relies on. Each assumption should:
1703
+ - Reference specific stakeholders (e.g., PWT staff, Mindhive).
1704
+ - Describe specific conditions or expectations (e.g., document quality, workflow stability).
1705
+ - Be written in clear, concise language.
1706
+ - Pre-requisites or dependencies: Conditions that must be met before the project can proceed. Each pre-requisite should:
1707
+ - Be specific and actionable.
1708
+ - Reference who is responsible and what needs to be done.
1709
+
1710
+ #### **8. Proposed Timeline**
1711
+ - Provide a project timeline, including:
1712
+ - Milestone
1713
+ - Expected Date/Duration
1714
+ - Outcome/Deliverable
1715
+ - Use a Gantt chart or table to visualize the timeline.
1716
+ - Ensure the output is clean, organized, and easy to read.
1717
+
1718
+ #### **9. Commercial**
1719
+ Summarize the project's commercial details in the following subsections:
1720
+ - Development Fee: Create a table summarizing the costs for development, including the product, technical work supporting, or other additional services provided
1721
+ - Subscription Fee: If applicable, create a table summarizing subscription fees for system usage.
1722
+ - Payment Terms: Include a text description of payment terms:
1723
+ - Milestones: Specify at which stages payments are due
1724
+ - Invoicing: Define invoicing intervals (e.g., monthly, quarterly) and payment deadlines
1725
+ - Other Terms: Mention late payment fees or additional terms, if applicable
1726
+ - Output Format for tables: {Service}, {Fee} (leave amount blank)
1727
+
1728
+ #### **10. Sign-Off**
1729
+ - Create a professional and formal Sign-Off section to acknowledge and approve the SOW.
1730
+ - Include a statement to clearly communicate that both parties have reviewed and agreed to the SOW.
1731
+ - Provide placeholder for each party (Company):
1732
+ - Signature
1733
+ - Name
1734
+ - Position
1735
+ - Date
1736
+
1737
+ #### **Guidelines**
1738
+ - Use bullet points for clarity.
1739
+ - Keep descriptions concise and client-friendly; avoid technical jargon unless necessary.
1740
+ - Maintain structured sections and tables for readability.
1741
+
1742
+ Expected output should be professional, well-structured, and designed to help clients and stakeholders clearly understand the project scope. I'm going to tip you for a better outcome!
1743
+ """,
1744
  inputs=["generated_prd" , "generated_plan_test_components" , "reformatted_dev_components" , "combined_cost_summary"],
1745
  outputs=["generated_BD_SOW"],
1746
  model=ModelType.O1_MINI,
 
1769
  ),
1770
 
1771
  "generate_engage_BD_SOW": PromptConfig(
1772
+ prompt=
1773
+ """
1774
+ As a project manager with 20+ years of experience, you are tasked with creating a detailed Scope of Work (SOW) document. Analyze the provided project component list and scope document to generate the following sections. Follow the guidelines below to ensure a professional, structured, and client-ready output:
1775
+
1776
+ ### **Scope of Work (SOW)**
1777
+
1778
+ #### **1. Project Background**
1779
+ - Provide a brief overview of the project, including the context, problem statement, and why the project is being initiated.
1780
+ - Break down key challenges (in bullet points) the company is currently facing, quantifying the impacts where possible (e.g., lost revenue, downtime).
1781
+ - Close this section with industry trends or other relevant background information, emphasizing risks of inaction leading to the project, in 2-3 sentences.
1782
+
1783
+ #### **2. Project Objective**
1784
+ - Clearly define the project's primary goals and what constitutes success, using the following structure:
1785
+ - Goals: List specific, measurable goals (e.g., reduce processing time by 20%).
1786
+ - Outcomes: Describe tangible deliverables and metrics for success.
1787
+
1788
+ #### **3. Project Buyers & Stakeholders**
1789
+ - List key stakeholders involved in the project, e.g. buyers, end-users, and decision-makers.
1790
+ - Identify their name and roles in the project, using a table.
1791
+
1792
+ #### **4. System Flow**
1793
+ - Provide description of how the system components interact, describing what each module does and how it works.
1794
+ - Use one of the following:
1795
+ - Visual Representation: Diagram illustrating workflows or data flows between modules.
1796
+ - Textual Description: Detailed explanation of the processes and transitions
1797
+ - Use bullet point, ensure simplicity and avoid overly technical language.
1798
+
1799
+ #### **5. Modules and Functional Requirements Breakdown**
1800
+ - LEAVE THIS BLANK
1801
+
1802
+ #### **6. Acceptance Criteria**
1803
+ - Define conditions to be met, including specific, measurable criteria for project completion:
1804
+ - Link each deliverable/module to its validation or testing process (e.g., UAT).
1805
+ - Table format with the following columns:
1806
+ - Deliverable (e.g. Field Matching Automation module)
1807
+ - Criteria, starting with "able to" (e.g. able to extract, match, and change the case status accordingly)
1808
+
1809
+ #### **7. Assumptions and Pre-requisites**
1810
+ - List all planning-phase assumptions and pre-requisites, grouped into:
1811
+ - Assumptions: Detailed, scenario-based assumptions that the project relies on. Each assumption should:
1812
+ - Reference specific stakeholders (e.g., PWT staff, Mindhive).
1813
+ - Describe specific conditions or expectations (e.g., document quality, workflow stability).
1814
+ - Be written in clear, concise language.
1815
+ - Pre-requisites or dependencies: Conditions that must be met before the project can proceed. Each pre-requisite should:
1816
+ - Be specific and actionable.
1817
+ - Reference who is responsible and what needs to be done.
1818
+
1819
+ #### **8. Proposed Timeline**
1820
+ - Provide a project timeline, including:
1821
+ - Milestone
1822
+ - Expected Date/Duration
1823
+ - Outcome/Deliverable
1824
+ - Use a Gantt chart or table to visualize the timeline.
1825
+ - Ensure the output is clean, organized, and easy to read.
1826
+
1827
+ #### **9. Commercial**
1828
+ Summarize the project's commercial details in the following subsections:
1829
+ - Development Fee: Create a table summarizing the costs for development, including the product, technical work supporting, or other additional services provided
1830
+ - Subscription Fee: If applicable, create a table summarizing subscription fees for system usage.
1831
+ - Payment Terms: Include a text description of payment terms:
1832
+ - Milestones: Specify at which stages payments are due
1833
+ - Invoicing: Define invoicing intervals (e.g., monthly, quarterly) and payment deadlines
1834
+ - Other Terms: Mention late payment fees or additional terms, if applicable
1835
+ - Output Format for tables: {Service}, {Fee} (leave amount blank)
1836
+
1837
+ #### **10. Sign-Off**
1838
+ - Create a professional and formal Sign-Off section to acknowledge and approve the SOW.
1839
+ - Include a statement to clearly communicate that both parties have reviewed and agreed to the SOW.
1840
+ - Provide placeholder for each party (Company):
1841
+ - Signature
1842
+ - Name
1843
+ - Position
1844
+ - Date
1845
+
1846
+ #### **Guidelines**
1847
+ - Use bullet points for clarity.
1848
+ - Keep descriptions concise and client-friendly; avoid technical jargon unless necessary.
1849
+ - Maintain structured sections and tables for readability.
1850
+
1851
+ Expected output should be professional, well-structured, and designed to help clients and stakeholders clearly understand the project scope. I'm going to tip you for a better outcome!
1852
+ """,
1853
  inputs=["generated_prd" , "generated_plan_test_components" , "reformatted_dev_components" , "generated_intent_list" , "combined_cost_summary" ],
1854
  outputs=["generated_BD_SOW"],
1855
  model=ModelType.O1_MINI,
 
1878
  ),
1879
 
1880
  "generate_Tech_SOW": PromptConfig(
1881
+ prompt=
1882
+ """
1883
+ As an experienced project manager with over 20 years of expertise, you are tasked with creating a detailed Scope of Work (SOW) document in JSON format. The JSON output should contain markdown-formatted strings as values for each section of the SOW. Analyze the provided project component list and scope document to generate the following sections:
1884
+
1885
+ ### **Scope of Work (SOW)**
1886
+
1887
+ #### **1. Scope Summary**
1888
+ - Provide a concise, high-level overview of the project scope. Divide it into three subsections:
1889
+ - In Scope:
1890
+ - List all deliverables, functionalities, and modules included in the project.
1891
+ - Be specific about what will be developed, implemented, or delivered.
1892
+ - Include MVP components (e.g., basic features, functionality, UI, document processing).
1893
+ - Assumptions:
1894
+ - Highlight key project-specific assumptions that the project relies on.
1895
+ - Dependencies:
1896
+ - List all internal and external dependencies required for the project's success.
1897
+ - Include any third-party integrations, resources, or timelines that the project depends on.
1898
+
1899
+ #### **2. Modules and Functional Requirements Breakdown**
1900
+ - Break down the project into modules or components.
1901
+ - Present the details in a succinct and client-friendly table with the following columns:
1902
+ i. Module
1903
+ ii. Functionalities/Features
1904
+ iii. Notes for any special considerations or constraints (e.g., 'Supports files up to 100MB').
1905
+ - Use clear, concise, non-technical language. (e.g., 'Drag-and-drop support for PDFs, Excel, CSV; progress indicators'). Avoid excessive detail.
1906
+ - Include MVP components as part of the breakdown and provide clear functionalities related to those (e.g., basic document upload, UI features, data processing).
1907
+
1908
+ #### **3. Out of Scope**
1909
+ - Explicitly define what is excluded from the project's scope. This may include any functionalities, tasks, or deliverables.
1910
+ - Ensure clarity to prevent future misunderstandings.
1911
+
1912
+ #### **4. System Flow**
1913
+ - Provide a detailed, step-by-step description of how the system components interact.
1914
+ - For each component or module:
1915
+ - Describe what it does and how it works, including both success and unsuccessful scenarios
1916
+ - Explain how it interacts with other components
1917
+ - Include MVP-related components in the system flow, describing their function and interaction within the MVP's framework.
1918
+
1919
+ Output Requirements:
1920
+ Just return the json object and nothing else, omit code guards ```, where each key represents a section of the SOW, and the value is a markdown-formatted string for that section.
1921
+ Use clear, concise, and client-friendly language. Avoid excessive technical jargon unless necessary.
1922
+ Example JSON Output:
1923
+ {
1924
+ "scope_summary": <markdown content>,
1925
+ "modules_and_functional_requirements": <markdown content>,
1926
+ "out_of_scope": <markdown content>,
1927
+ "system_flow": <markdown content>
1928
+ }
1929
+ """,
1930
  inputs=["generated_plan_test_components","reformatted_dev_components","generated_MVP_mandays"],
1931
  outputs=["generated_Tech_SOW"],
1932
  model=ModelType.O1_MINI,