ICAS03 committed on
Commit
2127f72
·
1 Parent(s): 025d333

Phase 2 : AI for dynamic render

Browse files
Files changed (6) hide show
  1. Project.py +31 -24
  2. app.py +32 -120
  3. code_modifier.py +475 -260
  4. code_updater.py +345 -0
  5. page_prompts_config.py +43 -48
  6. step_handlers.py +48 -0
Project.py CHANGED
@@ -65,10 +65,9 @@ class Project:
65
  'project_detail': lambda self: self.get_project_detail(),
66
  'generated_prd': lambda self: self.generated_prd,
67
  'component_list': lambda self: self.component_list,
68
- 'derived_plan_test_components': lambda self: self.derived_plan_test_components,
69
- 'derived_dev_components': lambda self: self.derived_dev_components,
70
  'quotation_cost': lambda self: self.quotation_cost,
71
- 'derived_tasks': lambda self: self.derived_tasks,
72
  }
73
 
74
  def execute_prompt(self, prompt_name: str, input_variables: Dict[str, Any] = None) -> str:
@@ -166,12 +165,13 @@ class Project:
166
  }
167
  )
168
 
169
- #Functions to interact with the frontend#
 
170
  def v4_generate_prd_and_components(progress=gr.Progress()):
171
  """Generate PRD and components from project details"""
172
  progress(0, desc="Progress 1: Generating PRD from Q&A...")
173
  generated_prd = state.quotation_project.execute_prompt(
174
- "rewrite_qa",
175
  {
176
  "project_detail": state.quotation_project.get_project_detail()
177
  }
@@ -180,7 +180,7 @@ def v4_generate_prd_and_components(progress=gr.Progress()):
180
  # Step 2: Generate Components
181
  progress(0.4, desc="Progress 2: Generating Planning & Testing Component...")
182
  plan_test_component_list = state.quotation_project.execute_prompt(
183
- "generate_components",
184
  {
185
  "generated_prd": state.quotation_project.generated_prd
186
  }
@@ -205,35 +205,40 @@ def v4_generate_prd_and_components(progress=gr.Progress()):
205
  "Generated PRD and Component List! Click Generate Final Quotation To Generate Quotation"
206
  ]
207
 
208
- def v4_generate_quotation(updated_plan_test_component, updated_dev_component, progress=gr.Progress()):
209
- state.quotation_project.derived_plan_test_components = updated_plan_test_component
210
- state.quotation_project.derived_dev_components = updated_dev_component
 
211
 
212
- # Generate mandays for plan & test components
213
- progress(0.5, desc="Progress 1: Deriving Mandays for Plan & Test Components...")
214
  plan_test_mandays = state.quotation_project.execute_prompt(
215
  "generate_plan_test_mandays",
216
  {
217
- "derived_plan_test_components": updated_plan_test_component
218
  }
219
  )
220
 
221
- # Generate mandays for dev components
222
- progress(0.7, desc="Progress 2: Deriving Mandays for Dev Components...")
223
  dev_mandays = state.quotation_project.execute_prompt(
224
  "generate_dev_mandays",
225
  {
226
- "derived_dev_components": updated_dev_component
227
  }
228
  )
229
 
230
  print(f"Dev Mandays Result: {dev_mandays}")
231
 
232
- # Process results - Add error handling and column validation
233
  try:
234
  plan_test_df = pd.read_csv(StringIO(plan_test_mandays), on_bad_lines='skip')
235
  dev_df = pd.read_csv(StringIO(dev_mandays), on_bad_lines='skip')
236
 
 
 
 
 
237
  # Validate and standardize column names
238
  required_plan_test_columns = ['component', 'mandays', 'description']
239
  required_dev_columns = ['component', 'subcomponent', 'mandays', 'description']
@@ -264,9 +269,10 @@ def v4_generate_quotation(updated_plan_test_component, updated_dev_component, pr
264
  error_message = f"Error processing CSV data: {str(e)}"
265
  return [None, None, error_message, "Error calculating costs"]
266
 
 
267
  def v4_generate_sow(generated_prd, plan_test_component, dev_component, cost, progress=gr.Progress()):
268
- state.quotation_project.derived_plan_test_components = plan_test_component
269
- state.quotation_project.derived_dev_components = dev_component
270
  state.quotation_project.quotation_cost = cost
271
 
272
  # Generate general SOW
@@ -275,19 +281,19 @@ def v4_generate_sow(generated_prd, plan_test_component, dev_component, cost, pro
275
  "generate_BD_SOW",
276
  {
277
  "generated_prd": generated_prd,
278
- "derived_plan_test_components": plan_test_component,
279
- "derived_dev_components": dev_component,
280
  "quotation_cost": cost
281
  }
282
  )
283
 
284
- # Generate detailed SOW
285
  progress(0.8, desc="Progress 2: Drafting Technical SOW")
286
  detailed_sow_json = state.quotation_project.execute_prompt(
287
  "generate_Tech_SOW",
288
  {
289
- "derived_plan_test_components": plan_test_component,
290
- "derived_dev_components": dev_component
291
  }
292
  )
293
 
@@ -310,6 +316,7 @@ def v4_generate_sow(generated_prd, plan_test_component, dev_component, cost, pro
310
  except Exception as e:
311
  return ["Error generating SOW", "Error: " + str(e), "Failed to generate SOW"]
312
 
 
313
  def v4_recalculate_cost(plan_test_df, dev_df):
314
  """Recalculate costs based on modified dataframe values"""
315
  try:
@@ -336,7 +343,7 @@ def update_display_mode(mode, content):
336
  else:
337
  return gr.update(visible=False, value=content), gr.update(visible=True, value=content)
338
 
339
-
340
  def update_system_prompts(new_prompt: str, prompt_name: str) -> str:
341
  """Update system prompts in both runtime PROMPTS dictionary and config file
342
 
 
65
  'project_detail': lambda self: self.get_project_detail(),
66
  'generated_prd': lambda self: self.generated_prd,
67
  'component_list': lambda self: self.component_list,
68
+ 'generated_plan_test_components': lambda self: self.generated_plan_test_components,
69
+ 'generated_dev_components': lambda self: self.generated_dev_components,
70
  'quotation_cost': lambda self: self.quotation_cost,
 
71
  }
72
 
73
  def execute_prompt(self, prompt_name: str, input_variables: Dict[str, Any] = None) -> str:
 
165
  }
166
  )
167
 
168
+ ## Functions that interact with the steps button handler ##
169
+ # Generate Scope & Components
170
  def v4_generate_prd_and_components(progress=gr.Progress()):
171
  """Generate PRD and components from project details"""
172
  progress(0, desc="Progress 1: Generating PRD from Q&A...")
173
  generated_prd = state.quotation_project.execute_prompt(
174
+ "generate_prd",
175
  {
176
  "project_detail": state.quotation_project.get_project_detail()
177
  }
 
180
  # Step 2: Generate Components
181
  progress(0.4, desc="Progress 2: Generating Planning & Testing Component...")
182
  plan_test_component_list = state.quotation_project.execute_prompt(
183
+ "generate_plan_test_components",
184
  {
185
  "generated_prd": state.quotation_project.generated_prd
186
  }
 
205
  "Generated PRD and Component List! Click Generate Final Quotation To Generate Quotation"
206
  ]
207
 
208
+ # Generate Plan & Test Mandays + Quotation based on components
209
+ def v4_generate_mandays_and_quotation(plan_test_component, dev_component, progress=gr.Progress()):
210
+ state.quotation_project.generated_plan_test_components = plan_test_component
211
+ state.quotation_project.generated_dev_components = dev_component
212
 
213
+ # Generate Plan & Test Mandays
214
+ progress(0.5, desc="Progress 1: Generating Mandays for Plan & Test Components...")
215
  plan_test_mandays = state.quotation_project.execute_prompt(
216
  "generate_plan_test_mandays",
217
  {
218
+ "generated_plan_test_components": plan_test_component
219
  }
220
  )
221
 
222
+ # Generate Dev Mandays
223
+ progress(0.7, desc="Progress 2: Generating Mandays for Dev Components...")
224
  dev_mandays = state.quotation_project.execute_prompt(
225
  "generate_dev_mandays",
226
  {
227
+ "generated_dev_components": dev_component
228
  }
229
  )
230
 
231
  print(f"Dev Mandays Result: {dev_mandays}")
232
 
233
+ # Convert the results into dataframe
234
  try:
235
  plan_test_df = pd.read_csv(StringIO(plan_test_mandays), on_bad_lines='skip')
236
  dev_df = pd.read_csv(StringIO(dev_mandays), on_bad_lines='skip')
237
 
238
+ # Standardize column names to lowercase
239
+ plan_test_df.columns = plan_test_df.columns.str.lower()
240
+ dev_df.columns = dev_df.columns.str.lower()
241
+
242
  # Validate and standardize column names
243
  required_plan_test_columns = ['component', 'mandays', 'description']
244
  required_dev_columns = ['component', 'subcomponent', 'mandays', 'description']
 
269
  error_message = f"Error processing CSV data: {str(e)}"
270
  return [None, None, error_message, "Error calculating costs"]
271
 
272
+ # Generate BD & Tech SOW
273
  def v4_generate_sow(generated_prd, plan_test_component, dev_component, cost, progress=gr.Progress()):
274
+ state.quotation_project.generated_plan_test_components = plan_test_component
275
+ state.quotation_project.generated_dev_components = dev_component
276
  state.quotation_project.quotation_cost = cost
277
 
278
  # Generate general SOW
 
281
  "generate_BD_SOW",
282
  {
283
  "generated_prd": generated_prd,
284
+ "generated_plan_test_components": plan_test_component,
285
+ "generated_dev_components": dev_component,
286
  "quotation_cost": cost
287
  }
288
  )
289
 
290
+ # Generate Technical SOW
291
  progress(0.8, desc="Progress 2: Drafting Technical SOW")
292
  detailed_sow_json = state.quotation_project.execute_prompt(
293
  "generate_Tech_SOW",
294
  {
295
+ "generated_plan_test_components": plan_test_component,
296
+ "generated_dev_components": dev_component
297
  }
298
  )
299
 
 
316
  except Exception as e:
317
  return ["Error generating SOW", "Error: " + str(e), "Failed to generate SOW"]
318
 
319
+ # Recalculate costs based on modified dataframe values
320
  def v4_recalculate_cost(plan_test_df, dev_df):
321
  """Recalculate costs based on modified dataframe values"""
322
  try:
 
343
  else:
344
  return gr.update(visible=False, value=content), gr.update(visible=True, value=content)
345
 
346
+ # Updates system prompts in both runtime PROMPTS dictionary and config file
347
  def update_system_prompts(new_prompt: str, prompt_name: str) -> str:
348
  """Update system prompts in both runtime PROMPTS dictionary and config file
349
 
app.py CHANGED
@@ -1,19 +1,18 @@
 
1
  import gradio as gr
2
  from Project import *
3
  from common_functions_v4 import *
4
- from google_drive import *
5
  from notion import *
6
  from state import state
7
  from page_prompts_config import PROMPTS, ModelType, UIComponentType, UIConfig, PromptConfig
8
  from typing import Dict, Any, Tuple
9
  from Project import update_system_prompts
10
-
11
-
12
- with open("page_main.css", "r") as file:
13
- custom_css = file.read()
14
 
15
  def create_ui_component(config: UIConfig, prompt: str = None) -> Any:
16
- """Create a single UI component based on configuration"""
17
  if config.component_type == UIComponentType.TEXTBOX:
18
  return gr.Textbox(
19
  label=config.label,
@@ -48,29 +47,22 @@ def create_ui_component(config: UIConfig, prompt: str = None) -> Any:
48
  return None
49
 
50
  def create_section_components(prompt_config: PromptConfig) -> Dict[str, Any]:
51
- """Create all UI components for a section based on its prompt configuration"""
52
  components = {}
53
 
54
  with gr.Row():
55
  with gr.Column(scale=1):
56
- # Create editor component if it exists in config
57
  editor_key = next((k for k in prompt_config.ui.keys() if k.endswith('_prompt_editor')), None)
58
  if editor_key:
59
- # Format the prompt to preserve original structure
60
- # Remove any common leading whitespace while preserving relative indentation
61
  lines = prompt_config.prompt.split('\n')
62
  if lines:
63
- # Find minimum indentation (excluding empty lines)
64
  min_indent = min((len(line) - len(line.lstrip())
65
  for line in lines if line.strip()),
66
  default=0)
67
- # Remove common leading whitespace while preserving structure
68
  formatted_lines = [
69
  line[min_indent:] if line.strip() else ''
70
  for line in lines
71
  ]
72
  formatted_prompt = '\n'.join(formatted_lines)
73
- # Add triple quotes
74
  formatted_prompt = f'"""\n{formatted_prompt}\n"""'
75
  else:
76
  formatted_prompt = '""""""'
@@ -81,12 +73,10 @@ def create_section_components(prompt_config: PromptConfig) -> Dict[str, Any]:
81
  )
82
 
83
  with gr.Column(scale=1):
84
- # Find text and markdown component pairs
85
  text_key = next((k for k in prompt_config.ui.keys() if k.endswith('_text')), None)
86
  markdown_key = next((k for k in prompt_config.ui.keys() if k.endswith('_markdown')), None)
87
 
88
  if text_key and markdown_key:
89
- # Add radio button for display mode
90
  display_mode_key = f"display_mode_{text_key.replace('_text', '')}"
91
  components[display_mode_key] = create_ui_component(
92
  UIConfig(
@@ -104,7 +94,6 @@ def create_section_components(prompt_config: PromptConfig) -> Dict[str, Any]:
104
  prompt_config.ui[markdown_key]
105
  )
106
 
107
- # Add event handler for display mode toggle
108
  components[display_mode_key].change(
109
  fn=update_display_mode,
110
  inputs=[
@@ -117,7 +106,6 @@ def create_section_components(prompt_config: PromptConfig) -> Dict[str, Any]:
117
  ]
118
  )
119
  else:
120
- # Handle other types of components (e.g., dataframe)
121
  for key, config in prompt_config.ui.items():
122
  if not key.endswith('_prompt_editor'):
123
  components[key] = create_ui_component(config)
@@ -125,8 +113,7 @@ def create_section_components(prompt_config: PromptConfig) -> Dict[str, Any]:
125
  return components
126
 
127
  def create_quotation_generator_section():
128
- """Creates the quotation generator section with dynamic components based on PROMPTS"""
129
- page_units_output = None
130
  page_recalc_btn = None
131
  page_progress_update = None
132
 
@@ -140,40 +127,33 @@ def create_quotation_generator_section():
140
  "3. **Generate Output**: Proceed to click generate once prompt is edited.",
141
  "4. **Upload Quotation**: Upload to Google Drive or Notion"
142
  ]:
143
- gr.Markdown(instruction)
144
 
145
  with gr.Column(scale=1):
146
  page_progress_update = gr.Textbox(label="Progress Update", lines=6, interactive=False)
147
 
148
  with gr.Row():
149
  with gr.Column(scale=4):
150
- # Step 1: Save System Prompt
151
  save_prompts_btn = gr.Button("💾 Save System Prompt")
152
 
153
- # Dynamically group prompts by their step
154
  prompt_steps = {}
155
  for prompt_key, prompt_config in PROMPTS.items():
156
  step = prompt_config.step
157
- # Only group prompts that have a non-empty step value
158
- if step and step.strip(): # Check if step exists and is not just whitespace
159
  if step not in prompt_steps:
160
  prompt_steps[step] = []
161
  prompt_steps[step].append(prompt_key)
162
 
163
- # Create components for each step
164
  all_components = {}
165
  step_buttons = {}
166
 
167
  for step_name, prompt_keys in prompt_steps.items():
168
  with gr.Accordion(step_name, open=False):
169
- # Add generate button for the step
170
  button_label = step_name.split(' : ')[1] if ' : ' in step_name else step_name
171
  step_buttons[step_name] = gr.Button(f"✅ Generate {button_label}")
172
 
173
- # Special layout for Step 2
174
  if "Step 2" in step_name:
175
  with gr.Row():
176
- # Left column for section components
177
  with gr.Column(scale=4):
178
  for i, prompt_key in enumerate(prompt_keys, 1):
179
  if prompt_key in PROMPTS:
@@ -181,20 +161,16 @@ def create_quotation_generator_section():
181
  components = create_section_components(PROMPTS[prompt_key])
182
  all_components[prompt_key] = components
183
 
184
- # Right column for summary and controls
185
  with gr.Column(scale=1):
186
- page_units_output = gr.Textbox(label="Cost Summary", lines=3, interactive=False)
187
  page_recalc_btn = gr.Button("Recalculate")
188
 
189
- # Add notes section
190
  page_notes_box = gr.Textbox(
191
  label="Notes",
192
  lines=3,
193
  placeholder="Add your notes here..."
194
  )
195
  page_save_quotation_btn = gr.Button("Save Quotation with Note")
196
-
197
- # Normal layout for other steps
198
  else:
199
  for i, prompt_key in enumerate(prompt_keys, 1):
200
  if prompt_key in PROMPTS:
@@ -202,25 +178,22 @@ def create_quotation_generator_section():
202
  components = create_section_components(PROMPTS[prompt_key])
203
  all_components[prompt_key] = components
204
 
205
- # Upload buttons
206
  page_upload_btn = gr.Button("📁 Upload to Google Drive")
 
207
  page_upload_notion_btn = gr.Button(" Upload to Notion")
 
208
 
209
- # Return the additional components
210
  return (all_components, step_buttons, save_prompts_btn,
211
- page_progress_update, page_upload_btn, page_upload_notion_btn,
212
- page_units_output, page_recalc_btn) # Add these to the return tuple
213
 
214
  def update_display_mode(mode: str, text_content: str) -> tuple:
215
- """Update display mode between text and markdown"""
216
  return (
217
  gr.update(visible=(mode == "Textbox"), value=text_content),
218
  gr.update(visible=(mode == "Markdown"), value=text_content)
219
- )
220
 
221
  def save_all_prompts(*prompts):
222
- """Save all edited prompts that have changed from their original values"""
223
- # Get all prompt names from PROMPTS that have a prompt_editor in their UI config
224
  prompt_names = [
225
  name for name, config in PROMPTS.items()
226
  if config.ui and any(key.endswith('_prompt_editor') for key in config.ui.keys())
@@ -231,14 +204,10 @@ def save_all_prompts(*prompts):
231
  if not prompt:
232
  continue
233
 
234
- # Get original prompt from PROMPTS
235
  original_prompt = PROMPTS[name].prompt
236
-
237
- # Clean up both prompts for comparison
238
  cleaned_prompt = prompt.strip().strip('"""').strip()
239
  cleaned_original = original_prompt.strip()
240
 
241
- # Only update if the prompt has changed
242
  if cleaned_prompt != cleaned_original:
243
  update_system_prompts(prompt, name)
244
  updated_prompts.append(name)
@@ -247,41 +216,35 @@ def save_all_prompts(*prompts):
247
  return f"✅ Successfully updated prompt"
248
  return "No prompts were changed"
249
 
250
- ################################################################################################
 
251
 
252
  with gr.Blocks(title="v3 Page Quotation Chatbot (with SOW)", css=custom_css) as page_interface:
253
  gr.Markdown("# Page Quotation Chatbot with SOW")
254
 
255
- # Store all components for event handling
256
  all_components = {}
257
 
258
  with gr.Tab(label="Page Main"):
259
- # Instructions
260
  gr.Markdown("## ⚠️ Instructions #1")
261
  gr.Markdown("### Either select Option 1 or 2 , then scroll down to generate a quotation.")
262
  gr.Markdown("1. **Start a New Session**: Begin answering questions for a new project.")
263
  gr.Markdown("2. **Load an Existing Project**: Navigate to the **Load Project** tab.")
264
 
265
- # Session components
266
  with gr.Row():
267
  start_btn = gr.Button("Start New Session")
268
  with gr.Row():
269
  current_session_display = gr.Markdown(no_active_session)
270
 
271
- # Chat interface
272
  with gr.Row():
273
  with gr.Column(scale=1):
274
-
275
  current_question = gr.Textbox(label="Edit Area", lines=20)
276
  submit_btn = gr.Button("Submit")
277
  clear_btn = gr.Button("Clear Chat")
278
  with gr.Column(scale=1):
279
  chatbot = gr.Chatbot(height=580)
280
 
281
- # Sample answers
282
  gr.Markdown("Below are sample answers you can refer to:")
283
  with gr.Accordion("Sample AR Answers", open=False):
284
- # Define labels for each file
285
  sample_answers = [
286
  {'file': 'q1_answer.txt', 'label': 'Company Background & Industry'},
287
  {'file': 'q2_answer.txt', 'label': 'Current Challenges & Workflow'},
@@ -296,12 +259,10 @@ with gr.Blocks(title="v3 Page Quotation Chatbot (with SOW)", css=custom_css) as
296
  show_copy_button=True
297
  )
298
 
299
- # Create quotation generator section with dynamic components
300
  (all_components, step_buttons, save_prompts_btn, page_progress_update,
301
- page_upload_btn, page_upload_notion_btn,
302
- page_units_output, page_recalc_btn) = create_quotation_generator_section()
303
 
304
- # Replace single textbox with separate components
305
  with gr.Tab(label="Load Project"):
306
  gr.Markdown("### Past submissions")
307
  gr.Markdown("Quick hack to load past submissions to regenerate quotations (This page displays Q&A only; previous quotations are not shown yet).")
@@ -319,9 +280,7 @@ with gr.Blocks(title="v3 Page Quotation Chatbot (with SOW)", css=custom_css) as
319
 
320
  with gr.Tab(label="Requirement"):
321
  fetched_requirements_box = gr.Markdown(value="")
322
-
323
- ################################################################################################
324
- # Event handlers
325
  def setup_event_handlers():
326
  start_btn.click(
327
  fn=lambda: (*start_chat(), *get_project_state()),
@@ -338,15 +297,17 @@ with gr.Blocks(title="v3 Page Quotation Chatbot (with SOW)", css=custom_css) as
338
  fn=lambda: ([], ""),
339
  outputs=[chatbot, current_question]
340
  )
341
-
 
 
342
  page_recalc_btn.click(
343
  fn=v4_recalculate_cost,
344
  inputs=[
345
- all_components['generate_plan_test_mandays']['page_plan_test_mandays'],
346
- all_components['generate_dev_mandays']['page_dev_mandays'],
347
  ],
348
  outputs=[
349
- page_units_output,
350
  page_progress_update
351
  ]
352
  )
@@ -362,68 +323,19 @@ with gr.Blocks(title="v3 Page Quotation Chatbot (with SOW)", css=custom_css) as
362
  ]
363
  )
364
 
365
- prompt_editors = [
366
- components[next(k for k in components.keys() if k.endswith('_prompt_editor'))]
367
- for components in all_components.values()
368
- if any(k.endswith('_prompt_editor') for k in components.keys())
369
- ]
370
-
371
  save_prompts_btn.click(
372
  fn=save_all_prompts,
373
- inputs=prompt_editors,
374
- outputs=[page_progress_update]
375
- )
376
-
377
- # Handlers for step buttons
378
- step_buttons["Step 1 : Scope & Components"].click(
379
- fn=v4_generate_prd_and_components,
380
- inputs=[],
381
- outputs=[
382
- all_components['rewrite_qa']['page_prd_box_text'],
383
- all_components['rewrite_qa']['page_prd_box_markdown'],
384
- all_components['generate_components']['page_plan_test_component_table_text'],
385
- all_components['generate_components']['page_plan_test_component_table_markdown'],
386
- all_components['generate_dev_components']['page_dev_component_table_text'],
387
- all_components['generate_dev_components']['page_dev_component_table_markdown'],
388
- page_progress_update
389
- ]
390
- )
391
-
392
- step_buttons["Step 2 : Planning & Testing Components"].click(
393
- fn=v4_generate_quotation,
394
- inputs=[
395
- all_components['generate_components']['page_plan_test_component_table_text'],
396
- all_components['generate_dev_components']['page_dev_component_table_text']
397
- ],
398
- outputs=[
399
- all_components['generate_plan_test_mandays']['page_plan_test_mandays'],
400
- all_components['generate_dev_mandays']['page_dev_mandays'],
401
- page_progress_update,
402
- page_units_output
403
- ]
404
- )
405
-
406
- step_buttons["Step 3 : SOW Doc"].click(
407
- fn=v4_generate_sow,
408
  inputs=[
409
- all_components['rewrite_qa']['page_prd_box_text'],
410
- all_components['generate_components']['page_plan_test_component_table_text'],
411
- all_components['generate_dev_components']['page_dev_component_table_text'],
412
- page_units_output
413
  ],
414
- outputs=[
415
- all_components['generate_BD_SOW']['page_general_sow_text'],
416
- all_components['generate_BD_SOW']['page_general_sow_markdown'],
417
- all_components['generate_Tech_SOW']['page_detailed_sow_text'],
418
- all_components['generate_Tech_SOW']['page_detailed_sow_markdown'],
419
- page_progress_update
420
- ]
421
  )
 
422
 
423
-
424
  setup_event_handlers()
425
 
426
-
427
  if __name__ == "__main__":
428
- page_interface.launch(share=True)
429
-
 
1
+ from typing import Tuple
2
  import gradio as gr
3
  from Project import *
4
  from common_functions_v4 import *
5
+ from google_drive import *
6
  from notion import *
7
  from state import state
8
  from page_prompts_config import PROMPTS, ModelType, UIComponentType, UIConfig, PromptConfig
9
  from typing import Dict, Any, Tuple
10
  from Project import update_system_prompts
11
+ import pandas as pd
12
+ from io import StringIO
13
+ from step_handlers import setup_all_handlers
 
14
 
15
  def create_ui_component(config: UIConfig, prompt: str = None) -> Any:
 
16
  if config.component_type == UIComponentType.TEXTBOX:
17
  return gr.Textbox(
18
  label=config.label,
 
47
  return None
48
 
49
  def create_section_components(prompt_config: PromptConfig) -> Dict[str, Any]:
 
50
  components = {}
51
 
52
  with gr.Row():
53
  with gr.Column(scale=1):
 
54
  editor_key = next((k for k in prompt_config.ui.keys() if k.endswith('_prompt_editor')), None)
55
  if editor_key:
 
 
56
  lines = prompt_config.prompt.split('\n')
57
  if lines:
 
58
  min_indent = min((len(line) - len(line.lstrip())
59
  for line in lines if line.strip()),
60
  default=0)
 
61
  formatted_lines = [
62
  line[min_indent:] if line.strip() else ''
63
  for line in lines
64
  ]
65
  formatted_prompt = '\n'.join(formatted_lines)
 
66
  formatted_prompt = f'"""\n{formatted_prompt}\n"""'
67
  else:
68
  formatted_prompt = '""""""'
 
73
  )
74
 
75
  with gr.Column(scale=1):
 
76
  text_key = next((k for k in prompt_config.ui.keys() if k.endswith('_text')), None)
77
  markdown_key = next((k for k in prompt_config.ui.keys() if k.endswith('_markdown')), None)
78
 
79
  if text_key and markdown_key:
 
80
  display_mode_key = f"display_mode_{text_key.replace('_text', '')}"
81
  components[display_mode_key] = create_ui_component(
82
  UIConfig(
 
94
  prompt_config.ui[markdown_key]
95
  )
96
 
 
97
  components[display_mode_key].change(
98
  fn=update_display_mode,
99
  inputs=[
 
106
  ]
107
  )
108
  else:
 
109
  for key, config in prompt_config.ui.items():
110
  if not key.endswith('_prompt_editor'):
111
  components[key] = create_ui_component(config)
 
113
  return components
114
 
115
  def create_quotation_generator_section():
116
+ quotation_cost = None
 
117
  page_recalc_btn = None
118
  page_progress_update = None
119
 
 
127
  "3. **Generate Output**: Proceed to click generate once prompt is edited.",
128
  "4. **Upload Quotation**: Upload to Google Drive or Notion"
129
  ]:
130
+ gr.Markdown(instruction)
131
 
132
  with gr.Column(scale=1):
133
  page_progress_update = gr.Textbox(label="Progress Update", lines=6, interactive=False)
134
 
135
  with gr.Row():
136
  with gr.Column(scale=4):
 
137
  save_prompts_btn = gr.Button("💾 Save System Prompt")
138
 
 
139
  prompt_steps = {}
140
  for prompt_key, prompt_config in PROMPTS.items():
141
  step = prompt_config.step
142
+ if step and step.strip():
 
143
  if step not in prompt_steps:
144
  prompt_steps[step] = []
145
  prompt_steps[step].append(prompt_key)
146
 
 
147
  all_components = {}
148
  step_buttons = {}
149
 
150
  for step_name, prompt_keys in prompt_steps.items():
151
  with gr.Accordion(step_name, open=False):
 
152
  button_label = step_name.split(' : ')[1] if ' : ' in step_name else step_name
153
  step_buttons[step_name] = gr.Button(f"✅ Generate {button_label}")
154
 
 
155
  if "Step 2" in step_name:
156
  with gr.Row():
 
157
  with gr.Column(scale=4):
158
  for i, prompt_key in enumerate(prompt_keys, 1):
159
  if prompt_key in PROMPTS:
 
161
  components = create_section_components(PROMPTS[prompt_key])
162
  all_components[prompt_key] = components
163
 
 
164
  with gr.Column(scale=1):
165
+ quotation_cost = gr.Textbox(label="Cost Summary", lines=3, interactive=False)
166
  page_recalc_btn = gr.Button("Recalculate")
167
 
 
168
  page_notes_box = gr.Textbox(
169
  label="Notes",
170
  lines=3,
171
  placeholder="Add your notes here..."
172
  )
173
  page_save_quotation_btn = gr.Button("Save Quotation with Note")
 
 
174
  else:
175
  for i, prompt_key in enumerate(prompt_keys, 1):
176
  if prompt_key in PROMPTS:
 
178
  components = create_section_components(PROMPTS[prompt_key])
179
  all_components[prompt_key] = components
180
 
 
181
  page_upload_btn = gr.Button("📁 Upload to Google Drive")
182
+ #Still not working :/
183
  page_upload_notion_btn = gr.Button(" Upload to Notion")
184
+
185
 
 
186
  return (all_components, step_buttons, save_prompts_btn,
187
+ page_progress_update, page_upload_btn, page_upload_notion_btn,
188
+ quotation_cost, page_recalc_btn, page_notes_box, page_save_quotation_btn)
189
 
190
  def update_display_mode(mode: str, text_content: str) -> tuple:
 
191
  return (
192
  gr.update(visible=(mode == "Textbox"), value=text_content),
193
  gr.update(visible=(mode == "Markdown"), value=text_content)
194
+ )
195
 
196
  def save_all_prompts(*prompts):
 
 
197
  prompt_names = [
198
  name for name, config in PROMPTS.items()
199
  if config.ui and any(key.endswith('_prompt_editor') for key in config.ui.keys())
 
204
  if not prompt:
205
  continue
206
 
 
207
  original_prompt = PROMPTS[name].prompt
 
 
208
  cleaned_prompt = prompt.strip().strip('"""').strip()
209
  cleaned_original = original_prompt.strip()
210
 
 
211
  if cleaned_prompt != cleaned_original:
212
  update_system_prompts(prompt, name)
213
  updated_prompts.append(name)
 
216
  return f"✅ Successfully updated prompt"
217
  return "No prompts were changed"
218
 
219
+ with open("page_main.css", "r") as file:
220
+ custom_css = file.read()
221
 
222
  with gr.Blocks(title="v3 Page Quotation Chatbot (with SOW)", css=custom_css) as page_interface:
223
  gr.Markdown("# Page Quotation Chatbot with SOW")
224
 
 
225
  all_components = {}
226
 
227
  with gr.Tab(label="Page Main"):
 
228
  gr.Markdown("## ⚠️ Instructions #1")
229
  gr.Markdown("### Either select Option 1 or 2 , then scroll down to generate a quotation.")
230
  gr.Markdown("1. **Start a New Session**: Begin answering questions for a new project.")
231
  gr.Markdown("2. **Load an Existing Project**: Navigate to the **Load Project** tab.")
232
 
 
233
  with gr.Row():
234
  start_btn = gr.Button("Start New Session")
235
  with gr.Row():
236
  current_session_display = gr.Markdown(no_active_session)
237
 
 
238
  with gr.Row():
239
  with gr.Column(scale=1):
 
240
  current_question = gr.Textbox(label="Edit Area", lines=20)
241
  submit_btn = gr.Button("Submit")
242
  clear_btn = gr.Button("Clear Chat")
243
  with gr.Column(scale=1):
244
  chatbot = gr.Chatbot(height=580)
245
 
 
246
  gr.Markdown("Below are sample answers you can refer to:")
247
  with gr.Accordion("Sample AR Answers", open=False):
 
248
  sample_answers = [
249
  {'file': 'q1_answer.txt', 'label': 'Company Background & Industry'},
250
  {'file': 'q2_answer.txt', 'label': 'Current Challenges & Workflow'},
 
259
  show_copy_button=True
260
  )
261
 
 
262
  (all_components, step_buttons, save_prompts_btn, page_progress_update,
263
+ page_upload_btn, page_upload_notion_btn,
264
+ quotation_cost, page_recalc_btn, page_notes_box, page_save_quotation_btn) = create_quotation_generator_section()
265
 
 
266
  with gr.Tab(label="Load Project"):
267
  gr.Markdown("### Past submissions")
268
  gr.Markdown("Quick hack to load past submissions to regenerate quotations (This page displays Q&A only; previous quotations are not shown yet).")
 
280
 
281
  with gr.Tab(label="Requirement"):
282
  fetched_requirements_box = gr.Markdown(value="")
283
+
 
 
284
  def setup_event_handlers():
285
  start_btn.click(
286
  fn=lambda: (*start_chat(), *get_project_state()),
 
297
  fn=lambda: ([], ""),
298
  outputs=[chatbot, current_question]
299
  )
300
+
301
+ setup_all_handlers(step_buttons, all_components, page_progress_update, quotation_cost)
302
+
303
  page_recalc_btn.click(
304
  fn=v4_recalculate_cost,
305
  inputs=[
306
+ all_components['generate_plan_test_mandays']['generated_plan_test_mandays_dataframe'],
307
+ all_components['generate_dev_mandays']['generated_dev_mandays_dataframe'],
308
  ],
309
  outputs=[
310
+ quotation_cost,
311
  page_progress_update
312
  ]
313
  )
 
323
  ]
324
  )
325
 
 
 
 
 
 
 
326
  save_prompts_btn.click(
327
  fn=save_all_prompts,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
328
  inputs=[
329
+ all_components[prompt_key]['prompt_editor']
330
+ for prompt_key in all_components
331
+ if 'prompt_editor' in all_components[prompt_key]
 
332
  ],
333
+ outputs=[page_progress_update]
 
 
 
 
 
 
334
  )
335
+
336
 
337
+ # Call setup_event_handlers after all components are created
338
  setup_event_handlers()
339
 
 
340
  if __name__ == "__main__":
341
+ page_interface.launch(share=True)
 
code_modifier.py CHANGED
@@ -1,274 +1,489 @@
1
- import ast
 
 
 
 
 
2
  import json
3
- from typing import Dict, List
4
- from Project import call_o1_mini
5
- from page_prompts_config import PromptConfig, PROMPTS
6
 
7
- class PromptConfigAnalyzer:
8
- """Analyzes prompt configurations and generates implementation code"""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
 
10
- def __init__(self):
11
- self.prompt_configs = {}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
12
 
13
- def analyze_prompt_config(self, prompt_name: str, config: PromptConfig) -> Dict:
14
- """Analyze a single prompt configuration"""
15
- analysis = {
16
- "name": prompt_name,
17
- "method_name": f"execute_{prompt_name}",
18
- "inputs": config.inputs,
19
- "outputs": config.outputs,
20
- "ui_components": [],
21
- "state_requirements": []
22
- }
23
-
24
- # Analyze UI requirements
25
- if config.ui:
26
- for component_name, ui_config in config.ui.items():
27
- analysis["ui_components"].append({
28
- "name": component_name,
29
- "type": ui_config.component_type,
30
- "config": ui_config
31
- })
32
-
33
- # Analyze state requirements
34
- for input_var in config.inputs:
35
- analysis["state_requirements"].append({
36
- "name": input_var,
37
- "type": "project_attribute"
38
- })
39
-
40
- return analysis
41
-
42
- def generate_method_code(self, analysis: Dict) -> str:
43
- """Generate method code for a prompt"""
44
- method_name = analysis["method_name"]
45
- inputs = analysis["inputs"]
46
- outputs = analysis["outputs"]
47
 
48
- code = f"""
49
- def {method_name}(self, {', '.join(inputs)}) -> Dict[str, Any]:
50
- \"\"\"Execute {analysis['name']} prompt
51
 
52
- Args:
53
- {chr(10).join(f'{input_}: str' for input_ in inputs)}
54
-
55
- Returns:
56
- Dict containing: {', '.join(outputs)}
57
- \"\"\"
58
- try:
59
- result = self.execute_prompt(
60
- "{analysis['name']}",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
61
  {{
62
- {chr(10).join(f'"{input_}": {input_},' for input_ in inputs)}
 
 
 
 
63
  }}
64
- )
65
-
66
- return {{
67
- {chr(10).join(f'"{output}": result,' for output in outputs)}
68
- }}
69
-
70
- except Exception as e:
71
- return {{"error": str(e)}}
72
- """
73
- return code
74
-
75
- def generate_ui_code(self, analysis: Dict) -> str:
76
- """Generate Gradio UI code for a prompt"""
77
- components = []
78
- for ui_component in analysis["ui_components"]:
79
- config = ui_component["config"]
80
- component_code = f"""
81
- {ui_component['name']} = gr.{config.component_type.value.capitalize()}(
82
- label="{config.label}",
83
- interactive={config.interactive},
84
- visible={config.visible},
85
- {'lines=' + str(config.lines) if config.lines else ''}
86
- {', show_copy_button=True' if config.show_copy_button else ''}
87
- )"""
88
- components.append(component_code)
89
-
90
- return "\n".join(components)
91
-
92
- def analyze_and_update_code():
93
- """Use AI to analyze configs and generate updated implementations"""
94
-
95
- # Read configuration and existing files
96
- with open("page_prompts_config.py", "r") as f:
97
- config_code = f.read()
98
- with open("Project.py", "r") as f:
99
- project_code = f.read()
100
- with open("app.py", "r") as f:
101
- app_code = f.read()
102
- with open("common_functions_v4.py", "r") as f:
103
- common_code = f.read()
104
-
105
- # First, analyze the configuration structure
106
- analysis_prompt = f"""Analyze these files and explain how they work together:
107
-
108
- Configuration (page_prompts_config.py):
109
- {config_code}
110
-
111
- Current Project Implementation (Project.py):
112
- {project_code}
113
-
114
- Current App Implementation (app.py):
115
- {app_code}
116
-
117
- Current Common Functions (common_functions_v4.py):
118
- {common_code}
119
-
120
- Explain:
121
- 1. How do these components interact?
122
- 2. What is the flow of data between components?
123
- 3. What patterns are used in the implementations?
124
- 4. How should the code be structured to best handle the configurations?
125
- 5. What improvements could be made to the architecture?
126
-
127
- Format your response as JSON with these keys: interactions, data_flow, patterns, structure, improvements"""
128
-
129
- print("Analyzing code structure...")
130
- analysis = call_o1_mini(analysis_prompt)
131
- analysis_json = json.loads(analysis)
132
-
133
- # Generate updated Project.py implementation
134
- project_prompt = f"""Based on this analysis, generate an updated Project.py implementation that:
135
-
136
- 1. Handles all configured prompts from page_prompts_config.py
137
- 2. Maintains existing functionality while improving code structure
138
- 3. Implements proper error handling and logging
139
- 4. Uses type hints and documentation
140
- 5. Follows Python best practices
141
-
142
- Analysis:
143
- {json.dumps(analysis_json, indent=2)}
144
-
145
- Current Implementation:
146
- {project_code}
147
-
148
- Configuration:
149
- {config_code}
150
-
151
- Rules:
152
- - Keep existing functionality intact
153
- - Improve code organization
154
- - Add better error handling
155
- - Maintain compatibility with app.py
156
- - Add comprehensive documentation
157
-
158
- Format the response as a complete Python file."""
159
-
160
- print("Generating Project.py implementation...")
161
- project_implementation = call_o1_mini(project_prompt)
162
-
163
- # Generate updated app.py implementation
164
- app_prompt = f"""Based on this analysis, generate an updated app.py implementation that:
165
-
166
- 1. Creates a Gradio interface matching the configuration
167
- 2. Maintains existing functionality while improving structure
168
- 3. Handles all events and user interactions
169
- 4. Provides better error handling and feedback
170
- 5. Follows the specified layout structure
171
-
172
- Analysis:
173
- {json.dumps(analysis_json, indent=2)}
174
-
175
- Current Implementation:
176
- {app_code}
177
-
178
- Configuration:
179
- {config_code}
180
-
181
- Rules:
182
- - Keep existing functionality intact
183
- - Improve UI component generation
184
- - Better state management
185
- - Maintain all current features
186
- - Add better error handling
187
-
188
- Format the response as a complete Python file."""
189
-
190
- print("Generating app.py implementation...")
191
- app_implementation = call_o1_mini(app_prompt)
192
-
193
- # Generate updated common_functions_v4.py implementation
194
- common_prompt = f"""Based on this analysis, generate an updated common_functions_v4.py implementation that:
195
-
196
- 1. Maintains all existing utility functions
197
- 2. Improves code organization and structure
198
- 3. Adds better error handling
199
- 4. Updates function signatures as needed
200
- 5. Improves documentation
201
-
202
- Analysis:
203
- {json.dumps(analysis_json, indent=2)}
204
-
205
- Current Implementation:
206
- {common_code}
207
-
208
- Configuration:
209
- {config_code}
210
-
211
- Rules:
212
- - Keep existing functionality intact
213
- - Improve code organization
214
- - Add better error handling
215
- - Update type hints
216
- - Add comprehensive documentation
217
-
218
- Format the response as a complete Python file."""
219
-
220
- print("Generating common_functions_v4.py implementation...")
221
- common_implementation = call_o1_mini(common_prompt)
222
-
223
- # Validate all implementations
224
- validation_prompt = f"""Validate these implementations for:
225
- 1. Completeness - all current functionality is maintained
226
- 2. Correctness - code will work as intended
227
- 3. Best practices - code follows Python conventions
228
- 4. Error handling - all edge cases are covered
229
- 5. Integration - components work together properly
230
-
231
- Project Implementation:
232
- {project_implementation}
233
-
234
- App Implementation:
235
- {app_implementation}
236
-
237
- Common Functions Implementation:
238
- {common_implementation}
239
-
240
- Provide any necessary corrections in JSON format with these keys: project_fixes, app_fixes, common_fixes"""
241
-
242
- print("Validating implementations...")
243
- validation = call_o1_mini(validation_prompt)
244
- validation_json = json.loads(validation)
245
 
246
- # Apply any fixes
247
- if validation_json.get('project_fixes'):
248
- project_implementation = apply_fixes(project_implementation, validation_json['project_fixes'])
249
- if validation_json.get('app_fixes'):
250
- app_implementation = apply_fixes(app_implementation, validation_json['app_fixes'])
251
- if validation_json.get('common_fixes'):
252
- common_implementation = apply_fixes(common_implementation, validation_json['common_fixes'])
253
 
254
- # Save implementations
255
- print("Saving updated implementations...")
256
- with open("Project.py", "w") as f:
257
- f.write(project_implementation)
258
- with open("app.py", "w") as f:
259
- f.write(app_implementation)
260
- with open("common_functions_v4.py", "w") as f:
261
- f.write(common_implementation)
262
 
263
- return "Successfully updated all implementation files based on configuration analysis"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
264
 
265
- def apply_fixes(code: str, fixes: dict) -> str:
266
- """Apply fixes to generated code"""
267
- # Implementation of fix application logic
268
- # This would parse the code and apply the specified fixes
269
- return code
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
270
 
271
  if __name__ == "__main__":
272
- print("Starting AI-based code analysis and generation...")
273
- result = analyze_and_update_code()
274
- print(result)
 
1
+ from typing import Dict, List, Set, Optional
2
+ from dataclasses import dataclass
3
+ from enum import Enum
4
+ import openai
5
+ from contextlib import contextmanager
6
+ import re
7
  import json
 
 
 
8
 
9
@dataclass
class UIComponent:
    """One Gradio UI element referenced by a prompt configuration."""
    # NOTE(review): these dataclasses are not referenced by the functions in
    # this module; they appear to mirror the schema of page_prompts_config.py
    # for future structured parsing — confirm intended usage.
    name: str
    type: str
    config: Dict

@dataclass
class PromptConfig:
    """Declarative description of a single prompt: its inputs, outputs and UI."""
    name: str
    inputs: List[str]
    outputs: List[str]
    ui_components: Dict[str, UIComponent]
    step: Optional[str] = None  # pipeline step this prompt belongs to, if any

class ChangeType(Enum):
    """Kinds of edits that can be made to a prompt configuration."""
    ADD_PROMPT = "add_prompt"
    REMOVE_PROMPT = "remove_prompt"
    RENAME_PROMPT = "rename_prompt"
    ADD_INPUT = "add_input"
    REMOVE_INPUT = "remove_input"
    RENAME_INPUT = "rename_input"
    ADD_OUTPUT = "add_output"
    REMOVE_OUTPUT = "remove_output"
    RENAME_OUTPUT = "rename_output"
    ADD_UI = "add_ui"
    REMOVE_UI = "remove_ui"
    RENAME_UI = "rename_ui"
    CHANGE_STEP = "change_step"

@dataclass
class ConfigChange:
    """A single detected change to a prompt configuration."""
    type: ChangeType
    prompt_name: str
    old_value: Optional[str] = None  # value before the change, when applicable
    new_value: Optional[str] = None  # value after the change, when applicable
    details: Optional[Dict] = None   # free-form extra context about the change

@dataclass
class AnalysisResult:
    """Aggregate result of a configuration analysis pass."""
    changes: List[ConfigChange]
    affected_files: Set[str]
    required_updates: Dict[str, List[Dict]]
51
 
52
@contextmanager
def openai_session():
    """Context manager yielding an OpenAI client, closed on exit.

    Fix: ``client`` is initialized to None before the ``try`` so that the
    ``finally`` block cannot raise NameError when ``openai.OpenAI()`` itself
    fails — previously that NameError would mask the real construction error.
    """
    client = None
    try:
        client = openai.OpenAI()
        yield client
    finally:
        # Older/newer SDK versions may or may not expose close(); guard it.
        if client is not None and hasattr(client, 'close'):
            client.close()
61
+
62
def call_o1_mini(prompt: str) -> str:
    """Send *prompt* to the o1-mini chat model and return the reply text.

    Errors raised while creating the completion are reported in-band as an
    ``"Error generating output: ..."`` string, so callers always get a str.
    """
    with openai_session() as api_client:
        try:
            completion = api_client.chat.completions.create(
                model="o1-mini",
                messages=[{"role": "user", "content": prompt}],
            )
        except Exception as exc:
            return f"Error generating output: {str(exc)}"
        return completion.choices[0].message.content
73
 
74
def clean_ai_response(response: str) -> str:
    """Strip markdown code fences from *response* and validate it as JSON.

    Fix: the original only stripped ```` ```json ```` openers, so a bare
    ```` ``` ```` opening fence was left in place and perfectly valid fenced
    JSON was rejected as "{}". The fence pattern now accepts both forms.

    Args:
        response: Raw model output, possibly wrapped in a markdown code block.

    Returns:
        The cleaned JSON text, or "{}" when the cleaned text is not valid
        JSON (the parse error and the original response are printed).
    """
    try:
        # Remove opening fences, with or without the "json" language tag.
        # Like the original, this is not anchored, so stray fences anywhere
        # in the payload are also removed.
        cleaned = re.sub(r'```(?:json)?\s*', '', response)
        # Remove a trailing closing fence, if one survived.
        cleaned = re.sub(r'```\s*$', '', cleaned)
        cleaned = cleaned.strip()

        # Validate only; callers parse the returned string themselves.
        json.loads(cleaned)

        return cleaned
    except json.JSONDecodeError as e:
        print(f"Error cleaning response: {str(e)}")
        # Print the untouched input (the original printed the mutated text,
        # which made the "raw" label misleading).
        print("Raw response:", response)
        return "{}"
+
93
def analyze_config_structure(config_content: str, project_content: str, current_handlers: str) -> Dict:
    """First AI pass: analyze the config structure and find required changes.

    Fixes: (1) ``result`` is pre-bound so the ``except`` block can never hit a
    NameError when ``call_o1_mini`` raises before assignment; (2) removed a
    stray ``y`` character that corrupted the prompt after {config_content}.

    Args:
        config_content: page_prompts_config.py content (prompts stripped).
        project_content: Full text of Project.py.
        current_handlers: Full text of step_handlers.py.

    Returns:
        Parsed JSON analysis dict, or an empty-structure fallback on error.
    """
    analysis_prompt = f"""
You are an expert software analyzer. Your task is to analyze and focus on the configuration structure and validate the relationships between components.

1. OUTPUT AND INPUT VALIDATION RULES:
- Not all outputs/inputs require UI component mappings
- Special outputs like 'quotation_cost' can be internal values used as inputs and outputs in other steps
- Internal values can be passed between steps without UI components , so just IGNORE them
- Examples of internal values:
  * quotation_cost: Generated as output, used as input in other steps
  * project_detail: Used as input without UI mapping
  * gathered_project_input: Internal state passed between steps
  * reviewed_project_input: Internal state passed between steps

2. OUTPUT TO UI COMPONENT MAPPING RULES:
- Most outputs in config map to specific UI components
- Standard Output Pattern:
  * Base output name (e.g., 'generated_prd') MUST map to TWO components:
    - base_name_text: For textbox display
    - base_name_markdown: For markdown display
- Special Case - Mandays Pattern:
  * Mandays outputs (e.g., 'generated_plan_test_mandays') MUST map to ONE component:
    - base_name_dataframe: For dataframe display

3. INPUT TO UI COMPONENT MAPPING RULES:
- Only UI-interactive inputs need component mappings
- When needed, inputs MUST reference either the _text or _markdown variant

4. NAMING CONVENTION (Source of Truth: Config File)
MUST CHECK:
- Prompt names (e.g., "generate_dev_components")
- Component names (e.g., "generated_Tech_SOW_text")
- Input/Output references

WHERE TO CHECK:
- step_handlers.py: Function names, component references
- Project.py: Method names, mappings
- all_components dictionary: Component keys

5. HANDLER VALIDATION RULES:
- Each step handler must correctly map:
  * Inputs to component _text or _markdown
  * Outputs to both _text and _markdown (or _dataframe for mandays)
- all_components dictionary keys must exactly match config naming
- Function parameters should align with Project.py signatures

Context Files:
1. Current Config:
{config_content}

2. Project.py:
{project_content}

3. step_handlers.py:
{current_handlers}

Return a JSON analysis with this EXACT structure:
{{
    "component_mappings": {{
        "outputs": [
            {{
                "config_name": "base_output_name",
                "required_components": ["component1", "component2"],
                "current_components": ["existing1", "existing2"],
                "is_valid": false,
                "issues": ["detailed_issue_description"]
            }}
        ],
        "inputs": [
            {{
                "config_name": "input_name",
                "required_component": "required_component_name",
                "current_component": "current_component_name",
                "is_valid": false,
                "issues": ["detailed_issue_description"]
            }}
        ]
    }},
    "step_handlers": {{
        "steps": [
            {{
                "step": "step_number",
                "input_mappings": {{
                    "is_valid": false,
                    "issues": ["detailed_issue_description"]
                }},
                "output_mappings": {{
                    "is_valid": false,
                    "issues": ["detailed_issue_description"]
                }}
            }}
        ]
    }},
    "required_updates": {{
        "Project.py": [
            {{
                "type": "method|mapping",
                "location": "exact_location",
                "reason": "detailed_explanation"
            }}
        ],
        "step_handlers.py": [
            {{
                "step": "step_number",
                "type": "component|input|output",
                "current": "current_code",
                "required": "required_code",
                "reason": "detailed_explanation"
            }}
        ]
    }}
}}

IMPORTANT:
1. Validate EVERY output has correct component mappings
2. Check EVERY input references correct component variant
3. Verify ALL component names match exactly
4. Flag ANY case mismatches or naming inconsistencies
5. Return ONLY the JSON object, no additional text
"""

    result = ""  # pre-bound so the except block can always print it
    try:
        result = call_o1_mini(analysis_prompt)
        cleaned_result = clean_ai_response(result)
        return json.loads(cleaned_result)
    except Exception as e:
        print(f"Error in analysis: {str(e)}")
        print("Raw response:", result)
        # Empty-but-well-formed fallback so callers can index safely.
        return {
            "component_mappings": {"outputs": [], "inputs": []},
            "step_handlers": {"steps": []},
            "required_updates": {}
        }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
227
 
 
 
 
 
 
 
 
228
 
229
def generate_code_updates(analysis_result: Dict) -> Dict:
    """Second AI pass: turn an analysis result into concrete code updates.

    Fix: ``result`` is pre-bound so the ``except`` block cannot raise
    NameError when ``call_o1_mini`` itself fails before assignment.

    Args:
        analysis_result: Output of analyze_config_structure().

    Returns:
        Dict with "Project.py" and "step_handlers.py" keys, each guaranteed
        to hold an "updates" list (possibly empty, even on failure).
    """
    code_generation_prompt = f"""
You are an expert code generator. Based on this analysis, generate EXACT code updates.

Analysis Result:
{json.dumps(analysis_result, indent=2)}

Generate a JSON response with this EXACT structure:
{{
    "Project.py": {{
        "updates": [
            {{
                "location": "exact_location",
                "explanation": "detailed_explanation",
                "old_code": "existing_code",
                "new_code": "updated_code"
            }}
        ]
    }},
    "step_handlers.py": {{
        "updates": [
            {{
                "location": "exact_location",
                "explanation": "detailed_explanation",
                "old_code": "existing_code",
                "new_code": "updated_code"
            }}
        ]
    }}
}}

Requirements:
1. Maintain exact function signatures
2. Preserve existing code structure
3. Only generate the specific changes needed
4. Include clear comments explaining changes
5. Each file must have an 'updates' array, even if empty

IMPORTANT: Return ONLY the JSON object, without any markdown formatting or explanation.
Do not include ```json or ``` markers.
"""

    result = ""  # pre-bound so the except block can always print it
    try:
        result = call_o1_mini(code_generation_prompt)
        cleaned_result = clean_ai_response(result)
        parsed_result = json.loads(cleaned_result)

        # Normalize: both target files present, each with an "updates" list.
        for file_name in ("Project.py", "step_handlers.py"):
            if file_name not in parsed_result:
                parsed_result[file_name] = {"updates": []}
            if "updates" not in parsed_result[file_name]:
                parsed_result[file_name]["updates"] = []

        return parsed_result

    except Exception as e:
        print(f"Error generating code updates: {str(e)}")
        print("Raw response:", result)
        return {
            "Project.py": {"updates": []},
            "step_handlers.py": {"updates": []}
        }
298
 
299
def apply_project_updates(updates: List[Dict]) -> bool:
    """Apply generated updates to Project.py in the current directory.

    Fix: the original did ``update['type']`` but generate_code_updates emits
    {location, explanation, old_code, new_code} with NO 'type' key, so every
    generated update raised KeyError and the function always returned False.
    Both shapes are now accepted.

    Args:
        updates: Update dicts in either the legacy 'type'-keyed shape or the
            generator's old_code/new_code shape.

    Returns:
        True when the file was rewritten successfully, False on any error
        (the error is printed, not raised).
    """
    try:
        with open('Project.py', 'r') as f:
            content = f.read()

        for update in updates:
            kind = update.get('type')  # .get(): generator output has no 'type'

            if kind == 'method':
                # Insert new methods before the file's final blank-line gap.
                class_end = content.rfind('\n\n')
                if class_end == -1:
                    return False
                content = content[:class_end] + '\n' + update['new_code'] + content[class_end:]

            elif kind == 'mapping':
                # Replace the whole INPUT_MAPPINGS = {...} block.
                # NOTE(review): find('}') stops at the FIRST closing brace, so
                # nested dict values inside the mapping would be truncated —
                # confirm INPUT_MAPPINGS stays flat.
                start = content.find('INPUT_MAPPINGS = {')
                if start != -1:
                    end = content.find('}', start)
                    if end != -1:
                        content = content[:start] + update['new_code'] + content[end + 1:]

            elif update.get('old_code') and update.get('new_code'):
                # Shape emitted by generate_code_updates: plain old->new swap.
                content = content.replace(update['old_code'], update['new_code'])

        with open('Project.py', 'w') as f:
            f.write(content)

        return True

    except Exception as e:
        print(f"Error applying Project.py updates: {str(e)}")
        return False
330
+
331
def apply_handler_updates(updates: List[Dict]) -> bool:
    """Apply generated updates to step_handlers.py in the current directory.

    Fixes: (1) ``update['type']`` raised KeyError on the old_code/new_code
    shape that generate_code_updates actually emits; (2) the regex fallback
    for 'input'/'output' rewrote EVERY inputs=[...]/outputs=[...] list in the
    file — a targeted replacement is now preferred whenever old_code is known.

    Args:
        updates: Update dicts in either the legacy 'type'-keyed shape or the
            generator's old_code/new_code shape.

    Returns:
        True when the file was rewritten successfully, False on any error
        (the error is printed, not raised).
    """
    try:
        with open('step_handlers.py', 'r') as f:
            content = f.read()

        for update in updates:
            kind = update.get('type')  # .get(): generator output has no 'type'
            old_code = update.get('old_code')
            new_code = update.get('new_code')

            if kind == 'component':
                # Direct textual replacement of a component reference.
                content = content.replace(old_code, new_code)
            elif kind in ('input', 'output'):
                if old_code and old_code in content:
                    # Targeted: replace just the one occurrence.
                    content = content.replace(old_code, new_code, 1)
                else:
                    # Last resort: original behavior — rewrites ALL matching
                    # lists in the file; only safe when one handler exists.
                    pattern = rf"{kind}s=\[[^\]]*\]"
                    content = re.sub(pattern, f"{kind}s=[{new_code}]", content)
            elif old_code and new_code:
                # Shape emitted by generate_code_updates: plain old->new swap.
                content = content.replace(old_code, new_code)

        with open('step_handlers.py', 'w') as f:
            f.write(content)

        return True

    except Exception as e:
        print(f"Error applying handler updates: {str(e)}")
        return False
360
+
361
def extract_config_without_prompts() -> str:
    """Read page_prompts_config.py and return it with prompt text removed.

    Heuristic line filter (behavior unchanged; removed the unused
    ``prompt_indent`` and the redundant ``in_multiline_string`` flag, which
    was always set and cleared together with ``in_prompt_block``):
      - every triple-quoted block is assumed to be a prompt and is dropped;
      - single-line ``prompt=...`` assignments are dropped;
      - blank lines are dropped.

    NOTE(review): a line that both opens and closes a triple-quoted string is
    treated as an opener only, which would swallow the rest of the file —
    confirm the config never uses one-line triple-quoted strings.

    Returns:
        The filtered file content, or an "Error reading config file: ..."
        string if the file cannot be read.
    """
    try:
        with open('page_prompts_config.py', 'r') as f:
            kept_lines = []
            in_prompt_block = False

            for line in f:
                # Drop blank lines entirely.
                if not line.strip():
                    continue

                # Each triple-quote marker toggles prompt-block state; the
                # marker line itself is never kept.
                if '"""' in line:
                    in_prompt_block = not in_prompt_block
                    continue

                if in_prompt_block:
                    continue

                # Drop single-line prompt assignments.
                if 'prompt=' in line:
                    continue

                kept_lines.append(line)

        return ''.join(kept_lines)
    except Exception as e:
        return f"Error reading config file: {str(e)}"
402
+
403
def main():
    """Interactive workflow: analyze config, confirm, generate and apply updates.

    Flow: read the three source files -> AI analysis pass -> print findings ->
    ask for confirmation -> AI code-generation pass -> print proposed diffs ->
    ask again -> write the updates into Project.py and step_handlers.py.
    All errors are printed (with traceback) rather than raised.
    """
    try:
        # Read current files; extract_config_without_prompts signals failure
        # in-band with an "Error..." string rather than an exception.
        config_content = extract_config_without_prompts()
        if config_content.startswith("Error"):
            print(config_content)
            return

        with open('Project.py', 'r') as f:
            project_content = f.read()
        with open('step_handlers.py', 'r') as f:
            current_handlers = f.read()

        # First AI: Analyze current structure
        print("\nAnalyzing configuration structure...")
        analysis_result = analyze_config_structure(config_content, project_content, current_handlers)

        # Empty component_mappings doubles as the failure signal from the
        # analyzer's fallback return value.
        if not analysis_result.get('component_mappings'):
            print("Error: Invalid analysis result format")
            return

        # Display analysis results
        print("\nConfiguration Analysis:")
        print("1. Output Mappings:", len(analysis_result['component_mappings']['outputs']))
        print("2. Input Mappings:", len(analysis_result['component_mappings']['inputs']))
        print("3. Step Handlers:", len(analysis_result['step_handlers']['steps']))

        if analysis_result.get('required_updates'):
            print("\nRequired Updates:")
            for file, updates in analysis_result['required_updates'].items():
                print(f"\n{file}:")
                for update in updates:
                    print(f"- {update['type']}: {update['reason']}")

        # NOTE(review): 'validation_issues' is not part of the JSON schema the
        # analyzer prompt requests — this branch may never fire; confirm.
        if analysis_result.get('validation_issues'):
            print("\nValidation Issues:")
            for issue in analysis_result['validation_issues']:
                print(f"- {issue['type']} ({issue['severity']}): {issue['description']}")

        # Get user confirmation
        confirm = input("\nProceed with generating code updates? (y/n): ")
        if confirm.lower() != 'y':
            print("Analysis complete. Update cancelled.")
            return

        # Second AI: Generate code updates
        print("\nGenerating code updates...")
        code_updates = generate_code_updates(analysis_result)

        # Display proposed changes
        print("\nProposed Code Updates:")
        for file, updates in code_updates.items():
            print(f"\n{file}:")
            for update in updates['updates']:
                print(f"- Location: {update['location']}")
                print(f" Explanation: {update['explanation']}")
                print(" Old code:")
                print(f"```\n{update['old_code']}\n```")
                print(" New code:")
                print(f"```\n{update['new_code']}\n```")

        # Final confirmation
        confirm = input("\nApply these code updates? (y/n): ")
        if confirm.lower() != 'y':
            print("Updates cancelled.")
            return

        # Apply updates; &= keeps success False once any apply step fails.
        success = True
        if 'Project.py' in code_updates:
            success &= apply_project_updates(code_updates['Project.py']['updates'])
        if 'step_handlers.py' in code_updates:
            success &= apply_handler_updates(code_updates['step_handlers.py']['updates'])

        if success:
            print("Successfully updated all files")
        else:
            print("Some updates failed - please check the files manually")

    except Exception as e:
        print(f"Error in update process: {str(e)}")
        import traceback
        print("Traceback:", traceback.format_exc())
487
 
488
if __name__ == "__main__":
    # Entry point: run the interactive analyze -> confirm -> apply workflow.
    main()
 
 
code_updater.py ADDED
@@ -0,0 +1,345 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Dict
2
+ import openai
3
+ from contextlib import contextmanager
4
+ import re
5
+ import json
6
+
7
@contextmanager
def openai_session():
    """Context manager yielding an OpenAI client, closed on exit.

    Fix: ``client`` is initialized to None before the ``try`` so that the
    ``finally`` block cannot raise NameError when ``openai.OpenAI()`` itself
    fails — previously that NameError would mask the real construction error.
    """
    client = None
    try:
        client = openai.OpenAI()
        yield client
    finally:
        # Older/newer SDK versions may or may not expose close(); guard it.
        if client is not None and hasattr(client, 'close'):
            client.close()
16
+
17
def call_o1_mini(prompt: str) -> str:
    """Return the o1-mini chat completion text for *prompt*.

    Failures are reported in-band as an "Error generating output: ..." string
    rather than raised, so callers always receive a str.
    """
    with openai_session() as client:
        try:
            reply = client.chat.completions.create(
                model="o1-mini",
                messages=[{"role": "user", "content": prompt}],
            )
            return reply.choices[0].message.content
        except Exception as err:
            return f"Error generating output: {str(err)}"
28
+
29
def extract_config_without_prompts() -> str:
    """Read page_prompts_config.py and return it with prompt text removed.

    Heuristic line filter (behavior unchanged; removed the unused
    ``prompt_indent`` and the redundant ``in_multiline_string`` flag, which
    was always set and cleared together with ``in_prompt_block``):
      - every triple-quoted block is assumed to be a prompt and is dropped;
      - single-line ``prompt=...`` assignments are dropped;
      - blank lines are dropped.

    NOTE(review): a line that both opens and closes a triple-quoted string is
    treated as an opener only, which would swallow the rest of the file —
    confirm the config never uses one-line triple-quoted strings.

    Returns:
        The filtered file content, or an "Error reading config file: ..."
        string if the file cannot be read.
    """
    try:
        with open('page_prompts_config.py', 'r') as f:
            kept_lines = []
            in_prompt_block = False

            for line in f:
                # Drop blank lines entirely.
                if not line.strip():
                    continue

                # Each triple-quote marker toggles prompt-block state; the
                # marker line itself is never kept.
                if '"""' in line:
                    in_prompt_block = not in_prompt_block
                    continue

                if in_prompt_block:
                    continue

                # Drop single-line prompt assignments.
                if 'prompt=' in line:
                    continue

                kept_lines.append(line)

        return ''.join(kept_lines)
    except Exception as e:
        return f"Error reading config file: {str(e)}"
70
+
71
+ def analyze_files(config_content: str, project_content: str, current_handlers: str) -> dict:
72
+ """First analyze all files and extract relevant information"""
73
+ analysis_prompt = f"""
74
+ You are an expert software engineer performing a comprehensive analysis. You must understand these key relationships:
75
+
76
+ 1. Config Output to UI Components Mapping:
77
+ - A single output in config (e.g., 'generated_bd_SOW') can map to multiple UI components
78
+ - UI components typically come in pairs:
79
+ * _text: For textbox display
80
+ * _markdown: For markdown display
81
+ - As for mandays , it only maps to one UI components which is in *_dataframe
82
+ Example:
83
+ Config output 'generated_bd_SOW' maps to:
84
+ - generated_bd_SOW_text
85
+ - generated_bd_SOW_markdown
86
+
87
+ 2. Config Input to UI Component Mapping :
88
+ - The inputs should take the _text or _markdown of the component
89
+
90
+ 3. Component Name and Config Key Validation:
91
+ - Names in handlers must exactly match config
92
+ - Both section names and component names must match
93
+ - Pay special attention to component name changes
94
+ - Pay attention to case sensitivity
95
+
96
+ 4. Prompt Name Validation :
97
+ - Prompt names in config are the source of truth
98
+ - Any references to these names in Project.py and step_handlers.py must match EXACTLY
99
+ - Check for:
100
+ * Function names in step_handlers.py
101
+ * Component references in all_components dictionary
102
+ * Input/output mappings in step handlers
103
+ * Method names in Project.py
104
+
105
+ 4. Input/Output Name Validation:
106
+ - Config keys must match exactly in handlers
107
+ - Check all_components dictionary keys in handlers against config
108
+ - Example: if config uses 'generate_development_mandays', handlers must use the same key
109
+ - Flag any mismatches between config keys and handler references
110
+
111
+ 4. Function Parameter Mapping:
112
+ - Function parameters in Project.py define the actual data flow
113
+ - UI components must map to these parameters correctly
114
+
115
+ Context Files:
116
+
117
+ 1. page_prompts_config.py (SOURCE OF TRUTH FOR NAMES & UI):
118
+ {config_content}
119
+
120
+ 2. Project.py (SOURCE OF TRUTH FOR FUNCTION PARAMETERS):
121
+ {project_content}
122
+
123
+ 3. step_handlers.py (NEEDS VALIDATION):
124
+ {current_handlers}
125
+
126
+ Analysis Requirements:
127
+
128
+ 1. UI Component Validation:
129
+ - Check each config output's UI components
130
+ - Verify both _text and _markdown variants exist where specified
131
+ - Ensure case matches exactly
132
+
133
+ 2. Component Name Validation:
134
+ - Compare against config definitions
135
+ - Check both section names and component names
136
+ - Flag any case mismatches
137
+
138
+ 3. Function Parameter Validation:
139
+ - Verify against Project.py signatures
140
+ - Check parameter order and types
141
+ - Ensure all required parameters are included
142
+
143
+
144
+ Return your analysis in this EXACT JSON format:
145
+ {{
146
+ "step_1": {{
147
+ "current_code": "exact current handler code",
148
+ "needs_update": false,
149
+ "updates": []
150
+ }},
151
+ "step_2": {{
152
+ "current_code": "exact current handler code",
153
+ "needs_update": true,
154
+ "updates": [
155
+ {{
156
+ "type": "component_name",
157
+ "line": "all_components['generate_BD_SOW']['generated_BD_SOW_text']",
158
+ "replacement": "all_components['generate_bd_SOW']['generated_bd_SOW_text']",
159
+ "reason": "Case mismatch with config definition"
160
+ }}
161
+ ]
162
+ }},
163
+ "step_3": {{
164
+ "current_code": "exact current handler code",
165
+ "needs_update": true,
166
+ "updates": [
167
+ {{
168
+ "type": "component_name",
169
+ "line": "all_components['generate_BD_SOW']['generated_BD_SOW_text']",
170
+ "replacement": "all_components['generate_bd_SOW']['generated_bd_SOW_text']",
171
+ "reason": "Case mismatch with config definition"
172
+ }}
173
+ ]
174
+ }}
175
+ }}
176
+
177
+ Requirements:
178
+ 1. Include ALL steps (1-3)
179
+ 2. For each step:
180
+ - Include exact current code
181
+ - Set needs_update to true if changes needed
182
+ - List ALL required updates
183
+ 3. For each update:
184
+ - Provide exact current line
185
+ - Provide exact replacement
186
+ - Include clear reason
187
+ 4. Check:
188
+ - Case sensitivity
189
+ - Component names
190
+ - Function parameters
191
+ - UI component mappings
192
+
193
+ Every discrepancy must be reported as an issue with:
194
+ - Clear description of the problem
195
+ - Exact current implementation
196
+ - Exact expected implementation from config
197
+ - Specific location in the code
198
+
199
+ If no issues are found for a step, include an empty issues array.
200
+
201
+ Remember:
202
+ - Component names must match config
203
+ - Function parameters must match Project.py signatures
204
+ - Some components may be correct in function but need case fixes from config
205
+ - DO NOT suggest changes unless there is a 100% exact mismatch
206
+ - If a component or parameter does not have any changes and it does exist and works, DO NOT suggest changes
207
+ """
208
+
209
+ analysis = call_o1_mini(analysis_prompt)
210
+ return analysis
211
+
212
def generate_handler_code(step_number: int, analysis_result: dict) -> str:
    """Build the updated handler code for one step from the analysis result.

    Args:
        step_number: 1-based step index; matched against the "step_N" keys
            produced by the analysis.
        analysis_result: Parsed JSON analysis. Each "step_N" entry is expected
            to carry "current_code", "needs_update" and a list of "updates"
            (each with "line" and "replacement").

    Returns:
        The handler code with every replacement applied, or "" when the step
        needs no update (or is missing/malformed in the analysis).
    """
    step_data = analysis_result.get(f"step_{step_number}", {})

    if not step_data.get("needs_update", False):
        return ""

    # Use .get with defaults so a malformed analysis entry (needs_update set
    # but no current_code/updates) degrades to "" instead of raising KeyError.
    updated_code = step_data.get("current_code", "")
    for update in step_data.get("updates", []):
        # Each update swaps one exact source fragment for its corrected form.
        updated_code = updated_code.replace(update["line"], update["replacement"])

    return updated_code
229
+
230
def analyze_prompts_and_generate_handlers() -> Dict[int, str]:
    """Analyze config/Project/handler files and build updated step handlers.

    Reads the three source files, runs a single LLM analysis pass over them,
    then applies the suggested per-step updates.

    Returns:
        Mapping of step number (1-3) to updated handler code. A value of None
        means the generated code was malformed and the existing handler could
        not be recovered from the file; steps that need no update are omitted.
        Returns {} on any parse or I/O failure.
    """
    try:
        # Read all inputs once up front. Encoding is pinned so the platform
        # locale default (e.g. cp1252 on Windows) cannot garble the files.
        config_content = extract_config_without_prompts()
        print("\nConfig content loaded")

        with open('Project.py', 'r', encoding='utf-8') as f:
            project_content = f.read()
        print("Project.py content loaded")

        with open('step_handlers.py', 'r', encoding='utf-8') as f:
            current_handlers = f.read()
        print("step_handlers.py content loaded")

        # One analysis pass covers every step.
        print("\nAnalyzing all files...")
        analysis_result = analyze_files(config_content, project_content, current_handlers)

        try:
            if isinstance(analysis_result, str):
                # Strip markdown code fences if the model wrapped its JSON.
                # A regex is used because the fence is not always followed /
                # preceded by the exact newline the old literal replace assumed.
                analysis_result = re.sub(
                    r'^```(?:json)?\s*|\s*```$', '', analysis_result.strip()
                )
                analysis_result = json.loads(analysis_result)

            print("\nRaw analysis result:")
            print(json.dumps(analysis_result, indent=2))

            handlers = {}
            for step in range(1, 4):
                step_key = f"step_{step}"
                if step_key not in analysis_result:
                    print(f"\nNo analysis results found for {step_key}")
                    continue

                step_code = generate_handler_code(step, analysis_result)

                if not step_code:
                    # Empty string means "no update needed" for this step.
                    continue
                step_code = step_code.strip()

                if not step_code.startswith(f"step_buttons['Step {step}"):
                    print(f"\nInvalid handler code format for Step {step}")
                    # Fall back to the handler currently in the file.
                    pattern = rf"step_buttons\['Step {step} : [^']*'\]\.click\([^)]+\)"
                    current_handler = re.search(pattern, current_handlers, re.DOTALL)
                    handlers[step] = current_handler.group(0) if current_handler else None
                else:
                    handlers[step] = step_code
                    print(f"Generated handler for Step {step}")

            return handlers

        except json.JSONDecodeError as e:
            print(f"Error parsing analysis result: {str(e)}")
            print("Raw analysis result:", analysis_result)
            return {}

    except Exception as e:
        print(f"Error generating handlers: {str(e)}")
        import traceback
        print("Traceback:", traceback.format_exc())
        return {}
294
+
295
def main():
    """Interactively review generated handlers and patch step_handlers.py.

    For each step that has generated code, shows the new handler, asks the
    user to confirm, replaces the matching `.click(...)` registration in
    step_handlers.py in place, then re-reads the file to verify the write.
    """
    handlers = analyze_prompts_and_generate_handlers()
    print("Generated handlers for steps:", list(handlers.keys()))

    # Let the user review the changes step by step.
    for step, code in handlers.items():
        if not code:
            # None/empty means the analysis produced nothing usable here.
            continue

        new_code = code.strip()
        print(f"\nStep {step} Handler:")
        print(new_code)
        update = input(f"Do you want to update Step {step} handler? (y/n): ")

        if update.lower() != 'y':
            continue

        try:
            # Encoding pinned on every open so the round-trip cannot be
            # corrupted by the platform's locale default.
            with open('step_handlers.py', 'r', encoding='utf-8') as f:
                content = f.read()

            # Locate the existing .click(...) registration for this step.
            pattern = rf"step_buttons\['Step {step} : [^']*'\]\.click\([^)]+\)"
            match = re.search(pattern, content, re.DOTALL)

            if not match:
                print(f"Warning: Couldn't find Step {step} handler in file")
                continue

            # Splice the new handler over the matched span.
            updated_content = content[:match.start()] + new_code + content[match.end():]

            with open('step_handlers.py', 'w', encoding='utf-8') as f:
                f.write(updated_content)

            print(f"Successfully updated Step {step} handler in step_handlers.py")

            # Re-read to confirm the write actually landed on disk.
            with open('step_handlers.py', 'r', encoding='utf-8') as f:
                new_content = f.read()
            if new_code in new_content:
                print("Verified: Update successful")
            else:
                print("Warning: Update may not have been successful")

        except Exception as e:
            print(f"Error updating file: {str(e)}")
            print(f"Error details: {type(e).__name__}")
344
# Entry point: run the interactive handler-update workflow when executed
# as a script (no side effects on import).
if __name__ == "__main__":
    main()
page_prompts_config.py CHANGED
@@ -128,7 +128,7 @@ PROMPTS = {
128
  }
129
  ),
130
 
131
- "rewrite_qa": PromptConfig(
132
  prompt=
133
  """
134
  Rewrite this for clarity while keeping all specific details, metrics, and constraints.
@@ -144,19 +144,19 @@ PROMPTS = {
144
  ui={
145
  "requirement_prompt_editor": UIConfig(
146
  component_type=UIComponentType.TEXTBOX,
147
- label="Requirements System Prompt",
148
  lines=20,
149
  interactive=True
150
  ),
151
- "page_prd_box_text": UIConfig(
152
  component_type=UIComponentType.TEXTBOX,
153
- label="Requirements Output",
154
  lines=20,
155
  visible=True
156
  ),
157
- "page_prd_box_markdown": UIConfig(
158
  component_type=UIComponentType.MARKDOWN,
159
- label="Requirements Output",
160
  visible=False,
161
  show_copy_button=True
162
  )
@@ -248,7 +248,7 @@ PROMPTS = {
248
  }
249
  ),
250
 
251
- "generate_components": PromptConfig(
252
  prompt=
253
  """
254
  Context:
@@ -268,24 +268,24 @@ PROMPTS = {
268
  Include sub-bullets for tasks or subcomponents where necessary to provide additional detail.
269
  """,
270
  inputs=["generated_prd"],
271
- outputs=["derived_plan_test_components"],
272
  model=ModelType.O1_MINI,
273
  description="Generate planning and testing components",
274
  step="Step 1 : Scope & Components",
275
  ui={
276
- "plan_text_prompt_editor": UIConfig(
277
  component_type=UIComponentType.TEXTBOX,
278
  label="Plan Test System Prompt",
279
  lines=20,
280
  interactive=True
281
  ),
282
- "page_plan_test_component_table_text": UIConfig(
283
  component_type=UIComponentType.TEXTBOX,
284
  label="Plan & Test Components Output",
285
  lines=20,
286
  visible=True
287
  ),
288
- "page_plan_test_component_table_markdown": UIConfig(
289
  component_type=UIComponentType.MARKDOWN,
290
  label="Plan & Test Components Output",
291
  visible=False,
@@ -334,24 +334,24 @@ PROMPTS = {
334
  Please respond in English only.
335
  """,
336
  inputs=["generated_prd"],
337
- outputs=["derived_dev_components"],
338
  model=ModelType.O1_MINI,
339
  description="Generate development components",
340
  step="Step 1 : Scope & Components",
341
  ui={
342
- "dev_prompt_editor": UIConfig(
343
  component_type=UIComponentType.TEXTBOX,
344
  label="Development System Prompt",
345
  lines=20,
346
  interactive=True
347
  ),
348
- "page_dev_component_table_text": UIConfig(
349
  component_type=UIComponentType.TEXTBOX,
350
  label="Development Components Output",
351
  lines=20,
352
  visible=True
353
  ),
354
- "page_dev_component_table_markdown": UIConfig(
355
  component_type=UIComponentType.MARKDOWN,
356
  label="Development Components Output",
357
  visible=False,
@@ -360,15 +360,11 @@ PROMPTS = {
360
  }
361
  ),
362
 
363
-
364
  "generate_plan_test_mandays": PromptConfig(
365
  prompt=
366
  """
367
  You are an experienced project manager tasked to create a detailed task breakdown for a project based on the planning and testing component list.
368
 
369
- Input Components:
370
- {derived_plan_test_components}
371
-
372
  Objective:
373
  Generate a structured CSV output with manday estimates for each planning and testing component.
374
 
@@ -393,19 +389,19 @@ PROMPTS = {
393
 
394
  Return only the CSV content, no code blocks or additional text.
395
  """,
396
- inputs=["derived_plan_test_components"],
397
- outputs=["plan_test_mandays"],
398
  model=ModelType.O1_MINI,
399
- description="Generate planning and testing mandays",
400
- step="Step 2 : Planning & Testing Components",
401
  ui={
402
- "plantest_mandays_prompt_editor": UIConfig(
403
  component_type=UIComponentType.TEXTBOX,
404
  label="Planning & Testing Mandays System Prompt",
405
  lines=20,
406
  interactive=True
407
  ),
408
- "page_plan_test_mandays": UIConfig(
409
  component_type=UIComponentType.DATAFRAME,
410
  label="Plan & Test Mandays",
411
  interactive=True
@@ -418,8 +414,6 @@ PROMPTS = {
418
  """
419
  You are an experienced project manager tasked to create a detailed task breakdown for development components.
420
 
421
- Input Components:
422
- {derived_dev_components}
423
 
424
  Objective:
425
  Generate a structured CSV output with manday estimates for each development component and subcomponent from the provided development component list.
@@ -454,11 +448,11 @@ PROMPTS = {
454
 
455
  YOU MUST GENERATE ACTUAL DATA ROWS based on the development component list provided.
456
  """,
457
- inputs=["derived_dev_components"],
458
- outputs=["dev_mandays"],
459
  model=ModelType.O1_MINI,
460
- description="Generate development mandays",
461
- step="Step 2 : Planning & Testing Components",
462
  ui={
463
  "dev_mandays_prompt_editor": UIConfig(
464
  component_type=UIComponentType.TEXTBOX,
@@ -466,7 +460,7 @@ PROMPTS = {
466
  lines=20,
467
  interactive=True
468
  ),
469
- "page_dev_mandays": UIConfig(
470
  component_type=UIComponentType.DATAFRAME,
471
  label="Dev Mandays",
472
  interactive=True
@@ -521,7 +515,7 @@ PROMPTS = {
521
  Instructions:
522
  Replace <client context> with relevant information derived from the provided client background.
523
  Only provide the list of formatted questions without any additional introduction or summary.
524
- """,
525
  inputs=['project_detail'],
526
  outputs=['gathered_project_input'],
527
  model=ModelType.O1_MINI,
@@ -584,7 +578,7 @@ PROMPTS = {
584
  """
585
  As an project manager with 20+ years of experience, you are tasked to create a detailed Scope of Work (SOW) document. Analyze the provided project component list and scope document to generate the following sections. Follow the guidelines below to ensure a professional, structured, and client-ready output:
586
 
587
- ### **Scope of Work (SOW
588
  #### **1. Project Background**
589
  - Provide a brief overview of the project, including the context, problem statement, and why the project is being initiated.
590
  - Break down key challenges (in bullet point) the company currently facing, quantifying the impacts where possible (e.g., lost revenue, downtime).
@@ -603,7 +597,7 @@ PROMPTS = {
603
  - Textual Description: Detailed explanation of the processes and transitions
604
  - Use bullet point, ensure simplicity and avoid overly technical langua
605
  #### **5. Modules and Functional Requirements Breakdown**
606
- - LEAVE THIS BL
607
  #### **6. Acceptance Criteria**
608
  - Define conditions to be met, including specific, measurable criteria for project completion:
609
  - Link each deliverable/module to its validation or testing process (e.g., UAT).
@@ -625,7 +619,7 @@ PROMPTS = {
625
  - Expected Date/Duration
626
  - Outcome/Deliverable
627
  - Use a Gantt chart or table to visualize the timeline.
628
- - Ensure the output are clean, organized, and easy to re
629
  #### **9. Commercial**
630
  Summarize the project's commercial details in the following subsections:
631
  - Development Fee: Create a table summarizing the costs for development, including the product, technical work supporting, or other additional services provided
@@ -634,7 +628,7 @@ PROMPTS = {
634
  - Milestones: Specify at which stages payments are due
635
  - Invoicing: Define invoicing intervals (e.g., monthly, quarterly) and payment deadlines
636
  - Other Terms: Mention late payment fees or additional terms, if applicable
637
- - Output Format for tables: {Service}, {Fee} (leave amount bla
638
  #### **10. Sign-Off**
639
  - Create a professional and formal Sign-Off section to acknowledge and approve the SOW.
640
  - Include an statement to clearly communicate that both parties have reviewed and agreed to the SOW.
@@ -642,15 +636,15 @@ PROMPTS = {
642
  - Signature
643
  - Name
644
  - Position
645
- - D
646
  #### **Guidelines**
647
  - Use bullet points for clarity.
648
  - Keep descriptions concise and client-friendly; avoid technical jargon unless necessary.
649
- - Maintain structured sections and tables for readabili
650
  Expected output should be professional, well-structured, and designed to help clients and stakeholders clearly understand the project scope. I'm going to tip you for a better outcome!
651
  """,
652
- inputs=["generated_prd", "derived_plan_test_components", "derived_dev_components", "quotation_cost"],
653
- outputs=["general_sow"],
654
  model=ModelType.O1_MINI,
655
  description="Generate BD SOW",
656
  step="Step 3 : SOW Doc",
@@ -661,13 +655,13 @@ PROMPTS = {
661
  lines=20,
662
  interactive=True
663
  ),
664
- "page_general_sow_text": UIConfig(
665
  component_type=UIComponentType.TEXTBOX,
666
  label="BD SOW Doc",
667
  lines=20,
668
  visible=True
669
  ),
670
- "page_general_sow_markdown": UIConfig(
671
  component_type=UIComponentType.MARKDOWN,
672
  label="BD SOW Doc",
673
  visible=False,
@@ -675,7 +669,7 @@ PROMPTS = {
675
  )
676
  }
677
  ),
678
-
679
  "generate_Tech_SOW": PromptConfig(
680
  prompt=
681
  """
@@ -724,8 +718,8 @@ PROMPTS = {
724
  "system_flow": <markdown content>
725
  }
726
  """,
727
- inputs=["derived_plan_test_components", "derived_dev_components"],
728
- outputs=["detailed_sow"],
729
  model=ModelType.O1_MINI,
730
  description="Generate Test SOW",
731
  step="Step 3 : SOW Doc",
@@ -736,13 +730,13 @@ PROMPTS = {
736
  lines=20,
737
  interactive=True
738
  ),
739
- "page_detailed_sow_text": UIConfig(
740
  component_type=UIComponentType.TEXTBOX,
741
  label="Technical SOW Doc",
742
  lines=20,
743
  visible=True
744
  ),
745
- "page_detailed_sow_markdown": UIConfig(
746
  component_type=UIComponentType.MARKDOWN,
747
  label="Technical SOW Doc",
748
  visible=False,
@@ -750,5 +744,6 @@ PROMPTS = {
750
  )
751
  }
752
  ),
 
753
  }
754
 
 
128
  }
129
  ),
130
 
131
+ "generate_prd": PromptConfig(
132
  prompt=
133
  """
134
  Rewrite this for clarity while keeping all specific details, metrics, and constraints.
 
144
  ui={
145
  "requirement_prompt_editor": UIConfig(
146
  component_type=UIComponentType.TEXTBOX,
147
+ label="Requirements(PRD) System Prompt",
148
  lines=20,
149
  interactive=True
150
  ),
151
+ "generated_prd_text": UIConfig(
152
  component_type=UIComponentType.TEXTBOX,
153
+ label="Requirements(PRD) Output",
154
  lines=20,
155
  visible=True
156
  ),
157
+ "generated_prd_markdown": UIConfig(
158
  component_type=UIComponentType.MARKDOWN,
159
+ label="Requirements(PRD) Output",
160
  visible=False,
161
  show_copy_button=True
162
  )
 
248
  }
249
  ),
250
 
251
+ "generate_plan_test_components": PromptConfig(
252
  prompt=
253
  """
254
  Context:
 
268
  Include sub-bullets for tasks or subcomponents where necessary to provide additional detail.
269
  """,
270
  inputs=["generated_prd"],
271
+ outputs=["generated_plan_test_components"],
272
  model=ModelType.O1_MINI,
273
  description="Generate planning and testing components",
274
  step="Step 1 : Scope & Components",
275
  ui={
276
+ "plan_test_component_prompt_editor": UIConfig(
277
  component_type=UIComponentType.TEXTBOX,
278
  label="Plan Test System Prompt",
279
  lines=20,
280
  interactive=True
281
  ),
282
+ "generated_plan_test_components_text": UIConfig(
283
  component_type=UIComponentType.TEXTBOX,
284
  label="Plan & Test Components Output",
285
  lines=20,
286
  visible=True
287
  ),
288
+ "generated_plan_test_components_markdown": UIConfig(
289
  component_type=UIComponentType.MARKDOWN,
290
  label="Plan & Test Components Output",
291
  visible=False,
 
334
  Please respond in English only.
335
  """,
336
  inputs=["generated_prd"],
337
+ outputs=["generated_dev_components"],
338
  model=ModelType.O1_MINI,
339
  description="Generate development components",
340
  step="Step 1 : Scope & Components",
341
  ui={
342
+ "dev_component_prompt_editor": UIConfig(
343
  component_type=UIComponentType.TEXTBOX,
344
  label="Development System Prompt",
345
  lines=20,
346
  interactive=True
347
  ),
348
+ "generated_dev_components_text": UIConfig(
349
  component_type=UIComponentType.TEXTBOX,
350
  label="Development Components Output",
351
  lines=20,
352
  visible=True
353
  ),
354
+ "generated_dev_components_markdown": UIConfig(
355
  component_type=UIComponentType.MARKDOWN,
356
  label="Development Components Output",
357
  visible=False,
 
360
  }
361
  ),
362
 
 
363
  "generate_plan_test_mandays": PromptConfig(
364
  prompt=
365
  """
366
  You are an experienced project manager tasked to create a detailed task breakdown for a project based on the planning and testing component list.
367
 
 
 
 
368
  Objective:
369
  Generate a structured CSV output with manday estimates for each planning and testing component.
370
 
 
389
 
390
  Return only the CSV content, no code blocks or additional text.
391
  """,
392
+ inputs=["generated_plan_test_components"],
393
+ outputs=["generated_plan_test_mandays"],
394
  model=ModelType.O1_MINI,
395
+ description="Generate planning and testing mandays from plan_test components table",
396
+ step="Step 2 : Planning & Testing Mandays",
397
  ui={
398
+ "plan_test_mandays_prompt_editor": UIConfig(
399
  component_type=UIComponentType.TEXTBOX,
400
  label="Planning & Testing Mandays System Prompt",
401
  lines=20,
402
  interactive=True
403
  ),
404
+ "generated_plan_test_mandays_dataframe": UIConfig(
405
  component_type=UIComponentType.DATAFRAME,
406
  label="Plan & Test Mandays",
407
  interactive=True
 
414
  """
415
  You are an experienced project manager tasked to create a detailed task breakdown for development components.
416
 
 
 
417
 
418
  Objective:
419
  Generate a structured CSV output with manday estimates for each development component and subcomponent from the provided development component list.
 
448
 
449
  YOU MUST GENERATE ACTUAL DATA ROWS based on the development component list provided.
450
  """,
451
+ inputs=["generated_dev_components"],
452
+ outputs=["generated_dev_mandays"],
453
  model=ModelType.O1_MINI,
454
+ description="Generate development mandays from dev components table",
455
+ step="Step 2 : Planning & Testing Mandays",
456
  ui={
457
  "dev_mandays_prompt_editor": UIConfig(
458
  component_type=UIComponentType.TEXTBOX,
 
460
  lines=20,
461
  interactive=True
462
  ),
463
+ "generated_dev_mandays_dataframe": UIConfig(
464
  component_type=UIComponentType.DATAFRAME,
465
  label="Dev Mandays",
466
  interactive=True
 
515
  Instructions:
516
  Replace <client context> with relevant information derived from the provided client background.
517
  Only provide the list of formatted questions without any additional introduction or summary.
518
+ """,
519
  inputs=['project_detail'],
520
  outputs=['gathered_project_input'],
521
  model=ModelType.O1_MINI,
 
578
  """
579
  As an project manager with 20+ years of experience, you are tasked to create a detailed Scope of Work (SOW) document. Analyze the provided project component list and scope document to generate the following sections. Follow the guidelines below to ensure a professional, structured, and client-ready output:
580
 
581
+ ### **Scope of Work (SOW)**
582
  #### **1. Project Background**
583
  - Provide a brief overview of the project, including the context, problem statement, and why the project is being initiated.
584
  - Break down key challenges (in bullet point) the company currently facing, quantifying the impacts where possible (e.g., lost revenue, downtime).
 
597
  - Textual Description: Detailed explanation of the processes and transitions
598
  - Use bullet point, ensure simplicity and avoid overly technical langua
599
  #### **5. Modules and Functional Requirements Breakdown**
600
+ - LEAVE THIS BLANK
601
  #### **6. Acceptance Criteria**
602
  - Define conditions to be met, including specific, measurable criteria for project completion:
603
  - Link each deliverable/module to its validation or testing process (e.g., UAT).
 
619
  - Expected Date/Duration
620
  - Outcome/Deliverable
621
  - Use a Gantt chart or table to visualize the timeline.
622
+ - Ensure the output are clean, organized, and easy to read
623
  #### **9. Commercial**
624
  Summarize the project's commercial details in the following subsections:
625
  - Development Fee: Create a table summarizing the costs for development, including the product, technical work supporting, or other additional services provided
 
628
  - Milestones: Specify at which stages payments are due
629
  - Invoicing: Define invoicing intervals (e.g., monthly, quarterly) and payment deadlines
630
  - Other Terms: Mention late payment fees or additional terms, if applicable
631
+ - Output Format for tables: {Service}, {Fee} (leave amount blank)
632
  #### **10. Sign-Off**
633
  - Create a professional and formal Sign-Off section to acknowledge and approve the SOW.
634
  - Include an statement to clearly communicate that both parties have reviewed and agreed to the SOW.
 
636
  - Signature
637
  - Name
638
  - Position
639
+ - Date
640
  #### **Guidelines**
641
  - Use bullet points for clarity.
642
  - Keep descriptions concise and client-friendly; avoid technical jargon unless necessary.
643
+ - Maintain structured sections and tables for readability.
644
  Expected output should be professional, well-structured, and designed to help clients and stakeholders clearly understand the project scope. I'm going to tip you for a better outcome!
645
  """,
646
+ inputs=["generated_prd", "generated_plan_test_components", "generated_dev_components", "quotation_cost"],
647
+ outputs=["generated_BD_SOW"],
648
  model=ModelType.O1_MINI,
649
  description="Generate BD SOW",
650
  step="Step 3 : SOW Doc",
 
655
  lines=20,
656
  interactive=True
657
  ),
658
+ "generated_BD_SOW_text": UIConfig(
659
  component_type=UIComponentType.TEXTBOX,
660
  label="BD SOW Doc",
661
  lines=20,
662
  visible=True
663
  ),
664
+ "generated_BD_SOW_markdown": UIConfig(
665
  component_type=UIComponentType.MARKDOWN,
666
  label="BD SOW Doc",
667
  visible=False,
 
669
  )
670
  }
671
  ),
672
+
673
  "generate_Tech_SOW": PromptConfig(
674
  prompt=
675
  """
 
718
  "system_flow": <markdown content>
719
  }
720
  """,
721
+ inputs=["generated_plan_test_components", "generated_dev_components"],
722
+ outputs=["generated_Tech_SOW"],
723
  model=ModelType.O1_MINI,
724
  description="Generate Test SOW",
725
  step="Step 3 : SOW Doc",
 
730
  lines=20,
731
  interactive=True
732
  ),
733
+ "generated_Tech_SOW_text": UIConfig(
734
  component_type=UIComponentType.TEXTBOX,
735
  label="Technical SOW Doc",
736
  lines=20,
737
  visible=True
738
  ),
739
+ "generated_Tech_SOW_markdown": UIConfig(
740
  component_type=UIComponentType.MARKDOWN,
741
  label="Technical SOW Doc",
742
  visible=False,
 
744
  )
745
  }
746
  ),
747
+
748
  }
749
 
step_handlers.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from Project import *

def setup_all_handlers(step_buttons, all_components, page_progress_update, quotation_cost):
    """Wire each workflow step button to its generator function.

    Args:
        step_buttons: Mapping of step label -> clickable button component.
        all_components: Mapping of prompt name -> dict of its UI components,
            as built from page_prompts_config.
        page_progress_update: Shared status component updated by every step.
        quotation_cost: Component/state holding the computed quotation cost.
    """
    # Local aliases for the per-prompt component groups referenced below.
    prd = all_components['generate_prd']
    plan_test = all_components['generate_plan_test_components']
    dev = all_components['generate_dev_components']
    plan_test_mandays = all_components['generate_plan_test_mandays']
    dev_mandays = all_components['generate_dev_mandays']
    bd_sow = all_components['generate_BD_SOW']
    tech_sow = all_components['generate_Tech_SOW']

    # Step 1: derive the PRD plus both component lists from project details.
    step_buttons['Step 1 : Scope & Components'].click(
        fn=v4_generate_prd_and_components,
        inputs=[],
        outputs=[
            prd['generated_prd_text'],
            prd['generated_prd_markdown'],
            plan_test['generated_plan_test_components_text'],
            plan_test['generated_plan_test_components_markdown'],
            dev['generated_dev_components_text'],
            dev['generated_dev_components_markdown'],
            page_progress_update,
        ],
    )

    # Step 2: turn the (possibly user-edited) component lists into manday tables.
    step_buttons['Step 2 : Planning & Testing Mandays'].click(
        fn=v4_generate_mandays_and_quotation,
        inputs=[
            plan_test['generated_plan_test_components_text'],
            dev['generated_dev_components_text'],
        ],
        outputs=[
            plan_test_mandays['generated_plan_test_mandays_dataframe'],
            dev_mandays['generated_dev_mandays_dataframe'],
            page_progress_update,
            quotation_cost,
        ],
    )

    # Step 3: produce the BD and technical SOW documents.
    step_buttons['Step 3 : SOW Doc'].click(
        fn=v4_generate_sow,
        inputs=[
            prd['generated_prd_text'],
            plan_test['generated_plan_test_components_text'],
            dev['generated_dev_components_text'],
            quotation_cost,
        ],
        outputs=[
            bd_sow['generated_BD_SOW_text'],
            bd_sow['generated_BD_SOW_markdown'],
            tech_sow['generated_Tech_SOW_text'],
            tech_sow['generated_Tech_SOW_markdown'],
            page_progress_update,
        ],
    )