ICAS03 committed on
Commit
8b8f8c6
·
1 Parent(s): 620c3f1

- save outputs into dbs

Browse files
Files changed (4) hide show
  1. Project.py +57 -95
  2. app.py +1 -1
  3. common_functions_v4.py +74 -18
  4. page_prompts_config.py +7 -7
Project.py CHANGED
@@ -1,5 +1,6 @@
1
  from typing import Any, Dict
2
  from common_functions_v4 import *
 
3
  from page_prompts_config import PROMPTS, ModelType
4
  from langtrace_python_sdk.utils.with_root_span import with_langtrace_root_span
5
  import openai
@@ -55,6 +56,10 @@ class Project:
55
  self.component_list = []
56
  self.project_detail = []
57
 
 
 
 
 
58
  # Initialize all prompt outputs as attributes
59
  for config in PROMPTS.values():
60
  for output in config.outputs:
@@ -76,6 +81,10 @@ class Project:
76
  'mvp_components': lambda self: self.mvp_components,
77
  'revised_mandays_estimates': lambda self: self.revised_mandays,
78
  'formatted_mvp_mandays': lambda self: self.formatted_mvp_mandays,
 
 
 
 
79
  }
80
 
81
  def execute_prompt(self, prompt_name: str, input_variables: Dict[str, Any] = None) -> str:
@@ -180,16 +189,13 @@ class Project:
180
  "project_detail": self.get_project_detail()
181
  }
182
  )
 
183
 
184
  #####################################################################
185
  def generate_prd_and_components(self, progress=gr.Progress()):
186
  """Generate PRD and components from project details"""
187
 
188
  # Step 1 : Log & Generate PRD
189
- try:
190
- log_prompt_execution(PROMPTS['generate_prd'].step, PROMPTS['generate_prd'].description, PROMPTS["generate_prd"].prompt)
191
- except Exception as e:
192
- print(f"Error logging PRD generation: {str(e)}")
193
  progress(0, desc="Progress 1: Generating PRD from Q&A...")
194
  self.generated_prd = self.execute_prompt(
195
  "generate_prd",
@@ -197,14 +203,9 @@ class Project:
197
  "project_detail": self.get_project_detail()
198
  }
199
  )
200
-
 
201
  # Step 2 : Log & Generate Plan & Test Components
202
- try:
203
- log_prompt_execution(PROMPTS['generate_plan_test_components'].step, PROMPTS['generate_plan_test_components'].description, PROMPTS["generate_plan_test_components"].prompt)
204
- except Exception as e:
205
- print(f"Error logging PRD generation: {str(e)}")
206
-
207
- # Step 2: Generate Components
208
  progress(0.4, desc="Progress 2: Generating Planning & Testing Component...")
209
  self.plan_test_component_list = self.execute_prompt(
210
  "generate_plan_test_components",
@@ -212,13 +213,9 @@ class Project:
212
  "generated_prd": self.generated_prd
213
  }
214
  )
215
-
 
216
  # Step 3 : Log & Generate Dev Components
217
- try:
218
- log_prompt_execution(PROMPTS['generate_dev_components'].step, PROMPTS['generate_dev_components'].description, PROMPTS["generate_dev_components"].prompt)
219
- except Exception as e:
220
- print(f"Error logging Plan Test Components generation: {str(e)}")
221
-
222
  progress(0.7, desc="Progress 3: Generating Development Components...")
223
  self.dev_component_list = self.execute_prompt(
224
  "generate_dev_components",
@@ -226,13 +223,9 @@ class Project:
226
  "generated_prd": self.generated_prd
227
  }
228
  )
229
-
 
230
  # Step 4 : Log & Reformat Dev Components
231
- try:
232
- log_prompt_execution(PROMPTS['reformat_dev_components'].step, PROMPTS['reformat_dev_components'].description, PROMPTS["reformat_dev_components"].prompt)
233
- except Exception as e:
234
- print(f"Error logging Development Components generation: {str(e)}")
235
-
236
  progress(0.8, desc="Progress 4: Reformatting Development Components...")
237
  self.reformatted_dev_component_list = self.execute_prompt(
238
  "reformat_dev_components",
@@ -240,6 +233,7 @@ class Project:
240
  "generated_dev_components": self.dev_component_list
241
  }
242
  )
 
243
 
244
  progress(1.0, desc="Complete!")
245
 
@@ -263,11 +257,6 @@ class Project:
263
  self.reformatted_dev_components = dev_component
264
 
265
  # Step 1 : Log & Generate Plan & Test Mandays
266
- try:
267
- log_prompt_execution(PROMPTS['generate_plan_test_mandays'].step, PROMPTS['generate_plan_test_mandays'].sub_step, PROMPTS["generate_plan_test_mandays"].prompt)
268
- except Exception as e:
269
- print(f"Error logging Plan Test Mandays generation: {str(e)}")
270
-
271
  progress(0.4, desc="Progress 1: Generating Mandays for Plan & Test Components...")
272
  self.plan_test_mandays = self.execute_prompt(
273
  "generate_plan_test_mandays",
@@ -275,13 +264,9 @@ class Project:
275
  "generated_plan_test_components": plan_test_component
276
  }
277
  )
278
-
 
279
  # Step 2 : Log & Generate Dev Mandays
280
- try:
281
- log_prompt_execution(PROMPTS['generate_dev_mandays'].step, PROMPTS['generate_dev_mandays'].sub_step, PROMPTS["generate_dev_mandays"].prompt)
282
- except Exception as e:
283
- print(f"Error logging Dev Mandays generation: {str(e)}")
284
-
285
  progress(0.8, desc="Progress 2: Generating Mandays for Dev Components...")
286
  self.dev_mandays = self.execute_prompt(
287
  "generate_dev_mandays",
@@ -289,7 +274,8 @@ class Project:
289
  "reformatted_dev_components": dev_component
290
  }
291
  )
292
-
 
293
  progress(1.0, desc="Complete!")
294
  return [
295
  pd.read_csv(StringIO(self.plan_test_mandays), on_bad_lines='skip'),
@@ -301,11 +287,6 @@ class Project:
301
  """Step 2.2: Analyze components"""
302
 
303
  # Step 1 : Log & Analyze Plan & Test Mandays
304
- try:
305
- log_prompt_execution(PROMPTS['analyze_planning_testing_mandays'].step, PROMPTS['analyze_planning_testing_mandays'].sub_step, PROMPTS["analyze_planning_testing_mandays"].prompt)
306
- except Exception as e:
307
- print(f"Error logging Planning & Testing Components analysis: {str(e)}")
308
-
309
  progress(0.4, desc="Progress 1: Analyzing Planning & Testing Components...")
310
  self.planning_testing_components = self.execute_prompt(
311
  "analyze_planning_testing_mandays",
@@ -313,13 +294,9 @@ class Project:
313
  "generated_plan_test_mandays": self.plan_test_mandays
314
  }
315
  )
 
316
 
317
  # Step 2 : Log & Analyze Dev Mandays
318
- try:
319
- log_prompt_execution(PROMPTS['analyze_development_mandays'].step, PROMPTS['analyze_development_mandays'].sub_step, PROMPTS["analyze_development_mandays"].prompt)
320
- except Exception as e:
321
- print(f"Error logging Development Components analysis: {str(e)}")
322
-
323
  progress(0.8, desc="Progress 2: Analyzing Development Components...")
324
  self.development_components = self.execute_prompt(
325
  "analyze_development_mandays",
@@ -327,7 +304,8 @@ class Project:
327
  "generated_dev_mandays": self.dev_mandays
328
  }
329
  )
330
-
 
331
  # Combine the results from both analyses
332
  self.mvp_components = {
333
  "planning_testing": self.planning_testing_components,
@@ -341,11 +319,6 @@ class Project:
341
  """Step 2.3: Generate MVP estimates"""
342
 
343
  # Step 1 : Log & Recalculate Mandays
344
- try:
345
- log_prompt_execution(PROMPTS['recalculate_mandays'].step, PROMPTS['recalculate_mandays'].sub_step, PROMPTS["recalculate_mandays"].prompt)
346
- except Exception as e:
347
- print(f"Error logging MVP Mandays recalculation: {str(e)}")
348
-
349
  progress(0.3, desc="Progress 1: Recalculating Mandays...")
350
  self.revised_mandays = self.execute_prompt(
351
  "recalculate_mandays",
@@ -354,32 +327,28 @@ class Project:
354
  "generated_prd": self.generated_prd
355
  }
356
  )
 
357
 
358
  # Step 2 : Log & Generate MVP Mandays
359
- try:
360
- log_prompt_execution(PROMPTS['generate_MVP_mandays'].step, PROMPTS['generate_MVP_mandays'].sub_step, PROMPTS["generate_MVP_mandays"].prompt)
361
- except Exception as e:
362
- print(f"Error logging MVP Mandays generation: {str(e)}")
363
-
364
  progress(0.6, desc="Progress 2: Formatting MVP Mandays...")
365
- mvp_mandays = self.execute_prompt(
366
  "generate_MVP_mandays",
367
  {
368
  "revised_mandays_estimates": self.revised_mandays
369
  }
370
  )
371
-
372
  try:
373
  # Process MVP mandays into dataframe
374
- mvp_mandays = (mvp_mandays
375
  .replace("```csv\n", "")
376
  .replace("```\n", "")
377
  .replace("```", "")
378
  .strip())
379
 
380
- sections = (mvp_mandays.split("---SECTION BREAK---")
381
- if "---SECTION BREAK---" in mvp_mandays
382
- else mvp_mandays.split("\n---\n"))
383
 
384
  if len(sections) != 2:
385
  raise ValueError("MVP mandays output must contain exactly two sections")
@@ -411,7 +380,7 @@ class Project:
411
  for df in [plan_test_df, dev_df, mvp_df]:
412
  df['mandays'] = pd.to_numeric(df['mandays'].replace('', '0'), errors='coerce').fillna(0)
413
 
414
- total_mandays, total_cost, estimated_months, mvp_mandays, mvp_cost, mvp_estimated_months = calculate_mandays_and_costs(plan_test_df, dev_df, mvp_df)
415
 
416
  cost_summary = f"""
417
  Original Estimate:
@@ -420,18 +389,19 @@ class Project:
420
  ({estimated_months:.2}months)
421
 
422
  MVP-Based Estimate:
423
- Total Mandays: {mvp_mandays:.2f}
424
  Total Cost: ${mvp_cost:,.2f}
425
  ({mvp_estimated_months:.2}months)
426
  """
427
-
 
428
  progress(1.0, desc="Complete!")
429
  return [mvp_df, "Generated MVP estimates!", cost_summary]
430
 
431
  except Exception as e:
432
  return [None, f"Error processing MVP mandays: {str(e)}", "Error calculating costs"]
433
 
434
- ################################################################################################
435
  def generate_sow(self, generated_prd, plan_test_component, dev_component, mvp_components , cost, progress=gr.Progress()):
436
  self.generated_plan_test_components = plan_test_component
437
  self.reformatted_dev_components = dev_component
@@ -439,28 +409,19 @@ class Project:
439
  self.mvp_components = mvp_components
440
 
441
  # Step 1 : Log & Generate MVP PRD
442
- try:
443
- log_prompt_execution(PROMPTS['generate_mvp_prd'].step, PROMPTS['generate_mvp_prd'].description, PROMPTS["generate_mvp_prd"].prompt)
444
- except Exception as e:
445
- print(f"Error logging MVP PRD generation: {str(e)}")
446
-
447
  progress(0.2, desc="Progress 1: Drafting MVP PRD")
448
- mvp_prd = self.execute_prompt(
449
  "generate_mvp_prd",
450
  {
451
  "generated_prd": generated_prd,
452
  "mvp_components": mvp_components
453
  }
454
  )
 
455
 
456
  # Step 2 : Log & Generate Business SOW
457
- try:
458
- log_prompt_execution(PROMPTS['generate_BD_SOW'].step, PROMPTS['generate_BD_SOW'].description, PROMPTS["generate_BD_SOW"].prompt)
459
- except Exception as e:
460
- print(f"Error logging MVP PRD generation: {str(e)}")
461
-
462
  progress(0.4, desc="Progress 2: Drafting SOW")
463
- general_sow = self.execute_prompt(
464
  "generate_BD_SOW",
465
  {
466
  "generated_prd": generated_prd,
@@ -469,15 +430,11 @@ class Project:
469
  "quotation_cost": cost
470
  }
471
  )
472
-
 
473
  # Step 3 : Log & Generate Technical SOW
474
- try:
475
- log_prompt_execution(PROMPTS['generate_Tech_SOW'].step, PROMPTS['generate_Tech_SOW'].description, PROMPTS["generate_Tech_SOW"].prompt)
476
- except Exception as e:
477
- print(f"Error logging Technical SOW generation: {str(e)}")
478
-
479
  progress(0.8, desc="Progress 3: Drafting Technical SOW")
480
- detailed_sow_json = self.execute_prompt(
481
  "generate_Tech_SOW",
482
  {
483
  "generated_plan_test_components": plan_test_component,
@@ -485,23 +442,24 @@ class Project:
485
  "mvp_components": mvp_components,
486
  }
487
  )
488
-
 
489
  try:
490
  # Parse detailed_sow into a JSON object
491
- detailed_sow_json = json.loads(detailed_sow_json)
492
 
493
  # Extract required fields
494
- scope_summary = detailed_sow_json.get("scope_summary", "")
495
- modules_and_functional_requirements = detailed_sow_json.get("modules_and_functional_requirements", "")
496
- out_of_scope = detailed_sow_json.get("out_of_scope", "")
497
- system_flow = detailed_sow_json.get("system_flow", "")
498
 
499
  # Combine all fields into detailed_sow
500
  detailed_sow = f"{scope_summary}\n\n{modules_and_functional_requirements}\n\n{out_of_scope}\n\n{system_flow}"
501
 
502
  # Create final SOW
503
- final_general_sow = f"**Hi , some sections of this SOW is generated seprately from the main draft. youll have to move this to the right spot manually ;)\n **Project Quotation:**\n{cost}\n{modules_and_functional_requirements}\n\n {general_sow}\nEOF"
504
- return [mvp_prd , mvp_prd ,final_general_sow, final_general_sow, detailed_sow, detailed_sow, "Generated SOW!"]
505
  except Exception as e:
506
  return ["Error generating SOW", "Error: " + str(e), "Failed to generate SOW"]
507
 
@@ -515,7 +473,11 @@ class Project:
515
  mvp_df['mandays'] = pd.to_numeric(mvp_df['mandays'].replace('', '0'), errors='coerce').fillna(0)
516
 
517
  # Calculate totals
518
- total_mandays, total_cost, estimated_months, mvp_mandays, mvp_cost, mvp_estimated_months = calculate_mandays_and_costs(plan_test_df, dev_df, mvp_df)
 
 
 
 
519
 
520
  status_message = f"Successfully Updated Quotation. SessionID:{self.session_id}"
521
  cost_summary = f"""
@@ -525,7 +487,7 @@ class Project:
525
  ({estimated_months:.2}months)
526
 
527
  MVP-Based Estimate:
528
- Total Mandays: {mvp_mandays:.2f}
529
  Total Cost: ${mvp_cost:,.2f}
530
  ({mvp_estimated_months:.2}months)"""
531
 
 
1
  from typing import Any, Dict
2
  from common_functions_v4 import *
3
+ from common_functions_v4 import log_prompt
4
  from page_prompts_config import PROMPTS, ModelType
5
  from langtrace_python_sdk.utils.with_root_span import with_langtrace_root_span
6
  import openai
 
56
  self.component_list = []
57
  self.project_detail = []
58
 
59
+ self.plan_test_mandays_output_id = None
60
+ self.dev_mandays_output_id = None
61
+ self.mvp_mandays_output_id = None
62
+
63
  # Initialize all prompt outputs as attributes
64
  for config in PROMPTS.values():
65
  for output in config.outputs:
 
81
  'mvp_components': lambda self: self.mvp_components,
82
  'revised_mandays_estimates': lambda self: self.revised_mandays,
83
  'formatted_mvp_mandays': lambda self: self.formatted_mvp_mandays,
84
+ 'generated_mvp_prd': lambda self: self.mvp_prd,
85
+ 'general_sow': lambda self: self.general_sow,
86
+ 'detailed_sow_json': lambda self: self.detailed_sow_json,
87
+ 'mvp_mandays': lambda self: self.mvp_mandays,
88
  }
89
 
90
  def execute_prompt(self, prompt_name: str, input_variables: Dict[str, Any] = None) -> str:
 
189
  "project_detail": self.get_project_detail()
190
  }
191
  )
192
+
193
 
194
  #####################################################################
195
  def generate_prd_and_components(self, progress=gr.Progress()):
196
  """Generate PRD and components from project details"""
197
 
198
  # Step 1 : Log & Generate PRD
 
 
 
 
199
  progress(0, desc="Progress 1: Generating PRD from Q&A...")
200
  self.generated_prd = self.execute_prompt(
201
  "generate_prd",
 
203
  "project_detail": self.get_project_detail()
204
  }
205
  )
206
+ log_prompt(PROMPTS['generate_prd'].step, PROMPTS['generate_prd'].description, PROMPTS["generate_prd"].prompt, self.generated_prd)
207
+
208
  # Step 2 : Log & Generate Plan & Test Components
 
 
 
 
 
 
209
  progress(0.4, desc="Progress 2: Generating Planning & Testing Component...")
210
  self.plan_test_component_list = self.execute_prompt(
211
  "generate_plan_test_components",
 
213
  "generated_prd": self.generated_prd
214
  }
215
  )
216
+ log_prompt(PROMPTS['generate_plan_test_components'].step, PROMPTS['generate_plan_test_components'].description, PROMPTS["generate_plan_test_components"].prompt, self.plan_test_component_list)
217
+
218
  # Step 3 : Log & Generate Dev Components
 
 
 
 
 
219
  progress(0.7, desc="Progress 3: Generating Development Components...")
220
  self.dev_component_list = self.execute_prompt(
221
  "generate_dev_components",
 
223
  "generated_prd": self.generated_prd
224
  }
225
  )
226
+ log_prompt(PROMPTS['generate_dev_components'].step, PROMPTS['generate_dev_components'].description, PROMPTS["generate_dev_components"].prompt, self.dev_component_list)
227
+
228
  # Step 4 : Log & Reformat Dev Components
 
 
 
 
 
229
  progress(0.8, desc="Progress 4: Reformatting Development Components...")
230
  self.reformatted_dev_component_list = self.execute_prompt(
231
  "reformat_dev_components",
 
233
  "generated_dev_components": self.dev_component_list
234
  }
235
  )
236
+ log_prompt(PROMPTS['reformat_dev_components'].step, PROMPTS['reformat_dev_components'].description, PROMPTS["reformat_dev_components"].prompt, self.reformatted_dev_component_list)
237
 
238
  progress(1.0, desc="Complete!")
239
 
 
257
  self.reformatted_dev_components = dev_component
258
 
259
  # Step 1 : Log & Generate Plan & Test Mandays
 
 
 
 
 
260
  progress(0.4, desc="Progress 1: Generating Mandays for Plan & Test Components...")
261
  self.plan_test_mandays = self.execute_prompt(
262
  "generate_plan_test_mandays",
 
264
  "generated_plan_test_components": plan_test_component
265
  }
266
  )
267
+ self.plan_test_mandays_output_id = log_prompt(PROMPTS['generate_plan_test_mandays'].step, PROMPTS['generate_plan_test_mandays'].description, PROMPTS["generate_plan_test_mandays"].prompt, self.plan_test_mandays)
268
+
269
  # Step 2 : Log & Generate Dev Mandays
 
 
 
 
 
270
  progress(0.8, desc="Progress 2: Generating Mandays for Dev Components...")
271
  self.dev_mandays = self.execute_prompt(
272
  "generate_dev_mandays",
 
274
  "reformatted_dev_components": dev_component
275
  }
276
  )
277
+ self.dev_mandays_output_id = log_prompt(PROMPTS['generate_dev_mandays'].step, PROMPTS['generate_dev_mandays'].description, PROMPTS["generate_dev_mandays"].prompt, self.dev_mandays)
278
+
279
  progress(1.0, desc="Complete!")
280
  return [
281
  pd.read_csv(StringIO(self.plan_test_mandays), on_bad_lines='skip'),
 
287
  """Step 2.2: Analyze components"""
288
 
289
  # Step 1 : Log & Analyze Plan & Test Mandays
 
 
 
 
 
290
  progress(0.4, desc="Progress 1: Analyzing Planning & Testing Components...")
291
  self.planning_testing_components = self.execute_prompt(
292
  "analyze_planning_testing_mandays",
 
294
  "generated_plan_test_mandays": self.plan_test_mandays
295
  }
296
  )
297
+ log_prompt(PROMPTS['analyze_planning_testing_mandays'].step, PROMPTS['analyze_planning_testing_mandays'].description, PROMPTS["analyze_planning_testing_mandays"].prompt, self.planning_testing_components)
298
 
299
  # Step 2 : Log & Analyze Dev Mandays
 
 
 
 
 
300
  progress(0.8, desc="Progress 2: Analyzing Development Components...")
301
  self.development_components = self.execute_prompt(
302
  "analyze_development_mandays",
 
304
  "generated_dev_mandays": self.dev_mandays
305
  }
306
  )
307
+ log_prompt(PROMPTS['analyze_development_mandays'].step, PROMPTS['analyze_development_mandays'].description, PROMPTS["analyze_development_mandays"].prompt, self.development_components)
308
+
309
  # Combine the results from both analyses
310
  self.mvp_components = {
311
  "planning_testing": self.planning_testing_components,
 
319
  """Step 2.3: Generate MVP estimates"""
320
 
321
  # Step 1 : Log & Recalculate Mandays
 
 
 
 
 
322
  progress(0.3, desc="Progress 1: Recalculating Mandays...")
323
  self.revised_mandays = self.execute_prompt(
324
  "recalculate_mandays",
 
327
  "generated_prd": self.generated_prd
328
  }
329
  )
330
+ log_prompt(PROMPTS['recalculate_mandays'].step, PROMPTS['recalculate_mandays'].description, PROMPTS["recalculate_mandays"].prompt, self.revised_mandays)
331
 
332
  # Step 2 : Log & Generate MVP Mandays
 
 
 
 
 
333
  progress(0.6, desc="Progress 2: Formatting MVP Mandays...")
334
+ self.mvp_mandays = self.execute_prompt(
335
  "generate_MVP_mandays",
336
  {
337
  "revised_mandays_estimates": self.revised_mandays
338
  }
339
  )
340
+
341
  try:
342
  # Process MVP mandays into dataframe
343
+ self.mvp_mandays = (self.mvp_mandays
344
  .replace("```csv\n", "")
345
  .replace("```\n", "")
346
  .replace("```", "")
347
  .strip())
348
 
349
+ sections = (self.mvp_mandays.split("---SECTION BREAK---")
350
+ if "---SECTION BREAK---" in self.mvp_mandays
351
+ else self.mvp_mandays.split("\n---\n"))
352
 
353
  if len(sections) != 2:
354
  raise ValueError("MVP mandays output must contain exactly two sections")
 
380
  for df in [plan_test_df, dev_df, mvp_df]:
381
  df['mandays'] = pd.to_numeric(df['mandays'].replace('', '0'), errors='coerce').fillna(0)
382
 
383
+ total_mandays, total_cost, estimated_months, total_mvp_mandays, mvp_cost, mvp_estimated_months = calculate_mandays_and_costs(plan_test_df, dev_df, mvp_df)
384
 
385
  cost_summary = f"""
386
  Original Estimate:
 
389
  ({estimated_months:.2}months)
390
 
391
  MVP-Based Estimate:
392
+ Total Mandays: {total_mvp_mandays:.2f}
393
  Total Cost: ${mvp_cost:,.2f}
394
  ({mvp_estimated_months:.2}months)
395
  """
396
+ self.mvp_mandays_output_id = log_prompt(PROMPTS['generate_MVP_mandays'].step, PROMPTS['generate_MVP_mandays'].description, PROMPTS["generate_MVP_mandays"].prompt, self.mvp_mandays)
397
+
398
  progress(1.0, desc="Complete!")
399
  return [mvp_df, "Generated MVP estimates!", cost_summary]
400
 
401
  except Exception as e:
402
  return [None, f"Error processing MVP mandays: {str(e)}", "Error calculating costs"]
403
 
404
+ ################################################################################################
405
  def generate_sow(self, generated_prd, plan_test_component, dev_component, mvp_components , cost, progress=gr.Progress()):
406
  self.generated_plan_test_components = plan_test_component
407
  self.reformatted_dev_components = dev_component
 
409
  self.mvp_components = mvp_components
410
 
411
  # Step 1 : Log & Generate MVP PRD
 
 
 
 
 
412
  progress(0.2, desc="Progress 1: Drafting MVP PRD")
413
+ self.mvp_prd = self.execute_prompt(
414
  "generate_mvp_prd",
415
  {
416
  "generated_prd": generated_prd,
417
  "mvp_components": mvp_components
418
  }
419
  )
420
+ log_prompt(PROMPTS['generate_mvp_prd'].step, PROMPTS['generate_mvp_prd'].description, PROMPTS["generate_mvp_prd"].prompt, self.mvp_prd)
421
 
422
  # Step 2 : Log & Generate Business SOW
 
 
 
 
 
423
  progress(0.4, desc="Progress 2: Drafting SOW")
424
+ self.general_sow = self.execute_prompt(
425
  "generate_BD_SOW",
426
  {
427
  "generated_prd": generated_prd,
 
430
  "quotation_cost": cost
431
  }
432
  )
433
+ log_prompt(PROMPTS['generate_BD_SOW'].step, PROMPTS['generate_BD_SOW'].description, PROMPTS["generate_BD_SOW"].prompt, self.general_sow)
434
+
435
  # Step 3 : Log & Generate Technical SOW
 
 
 
 
 
436
  progress(0.8, desc="Progress 3: Drafting Technical SOW")
437
+ self.detailed_sow_json = self.execute_prompt(
438
  "generate_Tech_SOW",
439
  {
440
  "generated_plan_test_components": plan_test_component,
 
442
  "mvp_components": mvp_components,
443
  }
444
  )
445
+ log_prompt(PROMPTS['generate_Tech_SOW'].step, PROMPTS['generate_Tech_SOW'].description, PROMPTS["generate_Tech_SOW"].prompt, self.detailed_sow_json)
446
+
447
  try:
448
  # Parse detailed_sow into a JSON object
449
+ self.detailed_sow_json = json.loads(self.detailed_sow_json)
450
 
451
  # Extract required fields
452
+ scope_summary = self.detailed_sow_json.get("scope_summary", "")
453
+ modules_and_functional_requirements = self.detailed_sow_json.get("modules_and_functional_requirements", "")
454
+ out_of_scope = self.detailed_sow_json.get("out_of_scope", "")
455
+ system_flow = self.detailed_sow_json.get("system_flow", "")
456
 
457
  # Combine all fields into detailed_sow
458
  detailed_sow = f"{scope_summary}\n\n{modules_and_functional_requirements}\n\n{out_of_scope}\n\n{system_flow}"
459
 
460
  # Create final SOW
461
+ final_general_sow = f"**Hi , some sections of this SOW is generated seprately from the main draft. youll have to move this to the right spot manually ;)\n **Project Quotation:**\n{cost}\n{modules_and_functional_requirements}\n\n {self.general_sow}\nEOF"
462
+ return [self.mvp_prd , self.mvp_prd ,final_general_sow, final_general_sow, detailed_sow, detailed_sow, "Generated SOW!"]
463
  except Exception as e:
464
  return ["Error generating SOW", "Error: " + str(e), "Failed to generate SOW"]
465
 
 
473
  mvp_df['mandays'] = pd.to_numeric(mvp_df['mandays'].replace('', '0'), errors='coerce').fillna(0)
474
 
475
  # Calculate totals
476
+ total_mandays, total_cost, estimated_months, total_mvp_mandays, mvp_cost, mvp_estimated_months = calculate_mandays_and_costs(plan_test_df, dev_df, mvp_df)
477
+
478
+ update_prompt_execution_output(self.plan_test_mandays_output_id , plan_test_df)
479
+ update_prompt_execution_output(self.dev_mandays_output_id , dev_df)
480
+ update_prompt_execution_output(self.mvp_mandays_output_id , mvp_df)
481
 
482
  status_message = f"Successfully Updated Quotation. SessionID:{self.session_id}"
483
  cost_summary = f"""
 
487
  ({estimated_months:.2}months)
488
 
489
  MVP-Based Estimate:
490
+ Total Mandays: {total_mvp_mandays:.2f}
491
  Total Cost: ${mvp_cost:,.2f}
492
  ({mvp_estimated_months:.2}months)"""
493
 
app.py CHANGED
@@ -259,7 +259,7 @@ def create_quotation_generator_section():
259
 
260
  with gr.Column(scale=1):
261
  quotation_cost = gr.Textbox(label="Cost Summary", lines=3, interactive=False)
262
- page_recalc_btn = gr.Button("Recalculate")
263
  page_notes_box = gr.Textbox(
264
  label="Notes",
265
  lines=3,
 
259
 
260
  with gr.Column(scale=1):
261
  quotation_cost = gr.Textbox(label="Cost Summary", lines=3, interactive=False)
262
+ page_recalc_btn = gr.Button("Recalculate Cost")
263
  page_notes_box = gr.Textbox(
264
  label="Notes",
265
  lines=3,
common_functions_v4.py CHANGED
@@ -1,4 +1,5 @@
1
  import uuid
 
2
  import pandas as pd
3
  from io import StringIO
4
  # from pathlib import Path
@@ -327,7 +328,7 @@ def fetch_session(session_id):
327
  # return "", "", f"Error fetching session: {str(e)}", "", ""
328
 
329
  def log_prompt_execution(step_name, sub_step_name, prompt_text):
330
- """Log prompt execution to the database with a randomly generated prompt_id, avoiding duplicates for the same prompt text."""
331
  created_at = datetime.datetime.now()
332
  session_id = state.quotation_project.session_id
333
 
@@ -340,27 +341,82 @@ def log_prompt_execution(step_name, sub_step_name, prompt_text):
340
  conn = get_db_connection()
341
  cur = conn.cursor()
342
 
343
- # Check for existing record with the same prompt text
344
  cur.execute("""
345
- SELECT COUNT(*) FROM prompts
346
- WHERE session_id = %s AND step_name = %s AND sub_step_name = %s AND prompt = %s
347
- """, (session_id, step_name, sub_step_name, prompt_text))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
348
 
349
- count = cur.fetchone()[0]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
350
 
351
- if count == 0: # Only insert if no existing record with the same prompt text is found
352
- cur.execute("""
353
- INSERT INTO prompts (session_id, prompt_id, step_name, sub_step_name, prompt, created_at)
354
- VALUES (%s, %s, %s, %s, %s, %s)
355
- """, (session_id, prompt_id, step_name, sub_step_name, prompt_text, created_at))
356
- conn.commit()
357
- return prompt_id
358
- else:
359
- print("Duplicate prompt text found for the same session and step. Skipping insertion.")
360
- return None
361
  except Exception as e:
362
- print(f"Error logging prompt execution: {str(e)}")
363
  return None
364
  finally:
365
  if conn:
366
- conn.close()
 
 
 
 
 
 
 
 
 
 
 
1
  import uuid
2
+ import numpy as np
3
  import pandas as pd
4
  from io import StringIO
5
  # from pathlib import Path
 
328
  # return "", "", f"Error fetching session: {str(e)}", "", ""
329
 
330
  def log_prompt_execution(step_name, sub_step_name, prompt_text):
331
+ """Log prompt execution to the database with a randomly generated prompt_id."""
332
  created_at = datetime.datetime.now()
333
  session_id = state.quotation_project.session_id
334
 
 
341
  conn = get_db_connection()
342
  cur = conn.cursor()
343
 
344
+ # Directly insert the prompt execution without checking for duplicates
345
  cur.execute("""
346
+ INSERT INTO prompts (session_id, prompt_id, step_name, sub_step_name, prompt, created_at)
347
+ VALUES (%s, %s, %s, %s, %s, %s)
348
+ """, (session_id, prompt_id, step_name, sub_step_name, prompt_text, created_at))
349
+ conn.commit()
350
+ return prompt_id
351
+ except Exception as e:
352
+ print(f"Error logging prompt execution: {str(e)}")
353
+ return None
354
+ finally:
355
+ if conn:
356
+ conn.close()
357
+
358
def log_prompt_execution_output(prompt_id, output):
    """Insert a prompt-execution output row and return its generated output_id.

    Parameters:
        prompt_id: id of the prompts row this output belongs to. May be None
            if prompt logging failed earlier — TODO confirm the outputs table
            allows a NULL prompt_id.
        output: the raw output payload to persist.

    Returns:
        The new output_id (uuid4 string) on success, None on failure.
    """
    created_at = datetime.datetime.now()
    output_id = str(uuid.uuid4())
    conn = None  # pre-bind so the finally block is safe if get_db_connection() raises
    try:
        # Establish a database connection
        conn = get_db_connection()
        cur = conn.cursor()

        # Directly insert the output into the outputs table without checking for duplicates
        cur.execute("""
            INSERT INTO outputs (output_id, prompt_id, output, created_at)
            VALUES (%s, %s, %s, %s)
        """, (output_id, prompt_id, output, created_at))

        # Commit the transaction
        conn.commit()
        # Bug fix: return the id so callers (e.g. log_prompt) can hand it to
        # update_prompt_execution_output later; previously this fell through
        # and implicitly returned None even on success.
        return output_id
    except Exception as e:
        print(f"Error logging prompt execution output: {str(e)}")
        return None
    finally:
        if conn:
            conn.close()
383
+
384
def update_prompt_execution_output(output_id, output):
    """Overwrite the stored output (and its timestamp) for an existing output_id.

    Parameters:
        output_id: id of the outputs row to update. If None (an earlier
            logging step failed), the UPDATE simply matches no rows.
        output: new output payload. Numpy scalars are coerced to native
            Python types and DataFrames are serialized to CSV text, since DB
            drivers cannot bind those objects directly.

    Returns:
        None. Errors are printed and swallowed (best-effort logging).
    """
    created_at = datetime.datetime.now()
    conn = None  # pre-bind so the finally block is safe if get_db_connection() raises
    try:
        # Establish a database connection
        conn = get_db_connection()
        cur = conn.cursor()

        # Convert output to a standard Python type if it's a numpy type.
        # np.floating is checked explicitly because np.float32 is NOT a
        # subclass of Python float and would otherwise slip through.
        if isinstance(output, np.integer):
            output = int(output)
        elif isinstance(output, np.floating):
            output = float(output)
        elif isinstance(output, pd.DataFrame):
            # Callers pass DataFrames here (see quotation recalculation);
            # serialize to CSV text so the driver can bind it.
            # NOTE(review): assumes the output column is text — confirm schema.
            output = output.to_csv(index=False)

        # Update the existing output for the given output_id
        cur.execute("""
            UPDATE outputs
            SET output = %s, created_at = %s
            WHERE output_id = %s
        """, (output, created_at, output_id))

        # Commit the transaction
        conn.commit()
    except Exception as e:
        print(f"Error updating prompt execution output: {str(e)}")
        return None
    finally:
        if conn:
            conn.close()
413
+
414
def log_prompt(prompt_name: str, prompt_description: str, prompt: str, output: Any):
    """Log a prompt execution and its output to the database.

    Parameters:
        prompt_name: step name recorded for the prompt row.
        prompt_description: sub-step / description recorded for the prompt row.
        prompt: the prompt text that was executed.
        output: the execution result to persist alongside the prompt.

    Returns:
        The output row's id (str) so callers can update the stored output
        later, or None if any logging step failed. (The previous ``-> None``
        annotation was wrong — callers rely on the returned id.)
    """
    try:
        prompt_id = log_prompt_execution(prompt_name, prompt_description, prompt)
        if prompt_id is None:
            # Prompt row was not created; skip the output insert rather than
            # writing an orphaned output row with a NULL prompt_id.
            return None
        return log_prompt_execution_output(prompt_id, output)
    except Exception as e:
        print(f"Error logging {prompt_name} generation: {str(e)}")
        return None
page_prompts_config.py CHANGED
@@ -505,7 +505,7 @@ PROMPTS = {
505
  inputs=["generated_plan_test_components"],
506
  outputs=["generated_plan_test_mandays"],
507
  model=ModelType.O1_MINI,
508
- description="Generate planning and testing mandays",
509
  step="Step 2 : Mandays & Quotation",
510
  sub_step="Step 2.1 : Generate Mandays",
511
  ui={
@@ -564,7 +564,7 @@ PROMPTS = {
564
  inputs=["reformatted_dev_components"],
565
  outputs=["generated_dev_mandays"],
566
  model=ModelType.O1_MINI,
567
- description="Generate development mandays",
568
  step="Step 2 : Mandays & Quotation",
569
  sub_step="Step 2.1 : Generate Mandays",
570
  ui={
@@ -776,7 +776,7 @@ PROMPTS = {
776
  inputs=["generated_plan_test_components", "reformatted_dev_components" ,"mvp_components"],
777
  outputs=["generated_Tech_SOW"],
778
  model=ModelType.O1_MINI,
779
- description="Step 3.3 : Generate Test SOW",
780
  step="Step 3 : Final Documentation",
781
  ui={
782
  "Tech_SOW_prompt_editor": UIConfig(
@@ -821,7 +821,7 @@ PROMPTS = {
821
  inputs=["generated_plan_test_mandays"],
822
  outputs=["identified_planning_testing_components"],
823
  model=ModelType.O1_MINI,
824
- description="Analyze planning and testing components",
825
  step="Step 2 : Mandays & Quotation",
826
  sub_step="Step 2.2 : Analyze Components",
827
  ui={
@@ -862,7 +862,7 @@ PROMPTS = {
862
  inputs=["generated_dev_mandays"],
863
  outputs=["identified_development_components"],
864
  model=ModelType.O1_MINI,
865
- description="Analyze development components",
866
  step="Step 2 : Mandays & Quotation",
867
  sub_step="Step 2.2 : Analyze Components",
868
  ui={
@@ -912,7 +912,7 @@ PROMPTS = {
912
  inputs=["identified_priority_components", "generated_prd"],
913
  outputs=["revised_mandays_estimates"],
914
  model=ModelType.O1_MINI,
915
- description="Recalculate Mandays",
916
  step="Step 2 : Mandays & Quotation",
917
  sub_step="Step 2.3 : Recalculate MVP Mandays",
918
  ui={
@@ -955,7 +955,7 @@ PROMPTS = {
955
  inputs=["revised_mandays_estimates"],
956
  outputs=["generated_MVP_mandays"],
957
  model=ModelType.O1_MINI,
958
- description="Generate MVP Mandays",
959
  step="Step 2 : Mandays & Quotation",
960
  sub_step="Step 2.3 : Recalculate MVP Mandays",
961
  ui={
 
505
  inputs=["generated_plan_test_components"],
506
  outputs=["generated_plan_test_mandays"],
507
  model=ModelType.O1_MINI,
508
+ description="Step 2.1 : Generate planning and testing mandays",
509
  step="Step 2 : Mandays & Quotation",
510
  sub_step="Step 2.1 : Generate Mandays",
511
  ui={
 
564
  inputs=["reformatted_dev_components"],
565
  outputs=["generated_dev_mandays"],
566
  model=ModelType.O1_MINI,
567
+ description="Step 2.1 : Generate development mandays",
568
  step="Step 2 : Mandays & Quotation",
569
  sub_step="Step 2.1 : Generate Mandays",
570
  ui={
 
776
  inputs=["generated_plan_test_components", "reformatted_dev_components" ,"mvp_components"],
777
  outputs=["generated_Tech_SOW"],
778
  model=ModelType.O1_MINI,
779
+ description="Step 3.3 : Generate Tech SOW",
780
  step="Step 3 : Final Documentation",
781
  ui={
782
  "Tech_SOW_prompt_editor": UIConfig(
 
821
  inputs=["generated_plan_test_mandays"],
822
  outputs=["identified_planning_testing_components"],
823
  model=ModelType.O1_MINI,
824
+ description="Step 2.2 : Analyze planning and testing components",
825
  step="Step 2 : Mandays & Quotation",
826
  sub_step="Step 2.2 : Analyze Components",
827
  ui={
 
862
  inputs=["generated_dev_mandays"],
863
  outputs=["identified_development_components"],
864
  model=ModelType.O1_MINI,
865
+ description="Step 2.2 : Analyze development components",
866
  step="Step 2 : Mandays & Quotation",
867
  sub_step="Step 2.2 : Analyze Components",
868
  ui={
 
912
  inputs=["identified_priority_components", "generated_prd"],
913
  outputs=["revised_mandays_estimates"],
914
  model=ModelType.O1_MINI,
915
+ description="Step 2.3 : Recalculate Mandays",
916
  step="Step 2 : Mandays & Quotation",
917
  sub_step="Step 2.3 : Recalculate MVP Mandays",
918
  ui={
 
955
  inputs=["revised_mandays_estimates"],
956
  outputs=["generated_MVP_mandays"],
957
  model=ModelType.O1_MINI,
958
+ description="Step 2.3 : Generate MVP Mandays",
959
  step="Step 2 : Mandays & Quotation",
960
  sub_step="Step 2.3 : Recalculate MVP Mandays",
961
  ui={