ICAS03 committed on
Commit
ff33505
·
1 Parent(s): 85edbe8

- Change to having 1 prompt executor

Browse files

- Deleted v3_functions
- Created a complete config file
This is phase 2. This is working. However, not everything updates dynamically yet if the config file changes.

Project.py CHANGED
@@ -1,11 +1,12 @@
1
- from enum import Enum
2
- # from ProjectClient import Client,zus_coffee,ssm, game
3
- # from prompts import * #engage
4
  from page_prompts_v3 import *
5
  from langtrace_python_sdk.utils.with_root_span import with_langtrace_root_span
6
  import openai
7
  from contextlib import contextmanager
8
- # import json
 
 
9
 
10
  @contextmanager
11
  def openai_session():
@@ -64,323 +65,295 @@ def call_4o_mini(prompt):
64
 
65
  except Exception as e:
66
  return f"Error generating output: {str(e)}"
67
-
68
- class ProjectType(Enum):
69
- Page = "Page"
70
- Sage = "Sage"
71
- Engage = "Engage"
72
-
73
- class Project:
74
- def __init__(self, project_type: ProjectType, session_id = None):
75
  self.project_type = project_type
76
  self.session_id = session_id
77
- # requirement_rubric, fetch from db
78
- # then retrive from here again, omit recalls to fetch the same thing over and over
79
- self.project_detail = []
80
- self.generated_prd = ""
81
-
82
- #v1 - cant delete cause it breaks things
83
- self.rubric = []
84
- self.rubric_section_names = []
85
- self.component_list = []
86
- self.component_csv = ""
87
- self.flared_csv = ""
88
-
89
- # Method 3 - Top Down Approach Variables
90
- # Further break things down based on phases
91
- # Plan & Test , Dev
92
- self.derived_plan_test_components = ""
93
- self.derived_dev_components = ""
94
- self.derived_plan_test_tasks = ""
95
- self.derived_dev_tasks = ""
96
- self.derived_baseunits = ""
97
- self.derived_mandays = ""
98
-
99
- self.quotation_cost = ""
100
-
101
- def reset_project(self):
102
- self.project_detail = []
103
- self.generated_prd = ""
104
-
105
- #v1 - cant delete cause it breaks things
106
  self.rubric = []
107
  self.rubric_section_names = []
108
  self.component_list = []
109
  self.component_csv = ""
110
  self.flared_csv = ""
111
-
112
- # Method 3 - Top Down Approach Variables
113
- # Further break things down based on phases
114
- # Plan & Test , Dev
115
- self.derived_plan_test_components = ""
116
- self.derived_dev_components = ""
117
- self.derived_plan_test_tasks = ""
118
- self.derived_dev_tasks = ""
119
- self.derived_baseunits = ""
120
- self.derived_mandays = ""
121
-
122
- self.quotation_cost = ""
123
-
124
- print("Successfully reser project variables")
125
-
126
-
127
- def set_rubric(self,rubric):
128
- self.rubric = rubric
129
-
130
- def set_component_csv(self,component_csv):
131
- self.component_csv = component_csv
132
-
133
- def is_active(self):
134
- return self.session_id != None
135
-
136
- def get_component_csv(self):
137
- return self.component_csv
138
-
139
- def set_component_list(self,component_list):
140
- self.component_list = component_list
141
-
142
- def set_rubric_section_names(self,rubric_section_names):
143
- self.rubric_section_names = rubric_section_names
144
-
145
- def set_project_detail(self,project_detail):
146
- self.project_detail = project_detail
147
-
148
- def add_project_detail(self,project_detail):
149
- self.project_detail.append(project_detail)
150
-
151
- def get_project_detail(self):
152
- return(self.project_detail)
153
-
154
- # the rubric to generate project question
155
- def project_question_generation_rubric(self ):
156
- headers = [ 'Criteria', 'Initial Questions', 'Quantifiable Value']
157
- # table = '| ' + ' | '.join(headers) + ' |'
158
- table = ' | '.join(headers)
159
- # table += '\n' + '| ' + ' | '.join(['---'] * len(headers)) + ' |'
160
 
161
- # print(len(self.rubric))
162
- for entry in self.rubric:
163
- # print(entry)
164
- # table += f"\n{entry['criteria']} | {entry['explanation']} | {entry['priority']} | {entry['quantifiable_value'] or ''}"
165
- table += f"\n{entry['criteria']} | {entry['initial_question']} | {entry['quantifiable_value'] or ''}"
166
- # table += f"\n| {entry['section_name']} | {entry['criteria']} | {entry['explanation']} | {entry['priority']} | {entry['quantifiable_value'] or ''} |"
167
- return table
168
-
169
- # the rubric to grade answers project questions
170
- def project_question_grading_rubric(self):
171
- headers = ['Criteria', 'Explanation', 'Priority', 'Quantifiable Value']
172
- # headers = ['Criteria', 'Explanation', 'Priority', 'Quantifiable Value']
173
- # table = '| ' + ' | '.join(headers) + ' |'
174
- table = ' | '.join(headers)
175
- # table += '\n' + '| ' + ' | '.join(['---'] * len(headers)) + ' |'
 
 
 
 
 
176
 
177
- # print(len(self.rubric))
178
- for entry in self.rubric:
179
- # print(entry)
180
- # table += f"\n{entry['criteria']} | {entry['explanation']} | {entry['priority']} | {entry['quantifiable_value'] or ''}"
181
- table += f"\n{entry['criteria']} | {entry['explanation']} | {entry['priority']} | {entry['quantifiable_value'] or ''}"
182
- # table += f"\n| {entry['section_name']} | {entry['criteria']} | {entry['explanation']} | {entry['priority']} | {entry['quantifiable_value'] or ''} |"
183
- return table
184
-
185
- # different pemutation of columns, to reduce token count
186
- def rubric_to_text(self):
187
- headers = ['Section Name', 'Criteria', 'Explanation', 'Priority', 'Quantifiable Value']
188
- # headers = ['Criteria', 'Explanation', 'Priority', 'Quantifiable Value']
189
- # table = '| ' + ' | '.join(headers) + ' |'
190
- table = ' | '.join(headers)
191
- # table += '\n' + '| ' + ' | '.join(['---'] * len(headers)) + ' |'
192
 
193
- # print(len(self.rubric))
194
- for entry in self.rubric:
195
- # print(entry)
196
- # table += f"\n{entry['criteria']} | {entry['explanation']} | {entry['priority']} | {entry['quantifiable_value'] or ''}"
197
- table += f"\n{entry['section_name']} | {entry['criteria']} | {entry['explanation']} | {entry['priority']} | {entry['quantifiable_value'] or ''}"
198
- # table += f"\n| {entry['section_name']} | {entry['criteria']} | {entry['explanation']} | {entry['priority']} | {entry['quantifiable_value'] or ''} |"
199
- return table
200
-
201
- def rubric_to_verify(self):
202
- # headers = ['Section Name', 'Criteria', 'Explanation', "Priority"]
203
- headers = ['Criteria', 'Explanation', "Priority"]
204
- # table = '| ' + ' | '.join(headers) + ' |'
205
- table = ' | '.join(headers)
206
- # table += '\n' + '| ' + ' | '.join(['---'] * len(headers)) + ' |'
207
 
208
- # print(len(self.rubric))
209
- for entry in self.rubric:
210
- merged_columns = entry['explanation'] + " " + (entry['quantifiable_value'] or '')
211
- # print(entry)
212
- table += f"\n {entry['criteria']} | {merged_columns} | {entry['priority']}"
213
- # table += f"\n| {entry['section_name']} | {entry['criteria']} | {entry['explanation']} | {entry['priority']} | {entry['quantifiable_value'] or ''} |"
214
- return table
215
-
216
- def component_to_text(self):
217
- # If input is empty, return empty string
218
- if not self.component_list:
219
- return ""
220
 
221
- # Get headers from the first row
222
- # headers = list(self.component_list[0].keys())
223
- headers = ["base_project_name", "module", "submodule","unit_type", "quantity"]
224
- # Create header row
225
- table = " | ".join(headers) + "\n"
226
- table += "-" * len(table) + "\n"
227
 
228
- # Add data rows
229
- for row in self.component_list:
230
- # Convert None values to empty strings and all values to strings
231
- values = [str(row[header]) if row[header] is not None else "" for header in headers]
232
- table += " | ".join(values) + "\n"
233
-
234
- return table
235
-
236
-
237
- #Functions that generates based on prompts
238
- def generate_client_follow_up(self ,system_prompt = client_follow_up_with_sample_answers):
239
- # current_form = self.filter_non_empty_answer()
240
  prompt = f"""
241
- {system_prompt}
242
-
243
- # Input:
244
- ## Client Details / Project Requirement Q&A
245
- {self.project_detail}
246
  """
247
-
248
-
249
- # print(f"\n\generate_client_follow_up with prompt: {prompt}\n\n")
250
- result = call_o1_mini(prompt)
251
- # print(f"type, result : {type(result)}, {result}")
252
-
253
- return result
254
-
255
- def rewrite_qa(self,system_prompt = structure_qa):
256
- prompt = f"""
257
- {system_prompt}
258
-
259
- # Input:
260
- ## Client Details / Project Requirement Q&A
261
- {self.get_project_detail()}"""
262
-
263
- result = call_o1_mini(prompt)
264
- self.generated_prd = result
265
- # print(f"POPULATED TABLE : {result}")
266
  return result
267
 
 
 
 
 
268
 
269
- def generate_components(self, system_prompt = define_components):
270
- prompt = f"""
271
- {system_prompt}
272
-
273
- # PRD :
274
- {self.generated_prd}
275
-
276
- """
277
- # print(prompt)
278
- result = call_4o_mini(prompt)
279
- self.derived_plan_test_components = result
280
- return result
281
-
282
- def generate_dev_components(self, system_prompt = define_dev_components):
283
- prompt = f"""
284
- {system_prompt}
285
-
286
- # PRD :
287
- {self.generated_prd}
288
- """
289
-
290
- # print(prompt)
291
- result = call_o1_mini(prompt)
292
-
293
- # need to check whether this would work
294
- self.derived_dev_components = result
295
- return result
296
-
297
-
298
- def generate_plan_test_mandays(self, component_list, system_prompt = derive_plan_test_mandays):
299
- prompt = f"""
300
- {system_prompt}
301
-
302
- # Component List
303
- {component_list}
304
-
305
- """
306
 
307
- # print(prompt)
308
- result = call_o1_mini(prompt)
309
- # self.derived_mandays = result
310
- return result
311
-
312
- def generate_dev_mandays(self, component_list, system_prompt = derive_dev_mandays):
313
- prompt = f"""
314
- {system_prompt}
315
 
316
- # Component List
317
- {component_list}
 
318
 
319
- """
 
 
 
320
 
321
- # print(prompt)
322
- result = call_o1_mini(prompt)
323
- # self.derived_mandays = result
324
- return result
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
325
 
326
- def gather_project_input(self, system_prompt = gather_project_input_prompt):
327
- prompt = f"""
328
- {system_prompt}
329
-
330
- # Client Background:
331
- {self.get_project_detail()}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
332
 
333
- """
334
- print(prompt)
335
- result = call_o1_mini(prompt)
336
- return result
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
337
 
338
- def review_project_input(self, system_prompt = review_project_input_prompt):
339
- prompt = f"""
340
- {system_prompt}
341
 
342
- # Client Background:
343
- {self.get_project_detail()}
344
-
 
 
345
  """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
346
 
347
- print(prompt)
348
- result = call_o1_mini(prompt)
349
- return result
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
350
 
351
- def generate_general_sow(self, system_prompt = sow_general):
352
- prompt = f"""
353
- {system_prompt}
354
-
355
- # Quotation:
356
- {self.quotation_cost}
357
 
358
- # Project Requirements Doc:
359
- {self.generated_prd}
360
-
361
- # Components:
362
- {self.derived_plan_test_components}
363
-
364
- {self.derived_dev_components}
365
  """
 
 
 
366
 
367
- print(prompt)
368
- result = call_o1_mini(prompt)
369
- return result
370
-
371
- def generate_sow_details(self, system_prompt = sow_detailed):
372
- prompt = f"""
373
- {system_prompt}
374
-
375
- # Components:
376
- {self.derived_plan_test_components}
377
 
378
- {self.derived_dev_components}
 
 
 
 
379
 
380
- """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
381
 
382
- print(prompt)
383
- result = call_o1_mini(prompt)
384
- return result
385
-
386
-
 
1
+ from common_functions_v4 import *
2
+ from page_prompts_config import *
 
3
  from page_prompts_v3 import *
4
  from langtrace_python_sdk.utils.with_root_span import with_langtrace_root_span
5
  import openai
6
  from contextlib import contextmanager
7
+ import json
8
+ from state import state
9
+
10
 
11
  @contextmanager
12
  def openai_session():
 
65
 
66
  except Exception as e:
67
  return f"Error generating output: {str(e)}"
68
+
69
+ class Project :
70
+ def __init__(self, project_type, session_id = None):
 
 
 
 
 
71
  self.project_type = project_type
72
  self.session_id = session_id
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
73
  self.rubric = []
74
  self.rubric_section_names = []
75
  self.component_list = []
76
  self.component_csv = ""
77
  self.flared_csv = ""
78
+ self.project_detail = []
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
79
 
80
+ for config in PROMPTS.values():
81
+ for output in config.outputs:
82
+ setattr(self, output, "")
83
+
84
+ INPUT_MAPPINGS = {
85
+ 'project_detail': lambda self: self.get_project_detail(),
86
+ 'generated_prd': lambda self: self.generated_prd,
87
+ 'component_list': lambda self: self.component_list,
88
+ 'derived_plan_test_components': lambda self: self.derived_plan_test_components,
89
+ 'derived_dev_components': lambda self: self.derived_dev_components,
90
+ 'quotation_cost': lambda self: self.quotation_cost,
91
+ 'derived_tasks': lambda self: self.derived_tasks,
92
+ 'flared_csv': lambda self: self.flared_csv,
93
+ 'component_to_text': lambda self: self.component_to_text,
94
+ 'quotation_details': lambda self: self.quotation_details,
95
+ 'quotation_table': lambda self: self.quotation_table
96
+ }
97
+
98
+ def execute_prompt(self, prompt_name: str, input_variables: Dict[str, Any]) -> str:
99
+ """Execute a prompt with given input variables
100
 
101
+ Args:
102
+ prompt_name: Name of the prompt config to use from PROMPTS
103
+ input_variables: Dictionary of input variables matching the prompt's required inputs
 
 
 
 
 
 
 
 
 
 
 
 
104
 
105
+ Returns:
106
+ str: Generated output from the model
107
+ """
108
+ if prompt_name not in PROMPTS:
109
+ raise ValueError(f"Unknown prompt: {prompt_name}")
 
 
 
 
 
 
 
 
 
110
 
111
+ config = PROMPTS[prompt_name]
 
 
 
 
 
 
 
 
 
 
 
112
 
113
+ # Validate all required inputs are provided
114
+ missing_inputs = set(config.inputs) - set(input_variables.keys())
115
+ if missing_inputs:
116
+ raise ValueError(f"Missing required inputs for {prompt_name}: {missing_inputs}")
 
 
117
 
118
+ # Build prompt by formatting with provided variables
 
 
 
 
 
 
 
 
 
 
 
119
  prompt = f"""
120
+ {config.prompt}
121
+
122
+ {' '.join([f'# {key}:\n{input_variables[key]}\n' for key in config.inputs])}
 
 
123
  """
124
+
125
+ # Call appropriate model based on config
126
+ result = (
127
+ call_4o_mini(prompt)
128
+ if config.model == ModelType.O4_MINI
129
+ else call_o1_mini(prompt)
130
+ )
131
+
132
+ # Store outputs in project attributes if specified
133
+ for output in config.outputs:
134
+ if hasattr(self, output):
135
+ setattr(self, output, result)
136
+
 
 
 
 
 
 
137
  return result
138
 
139
+ #Functions to interact with common_functions_v4.py#
140
+ def set_rubric(self, rubric_list):
141
+ """Set the rubric for the project"""
142
+ self.rubric = rubric_list
143
 
144
+ def set_rubric_section_names(self, section_names):
145
+ """Set the rubric section names for the project"""
146
+ self.rubric_section_names = section_names
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
147
 
148
+ def set_component_list(self, component_list):
149
+ """Set the component list for the project"""
150
+ self.component_list = component_list
 
 
 
 
 
151
 
152
+ def get_project_detail(self):
153
+ """Get the project details as a formatted string"""
154
+ return "\n".join(self.project_detail) if self.project_detail else ""
155
 
156
+ def add_project_detail(self, detail):
157
+ """Add a new project detail"""
158
+ if detail:
159
+ self.project_detail.append(detail)
160
 
161
+ def reset_project(self):
162
+ """Reset all project attributes"""
163
+ for config in PROMPTS.values():
164
+ for output in config.outputs:
165
+ setattr(self, output, "")
166
+
167
+ def generate_client_follow_up(self) -> str:
168
+ """Generate follow-up questions after initial client response"""
169
+ return self.execute_prompt(
170
+ "generate_client_follow_up",
171
+ {
172
+ "project_detail": self.get_project_detail()
173
+ }
174
+ )
175
+
176
+ def gather_project_input(self) -> str:
177
+ """Generate context-aware questions to gather project requirements"""
178
+ return self.execute_prompt(
179
+ "gather_project_input",
180
+ {
181
+ "project_detail": self.get_project_detail()
182
+ }
183
+ )
184
+
185
+ def review_project_input(self) -> str:
186
+ """Review project input and generate follow-up questions to address gaps"""
187
+ return self.execute_prompt(
188
+ "review_project_input",
189
+ {
190
+ "project_detail": self.get_project_detail()
191
+ }
192
+ )
193
+
194
+ #Functions to interact with the frontend#
195
+ def v4_generate_prd_and_components(progress=gr.Progress()):
196
+ # Step 1: Rewrite QA
197
+ progress(0, desc="Progress 1: Generating PRD from Q&A...")
198
+ generated_prd = state.quotation_project.execute_prompt(
199
+ "rewrite_qa",
200
+ {
201
+ "project_detail": state.quotation_project.get_project_detail()
202
+ }
203
+ )
204
 
205
+ # Step 2: Generate Components
206
+ progress(0.4, desc="Progress 2: Generating Planning & Testing Component...")
207
+ plan_test_component_list = state.quotation_project.execute_prompt(
208
+ "generate_components",
209
+ {
210
+ "generated_prd": state.quotation_project.generated_prd
211
+ }
212
+ )
213
+
214
+ progress(0.7, desc="Progress 3: Generating Development Components...")
215
+ dev_component_list = state.quotation_project.execute_prompt(
216
+ "generate_dev_components",
217
+ {
218
+ "generated_prd": state.quotation_project.generated_prd
219
+ }
220
+ )
221
+
222
+ progress(1.0, desc="Complete!")
223
+ return [
224
+ generated_prd,
225
+ plan_test_component_list,
226
+ dev_component_list,
227
+ dev_component_list,
228
+ "Generated PRD and Component List! Click Generate Final Quotation To Generate Quotation"
229
+ ]
230
+
231
+ def v4_generate_quotation(updated_plan_test_component, updated_dev_component, progress=gr.Progress()):
232
+ state.quotation_project.derived_plan_test_components = updated_plan_test_component
233
+ state.quotation_project.derived_dev_components = updated_dev_component
234
 
235
+ # Generate mandays for plan & test components
236
+ progress(0.5, desc="Progress 1: Deriving Mandays for Plan & Test Components...")
237
+ plan_test_mandays = state.quotation_project.execute_prompt(
238
+ "generate_plan_test_mandays",
239
+ {
240
+ "component_list": updated_plan_test_component
241
+ }
242
+ )
243
+
244
+ # Generate mandays for dev components
245
+ progress(0.7, desc="Progress 2: Deriving Mandays for Dev Components...")
246
+ dev_mandays = state.quotation_project.execute_prompt(
247
+ "generate_dev_mandays",
248
+ {
249
+ "component_list": updated_dev_component
250
+ }
251
+ )
252
+
253
+ # Process results
254
+ plan_test_df = pd.read_csv(StringIO(plan_test_mandays), on_bad_lines='skip')
255
+ dev_df = pd.read_csv(StringIO(dev_mandays), on_bad_lines='skip')
256
+
257
+ plan_test_df['mandays'] = pd.to_numeric(plan_test_df['mandays'].replace('', '0'), errors='coerce').fillna(0)
258
+ dev_df['mandays'] = pd.to_numeric(dev_df['mandays'].replace('', '0'), errors='coerce').fillna(0)
259
 
260
+ total_mandays, total_cost, estimated_months = calculate_mandays_and_costs(plan_test_df, dev_df)
 
 
261
 
262
+ progress(1.0, desc="Complete!")
263
+ cost_summary = f"""
264
+ Total Mandays: {total_mandays:.2f}
265
+ Total Cost: ${total_cost:,.2f}
266
+ ({estimated_months:.2}months)
267
  """
268
+ return [plan_test_df, dev_df, "Generated Quotation!", cost_summary]
269
+
270
+ def v4_generate_sow(generated_prd, plan_test_component, dev_component, cost, progress=gr.Progress()):
271
+ state.quotation_project.derived_plan_test_components = plan_test_component
272
+ state.quotation_project.derived_dev_components = dev_component
273
+ state.quotation_project.quotation_cost = cost
274
+
275
+ # Generate general SOW
276
+ progress(0.4, desc="Progress 1: Drafting SOW")
277
+ general_sow = state.quotation_project.execute_prompt(
278
+ "generate_BD_SOW",
279
+ {
280
+ "generated_prd": generated_prd,
281
+ "derived_plan_test_components": plan_test_component,
282
+ "derived_dev_components": dev_component,
283
+ "quotation_cost": cost
284
+ }
285
+ )
286
+
287
+ # Generate detailed SOW
288
+ progress(0.8, desc="Progress 2: Drafting Technical SOW")
289
+ detailed_sow_json = state.quotation_project.execute_prompt(
290
+ "generate_Tech_SOW",
291
+ {
292
+ "derived_plan_test_components": plan_test_component,
293
+ "derived_dev_components": dev_component
294
+ }
295
+ )
296
 
297
+ try:
298
+ # Parse detailed_sow into a JSON object
299
+ detailed_sow_json = json.loads(detailed_sow_json)
300
+
301
+ # Extract required fields
302
+ scope_summary = detailed_sow_json.get("scope_summary", "")
303
+ modules_and_functional_requirements = detailed_sow_json.get("modules_and_functional_requirements", "")
304
+ out_of_scope = detailed_sow_json.get("out_of_scope", "")
305
+ system_flow = detailed_sow_json.get("system_flow", "")
306
+
307
+ # Combine all fields into detailed_sow
308
+ detailed_sow = f"{scope_summary}\n\n{modules_and_functional_requirements}\n\n{out_of_scope}\n\n{system_flow}"
309
+
310
+ # Create final SOW
311
+ final_general_sow = f"**Hi , some sections of this SOW is generated seprately from the main draft. youll have to move this to the right spot manually ;)\n **Project Quotation:**\n{cost}\n{modules_and_functional_requirements}\n\n {general_sow}\nEOF"
312
+ return [final_general_sow, detailed_sow, "Generated SOW!"]
313
+ except Exception as e:
314
+ return ["Error generating SOW", "Error: " + str(e), "Failed to generate SOW"]
315
+
316
+ def v4_recalculate_cost(plan_test_df, dev_df):
317
+ """Recalculate costs based on modified dataframe values"""
318
+ try:
319
+ # Convert mandays columns to numeric, replacing empty values with 0
320
+ plan_test_df['mandays'] = pd.to_numeric(plan_test_df['mandays'].replace('', '0'), errors='coerce').fillna(0)
321
+ dev_df['mandays'] = pd.to_numeric(dev_df['mandays'].replace('', '0'), errors='coerce').fillna(0)
322
 
323
+ # Calculate totals
324
+ total_mandays, total_cost, estimated_months = calculate_mandays_and_costs(plan_test_df, dev_df)
 
 
 
 
325
 
326
+ cost_summary = f"""
327
+ Total Mandays: {total_mandays:.2f}
328
+ Total Cost: ${total_cost:,.2f}
329
+ ({estimated_months:.2}months)
 
 
 
330
  """
331
+ return f"Successfully Updated Quotation. SessionID:{state.quotation_project.session_id}", cost_summary
332
+ except Exception as e:
333
+ return f"Error recalculating costs: {str(e)}", f"Error recalculating costs: {str(e)}"
334
 
 
 
 
 
 
 
 
 
 
 
335
 
336
+ def update_display_mode(mode, content):
337
+ if mode == "Textbox":
338
+ return gr.update(visible=True, value=content), gr.update(visible=False, value=content)
339
+ else:
340
+ return gr.update(visible=False, value=content), gr.update(visible=True, value=content)
341
 
342
+ def update_system_prompts(requirement_prompt, plan_test_prompt, dev_prompt,
343
+ plantest_mandays_prompt, dev_mandays_prompt,
344
+ bd_sow_prompt, tech_sow_prompt):
345
+ """Update system prompts when edited by user"""
346
+ try:
347
+ # Update prompts in config
348
+ PROMPTS["rewrite_qa"].prompt = requirement_prompt
349
+ PROMPTS["generate_components"].prompt = plan_test_prompt
350
+ PROMPTS["generate_dev_components"].prompt = dev_prompt
351
+ PROMPTS["generate_plan_test_mandays"].prompt = plantest_mandays_prompt
352
+ PROMPTS["generate_dev_mandays"].prompt = dev_mandays_prompt
353
+ PROMPTS["generate_BD_SOW"].prompt = bd_sow_prompt
354
+ PROMPTS["generate_Tech_SOW"].prompt = tech_sow_prompt
355
+
356
+ return "✅ System prompts updated successfully"
357
+ except Exception as e:
358
+ return f"❌ Error updating prompts: {str(e)}"
359
 
 
 
 
 
 
app.py CHANGED
@@ -1,7 +1,9 @@
1
  import gradio as gr
2
- from v3_functions import *
 
3
  from google_drive import *
4
  from notion import *
 
5
 
6
  with open("page_main.css", "r") as file:
7
  custom_css = file.read()
@@ -284,7 +286,7 @@ with gr.Blocks(title="v3 Page Quotation Chatbot (with SOW)" , css=custom_css) as
284
 
285
  #Update Prompts Btn
286
  save_prompts_btn.click(
287
- fn=update_system_prompts,
288
  inputs=[
289
  requirement_prompt_editor,
290
  plan_text_prompt_editor,
@@ -329,31 +331,27 @@ with gr.Blocks(title="v3 Page Quotation Chatbot (with SOW)" , css=custom_css) as
329
 
330
  #Generate PRD & Components
331
  page_generate_prd_btn.click(
332
- fn=v3_generate_prd_and_components,
333
  outputs=[page_prd_box_v3_text,page_plan_test_component_table_v3_text,page_dev_component_table_v3_text,page_dev_component_table_v3_markdown,page_progress_update]
334
  )
335
 
336
  #Recalculate Cost
337
  page_recalc_btn.click(
338
- fn=v3_recalculate_cost,
339
  inputs=[page_plan_test_mandays_v3,page_dev_mandays_v3],
340
  outputs=[page_progress_update, page_units_output]
341
  )
342
 
343
  #Generate Quotation
344
  page_generate_quotation_btn.click(
345
- fn=v3_generate_quotation,
346
  inputs=[page_plan_test_component_table_v3_text,page_dev_component_table_v3_text],
347
  outputs=[page_plan_test_mandays_v3,page_dev_mandays_v3,page_progress_update,page_units_output]
348
  )
349
 
350
- page_save_quotation_btn.click(
351
-
352
- )
353
-
354
  #Generate SOW
355
  page_generate_sow_btn.click(
356
- fn=v3_generate_sow,
357
  inputs=[page_prd_box_v3_text,page_plan_test_component_table_v3_text,page_dev_component_table_v3_text,page_units_output],
358
  outputs=[page_general_sow_text,page_detailed_sow_text,page_progress_update]
359
  )
@@ -403,6 +401,9 @@ with gr.Blocks(title="v3 Page Quotation Chatbot (with SOW)" , css=custom_css) as
403
  inputs=[display_mode_tech_sow ,page_detailed_sow_text],
404
  outputs=[page_detailed_sow_text , page_detailed_sow_markdown ]
405
  )
 
 
 
406
 
407
  # Replace single textbox with separate components
408
  with gr.Tab(label= "Load Project"):
@@ -450,7 +451,5 @@ with gr.Blocks(title="v3 Page Quotation Chatbot (with SOW)" , css=custom_css) as
450
  # outputs=[status_box, fetched_requirements_box, message_box, fetched_rubric_box, fetched_component_box]
451
  )
452
 
453
-
454
-
455
  if __name__ == "__main__":
456
  page_interface.launch(share=True)
 
1
  import gradio as gr
2
+ from Project import *
3
+ from common_functions_v4 import *
4
  from google_drive import *
5
  from notion import *
6
+ from state import state
7
 
8
  with open("page_main.css", "r") as file:
9
  custom_css = file.read()
 
286
 
287
  #Update Prompts Btn
288
  save_prompts_btn.click(
289
+ fn=update_system_prompts,
290
  inputs=[
291
  requirement_prompt_editor,
292
  plan_text_prompt_editor,
 
331
 
332
  #Generate PRD & Components
333
  page_generate_prd_btn.click(
334
+ fn=v4_generate_prd_and_components,
335
  outputs=[page_prd_box_v3_text,page_plan_test_component_table_v3_text,page_dev_component_table_v3_text,page_dev_component_table_v3_markdown,page_progress_update]
336
  )
337
 
338
  #Recalculate Cost
339
  page_recalc_btn.click(
340
+ fn=v4_recalculate_cost,
341
  inputs=[page_plan_test_mandays_v3,page_dev_mandays_v3],
342
  outputs=[page_progress_update, page_units_output]
343
  )
344
 
345
  #Generate Quotation
346
  page_generate_quotation_btn.click(
347
+ fn=v4_generate_quotation,
348
  inputs=[page_plan_test_component_table_v3_text,page_dev_component_table_v3_text],
349
  outputs=[page_plan_test_mandays_v3,page_dev_mandays_v3,page_progress_update,page_units_output]
350
  )
351
 
 
 
 
 
352
  #Generate SOW
353
  page_generate_sow_btn.click(
354
+ fn=v4_generate_sow,
355
  inputs=[page_prd_box_v3_text,page_plan_test_component_table_v3_text,page_dev_component_table_v3_text,page_units_output],
356
  outputs=[page_general_sow_text,page_detailed_sow_text,page_progress_update]
357
  )
 
401
  inputs=[display_mode_tech_sow ,page_detailed_sow_text],
402
  outputs=[page_detailed_sow_text , page_detailed_sow_markdown ]
403
  )
404
+
405
+
406
+
407
 
408
  # Replace single textbox with separate components
409
  with gr.Tab(label= "Load Project"):
 
451
  # outputs=[status_box, fetched_requirements_box, message_box, fetched_rubric_box, fetched_component_box]
452
  )
453
 
 
 
454
  if __name__ == "__main__":
455
  page_interface.launch(share=True)
common_functions_v3.py → common_functions_v4.py RENAMED
@@ -1,4 +1,3 @@
1
- from Project import *
2
  import pandas as pd
3
  from io import StringIO
4
  # from pathlib import Path
@@ -6,16 +5,15 @@ import psycopg2
6
  from psycopg2.extras import RealDictCursor
7
  import gradio as gr
8
  import datetime
9
-
10
-
11
- # Initialize project
12
- quotation_project = Project(ProjectType.Page)
13
- no_active_session = "**Current Session**:None"
14
-
15
  import json
16
  import os
17
  from dotenv import load_dotenv
18
  from langtrace_python_sdk import langtrace
 
 
 
 
19
 
20
  load_dotenv()
21
  api_key = os.getenv("LANGTRACE_API_KEY")
@@ -26,7 +24,7 @@ langtrace.init(api_key=api_key)
26
  #This function is not being used
27
  def check_session_exists():
28
  """Check if a valid session exists"""
29
- return quotation_project.session_id is not None
30
 
31
  def get_db_connection():
32
  """Establishes and returns a new database connection."""
@@ -130,10 +128,10 @@ def process_response(answer, history):
130
  sanitized_answer = sanitize_text(str(answer))
131
 
132
  # Add the user's answer to project details
133
- quotation_project.add_project_detail(sanitized_answer)
134
 
135
  # Update session in database if we have a session_id
136
- if quotation_project.session_id:
137
  try:
138
  conn = get_db_connection()
139
  cur = conn.cursor()
@@ -143,7 +141,7 @@ def process_response(answer, history):
143
  UPDATE sessions
144
  SET project_requirement = %s
145
  WHERE session_id = %s
146
- """, (json.dumps(quotation_project.project_detail), quotation_project.session_id))
147
 
148
  conn.commit()
149
  cur.close()
@@ -153,15 +151,15 @@ def process_response(answer, history):
153
 
154
  # Generate next question based on conversation stage
155
  if len(history) == 1: # After first client information question
156
- next_question = quotation_project.generate_client_follow_up()
157
  elif len(history) == 2: # After client follow-up
158
  # next_question = quotation_project.generate_questions()
159
- next_question = quotation_project.gather_project_input()
160
  else: # Subsequent project requirements questions
161
  # next_question = quotation_project.generate_follow_up()
162
- next_question = quotation_project.review_project_input()
163
 
164
- # Ensure we're adding a proper tuple to history
165
  if isinstance(answer, str) and isinstance(next_question, str):
166
  history.append((answer, next_question))
167
 
@@ -240,10 +238,11 @@ def start_chat():
240
  session_id = create_new_session()
241
 
242
  # Set the rubric and session_id for the project
243
-
244
- # Update session_id in Project instance
245
- quotation_project.reset_project()
246
- quotation_project.session_id = session_id
 
247
 
248
  initial_history = [(None, client_initial_question)]
249
  return client_initial_question, initial_history, f"Current Session: {session_id}"
@@ -252,18 +251,18 @@ def start_chat():
252
  def get_project_state():
253
  """Get current state of quotation_project project"""
254
  # Create status boxes
255
- status = f"""Session ID: {quotation_project.session_id}
256
- Rubric Loaded: {bool(quotation_project.rubric)}
257
- Components Loaded: {bool(quotation_project.component_list)}
258
- Requirements Loaded: {bool(quotation_project.project_detail)}"""
259
 
260
  # Format requirements as a table if they exist
261
  requirements_table = ""
262
- if quotation_project.project_detail:
263
- print(f"\n\nrequirements : {type(quotation_project.project_detail)}")
264
  # Create markdown box for requirements
265
  # requirements_table = "\n\n### Project Requirements\n```markdown\n"
266
- for index,requirement in enumerate(list(quotation_project.project_detail)):
267
  requirements_table += f"\n_____________\n"
268
  requirements_table += f"#Requirement {index+1}\n {requirement}"
269
 
@@ -289,10 +288,9 @@ def fetch_session(session_id):
289
  print(session)
290
  if session:
291
  # 2. Update quotation_project with session data
292
- quotation_project.session_id = session_id
293
-
294
- # Set project requirements if they exist
295
- if session['project_requirement']:
296
  try:
297
  # Check if the project requirement is a string
298
  if isinstance(session['project_requirement'], str):
@@ -306,22 +304,21 @@ def fetch_session(session_id):
306
  requirements = session['project_requirement']
307
 
308
  # Clear existing details and set new ones
309
- quotation_project.project_detail = []
310
  for requirement in requirements:
311
- quotation_project.add_project_detail(requirement.strip()) # Use strip() to remove any leading/trailing whitespace
312
  except Exception as e:
313
  return "", "", f"Error processing project requirements in session {session_id}: {str(e)}", no_active_session
 
 
 
 
314
 
315
- # 3. Fetch and set rubric
316
- section_name_list, rubric_list = get_section_name_and_rubric_list()
317
- quotation_project.set_rubric(rubric_list)
318
- quotation_project.set_rubric_section_names(section_name_list)
319
-
320
- # 4. Fetch and set components
321
- component_list = get_latest_components()
322
- quotation_project.set_component_list(component_list)
323
 
324
- return (*get_project_state(), f"Successfully loaded session {session_id} with all data", f"Current Session: {session_id}")
325
  # "\n".join(rubric_list), # Return rubric list as a string
326
  # component_list) # Ensure to extract string values
327
 
@@ -345,27 +342,27 @@ def insert_quotation(csv_string, total_price, total_mandays, note=None, task_bre
345
  SELECT COALESCE(MAX(version), 0) + 1
346
  FROM quotations
347
  WHERE session_id = %s
348
- """, (quotation_project.session_id,))
349
 
350
  result = cur.fetchone()
351
  version = result[0] if result else 1 # Default to version 1 if no result
352
  # Get the next version number
353
  total_price = float(total_price) if total_price is not None else None
354
  total_mandays = float(total_mandays) if total_mandays is not None else None
355
- structured_details = quotation_project.generated_prd
356
 
357
  # Convert project details to JSON string
358
  # Append the task table here, so we know what tasks are not in the quantity table
359
  # (context : v3 function calls it slightly differently, csv_string will be the quantity table)
360
  # why ? lazy alter table to add new column, then create a whole new if else statement to handle this
361
- details = f"{json.dumps(quotation_project.project_detail)} + {task_breakdown_v3}" if task_breakdown_v3 else json.dumps(quotation_project.project_detail)
362
 
363
  # Insert new quotation
364
  cur.execute("""
365
  INSERT INTO quotations (session_id, version, details, quotation_csv, total_price, total_mandays,structured_details)
366
  VALUES (%s, %s, %s, %s, %s, %s,%s)
367
  """, (
368
- quotation_project.session_id,
369
  version,
370
  details,
371
  csv_string,
@@ -395,7 +392,7 @@ def create_folder_and_save_csv(df, folder_name, file_name):
395
  return "No data to save."
396
 
397
  def create_folder():
398
- session_id = quotation_project.session_id
399
  timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
400
  folder_name = f"quotation_{session_id}_{timestamp}"
401
  os.makedirs(folder_name, exist_ok=True)
 
 
1
  import pandas as pd
2
  from io import StringIO
3
  # from pathlib import Path
 
5
  from psycopg2.extras import RealDictCursor
6
  import gradio as gr
7
  import datetime
8
+ from Project import *
 
 
 
 
 
9
  import json
10
  import os
11
  from dotenv import load_dotenv
12
  from langtrace_python_sdk import langtrace
13
+ from state import state
14
+ from page_prompts_config import PROMPTS
15
+
16
+ no_active_session = "**Current Session**:None"
17
 
18
  load_dotenv()
19
  api_key = os.getenv("LANGTRACE_API_KEY")
 
24
  #This function is not being used
25
  def check_session_exists():
26
  """Check if a valid session exists"""
27
+ return state.quotation_project.session_id is not None
28
 
29
  def get_db_connection():
30
  """Establishes and returns a new database connection."""
 
128
  sanitized_answer = sanitize_text(str(answer))
129
 
130
  # Add the user's answer to project details
131
+ state.quotation_project.add_project_detail(sanitized_answer)
132
 
133
  # Update session in database if we have a session_id
134
+ if state.quotation_project.session_id:
135
  try:
136
  conn = get_db_connection()
137
  cur = conn.cursor()
 
141
  UPDATE sessions
142
  SET project_requirement = %s
143
  WHERE session_id = %s
144
+ """, (json.dumps(state.quotation_project.project_detail), state.quotation_project.session_id))
145
 
146
  conn.commit()
147
  cur.close()
 
151
 
152
  # Generate next question based on conversation stage
153
  if len(history) == 1: # After first client information question
154
+ next_question = state.quotation_project.generate_client_follow_up()
155
  elif len(history) == 2: # After client follow-up
156
  # next_question = quotation_project.generate_questions()
157
+ next_question = state.quotation_project.gather_project_input()
158
  else: # Subsequent project requirements questions
159
  # next_question = quotation_project.generate_follow_up()
160
+ next_question = state.quotation_project.review_project_input()
161
 
162
+ # Ensure we're adding a proper tuple to history
163
  if isinstance(answer, str) and isinstance(next_question, str):
164
  history.append((answer, next_question))
165
 
 
238
  session_id = create_new_session()
239
 
240
  # Set the rubric and session_id for the project
241
+ state.quotation_project.reset_project()
242
+ state.quotation_project.session_id = session_id
243
+
244
+ # Get the initial question from prompts config
245
+ client_initial_question = PROMPTS["client_initial_question"].prompt
246
 
247
  initial_history = [(None, client_initial_question)]
248
  return client_initial_question, initial_history, f"Current Session: {session_id}"
 
251
  def get_project_state():
252
  """Get current state of quotation_project project"""
253
  # Create status boxes
254
+ status = f"""Session ID: {state.quotation_project.session_id}
255
+ Rubric Loaded: {bool(state.quotation_project.rubric)}
256
+ Components Loaded: {bool(state.quotation_project.component_list)}
257
+ Requirements Loaded: {bool(state.quotation_project.project_detail)}"""
258
 
259
  # Format requirements as a table if they exist
260
  requirements_table = ""
261
+ if state.quotation_project.project_detail:
262
+ print(f"\n\nrequirements : {type(state.quotation_project.project_detail)}")
263
  # Create markdown box for requirements
264
  # requirements_table = "\n\n### Project Requirements\n```markdown\n"
265
+ for index,requirement in enumerate(list(state.quotation_project.project_detail)):
266
  requirements_table += f"\n_____________\n"
267
  requirements_table += f"#Requirement {index+1}\n {requirement}"
268
 
 
288
  print(session)
289
  if session:
290
  # 2. Update quotation_project with session data
291
+ state.quotation_project.session_id = session_id
292
+
293
+ if session['project_requirement']:
 
294
  try:
295
  # Check if the project requirement is a string
296
  if isinstance(session['project_requirement'], str):
 
304
  requirements = session['project_requirement']
305
 
306
  # Clear existing details and set new ones
307
+ state.quotation_project.project_detail = []
308
  for requirement in requirements:
309
+ state.quotation_project.add_project_detail(requirement.strip()) # Use strip() to remove any leading/trailing whitespace
310
  except Exception as e:
311
  return "", "", f"Error processing project requirements in session {session_id}: {str(e)}", no_active_session
312
+
313
+ section_name_list, rubric_list = get_section_name_and_rubric_list()
314
+ state.quotation_project.set_rubric(rubric_list)
315
+ state.quotation_project.set_rubric_section_names(section_name_list)
316
 
317
+ # 4. Fetch and set components
318
+ component_list = get_latest_components()
319
+ state.quotation_project.set_component_list(component_list)
 
 
 
 
 
320
 
321
+ return (*get_project_state(), f"Successfully loaded session {session_id} with all data", f"Current Session: {session_id}")
322
  # "\n".join(rubric_list), # Return rubric list as a string
323
  # component_list) # Ensure to extract string values
324
 
 
342
  SELECT COALESCE(MAX(version), 0) + 1
343
  FROM quotations
344
  WHERE session_id = %s
345
+ """, (state.quotation_project.session_id,))
346
 
347
  result = cur.fetchone()
348
  version = result[0] if result else 1 # Default to version 1 if no result
349
  # Get the next version number
350
  total_price = float(total_price) if total_price is not None else None
351
  total_mandays = float(total_mandays) if total_mandays is not None else None
352
+ structured_details = state.quotation_project.generated_prd
353
 
354
  # Convert project details to JSON string
355
  # Append the task table here, so we know what tasks are not in the quantity table
356
  # (context : v3 function calls it slightly differently, csv_string will be the quantity table)
357
  # why ? lazy alter table to add new column, then create a whole new if else statement to handle this
358
+ details = f"{json.dumps(state.quotation_project.project_detail)} + {task_breakdown_v3}" if task_breakdown_v3 else json.dumps(state.quotation_project.project_detail)
359
 
360
  # Insert new quotation
361
  cur.execute("""
362
  INSERT INTO quotations (session_id, version, details, quotation_csv, total_price, total_mandays,structured_details)
363
  VALUES (%s, %s, %s, %s, %s, %s,%s)
364
  """, (
365
+ state.quotation_project.session_id,
366
  version,
367
  details,
368
  csv_string,
 
392
  return "No data to save."
393
 
394
  def create_folder():
395
+ session_id = state.quotation_project.session_id
396
  timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
397
  folder_name = f"quotation_{session_id}_{timestamp}"
398
  os.makedirs(folder_name, exist_ok=True)
page_prompts_config.py CHANGED
@@ -16,6 +16,79 @@ class PromptConfig:
16
  description: str = ""
17
 
18
  PROMPTS = {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
19
  "rewrite_qa": PromptConfig(
20
  prompt="""Rewrite this for clarity while keeping all specific details, metrics, and constraints.
21
  Do not include context or assumptions beyond the input provided.
@@ -615,7 +688,7 @@ PROMPTS = {
615
 
616
  Expected output should be professional, well-structured, and designed to help clients and stakeholders clearly understand the project scope. I’m going to tip you for a better outcome!
617
  """,
618
- inputs=['quotation_cost','generated_prd','derived_plan_test_components','derived_dev_components'],
619
  outputs=['generated_BD_sow'],
620
  editable=True,
621
  model=ModelType.O1_MINI,
 
16
  description: str = ""
17
 
18
  PROMPTS = {
19
+ "client_initial_question": PromptConfig(
20
+ prompt= """
21
+ # Client Information Gathering Questions
22
+
23
+ ### Company Background and Industry
24
+ 1. Can you provide some background about your company?
25
+ 2. Which industry do you operate in, and what is your company’s niche or specialization?
26
+ 3. Who are your primary customers?
27
+ 4. What are the main objectives you want to achieve?
28
+ 5. What key features or functionalities do you need?
29
+
30
+ ### Current Challenges
31
+ 6. What are the biggest challenges your firm is currently facing?
32
+ 7. Can you describe your current processes?
33
+
34
+ ### Workflow and System Impact
35
+ 8. How will this solution benefit your firm as a whole?
36
+
37
+ ### Existing Workflow or System
38
+ 9. Can you describe your current workflow or system?
39
+
40
+ ### Pain Point Identification
41
+ 10. Where is your current system falling short or causing delays?
42
+ 11. Are there any parts of the process that are particularly time-consuming or prone to error?
43
+ """,
44
+ inputs=[],
45
+ outputs=[],
46
+ editable=True,
47
+ model="",
48
+ description="This is the first question that is fixed",
49
+ ),
50
+
51
+
52
+ "generate_client_follow_up": PromptConfig(
53
+ prompt= """
54
+ Based on the initial list of questions and the client’s provided answers, generate **insightful and targeted follow-up questions** that will help deepen my understanding of the following critical aspects:
55
+
56
+ 1. **Client Overview**
57
+ **Objective:** ask relevant questions that will directly contribute to better project requirements gathering. (ie: department team that the project is meant for ..etc)
58
+
59
+ 2. **Project Vision and Value**
60
+ **Objective:** Clarify the intended impact of the project on the client’s business. Understand how it will improve their processes, solve key challenges, and deliver measurable benefits.
61
+ **Focus:** Investigate specific outcomes, immediate expected goals, and how success will be defined.
62
+
63
+ 3. **Existing System or Workflow Description**
64
+ **Objective:** Delve deeper into the client’s current tools, workflows, and processes to uncover pain points, integration requirements, and opportunities for optimization.
65
+ **Focus:** Identify inefficiencies, technical limitations, or gaps that the project will address.
66
+
67
+ 4. **Budget and Resource Constraints**
68
+ **Objective:** Clearly define any limitations or constraints—financial, resource-based, or time-related—that could impact project success.
69
+ **Focus:** Understand the flexibility of the budget, timeline expectations, and resource availability.
70
+
71
+ Instructions:
72
+ Each question should:
73
+ Build on provided client information
74
+ Non repetitive, and unique. Avoid asking similar questions.
75
+ Include realistic sample answers relevant to the client's context
76
+ Focus on gathering quantifiable or specific information
77
+
78
+
79
+ Output top 10 questions in the following format:
80
+ <question>(sample answers)
81
+ Just return the text and NOTHING else. Do not overexplain, omit code guards.
82
+ """,
83
+ inputs=['project_detail'],
84
+ outputs=['client_follow_up_questions'],
85
+ editable=True,
86
+ model=ModelType.O1_MINI,
87
+ description="Generate follow-up questions from the provided answers",
88
+ ),
89
+
90
+
91
+
92
  "rewrite_qa": PromptConfig(
93
  prompt="""Rewrite this for clarity while keeping all specific details, metrics, and constraints.
94
  Do not include context or assumptions beyond the input provided.
 
688
 
689
  Expected output should be professional, well-structured, and designed to help clients and stakeholders clearly understand the project scope. I’m going to tip you for a better outcome!
690
  """,
691
+ inputs=['generated_prd','derived_plan_test_components','derived_dev_components','quotation_cost'],
692
  outputs=['generated_BD_sow'],
693
  editable=True,
694
  model=ModelType.O1_MINI,
state.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from enum import Enum
2
+
3
+ class ProjectType(Enum):
4
+ Page = "Page"
5
+ Sage = "Sage"
6
+ Engage = "Engage"
7
+
8
+ class GlobalState:
9
+ def __init__(self):
10
+ self._quotation_project = None
11
+
12
+ @property
13
+ def quotation_project(self):
14
+ if self._quotation_project is None:
15
+ from Project import Project
16
+ self._quotation_project = Project(ProjectType.Page)
17
+ return self._quotation_project
18
+
19
+ @quotation_project.setter
20
+ def quotation_project(self, value):
21
+ self._quotation_project = value
22
+
23
+ state = GlobalState()