QuotationChatbot_v5 / Project.py
jess
chore: clean up buttons, and debug logical error (prompt editor, prompt keys)
69cbcdc
import ast
import csv
import re
from typing import Any, Dict
from common_functions_v4 import *
from prompt_configs import PROMPTS, ModelType
from config_classes import *
from langtrace_python_sdk.utils.with_root_span import with_langtrace_root_span
import openai
from contextlib import asynccontextmanager, contextmanager
import json
from sample_permutations import *
from io import StringIO
import pandas as pd
@contextmanager
def openai_session():
    """Context manager that yields an OpenAI client and closes it on exit.

    The client is constructed *before* the try block: in the original code a
    failing constructor left ``client`` unbound, so the ``finally`` clause
    raised NameError and masked the real exception.
    """
    client = openai.OpenAI()
    try:
        yield client
    finally:
        # close() releases the client's underlying HTTP connection pool.
        if hasattr(client, 'close'):
            client.close()
@asynccontextmanager
async def async_openai_session():
    """Async context manager that yields an AsyncOpenAI client and closes it.

    Fixes two defects in the original: ``client`` could be unbound in the
    ``finally`` clause when the constructor raised, and ``AsyncOpenAI.close()``
    is a coroutine that was called without ``await`` (so it never ran).
    """
    client = openai.AsyncOpenAI()
    try:
        yield client
    finally:
        # AsyncOpenAI.close() is async per the openai-python API; await it so
        # the connection pool is actually released.
        if hasattr(client, 'close'):
            await client.close()
@with_langtrace_root_span()
def call_o1_mini(prompt):
    """Send a single-turn prompt to o1-mini and return the reply text.

    API failures are swallowed and reported as an ``Error generating output``
    string rather than raised.
    """
    print(f"calling o1-mini with prompt: {prompt[:500]}")
    with openai_session() as client:
        try:
            completion = client.chat.completions.create(
                model="o1-mini",
                messages=[{"role": "user", "content": prompt}],
            )
            return completion.choices[0].message.content
        except Exception as e:
            return f"Error generating output: {str(e)}"
@with_langtrace_root_span()
def call_4o_mini(prompt):
    """Send a single-turn prompt to gpt-4o-mini and return the reply text.

    API failures are swallowed and reported as an ``Error generating output``
    string rather than raised.
    """
    print(f"calling 4o-mini with prompt: {prompt[:500]}")
    with openai_session() as client:
        try:
            completion = client.chat.completions.create(
                model="gpt-4o-mini",
                messages=[{"role": "user", "content": prompt}],
            )
            return completion.choices[0].message.content
        except Exception as e:
            return f"Error generating output: {str(e)}"
@with_langtrace_root_span()
async def async_call_o1_mini(prompt):
    """Async single-turn call to o1-mini; returns reply text or an error string."""
    print(f"calling o1-mini with prompt: {prompt[:500]}")
    async with async_openai_session() as client:
        try:
            completion = await client.chat.completions.create(
                model="o1-mini",
                messages=[{"role": "user", "content": prompt}],
            )
            return completion.choices[0].message.content
        except Exception as e:
            return f"Error generating output: {str(e)}"
@with_langtrace_root_span()
async def async_call_4o_mini(prompt):
    """Async single-turn call to gpt-4o-mini; returns reply text or an error string.

    Verbose [DEBUG] tracing is kept intact for the existing log consumers.
    """
    print(f"[DEBUG] Entering async_call_4o_mini")
    print(f"[DEBUG] Prompt received: {prompt[:500]}")
    async with async_openai_session() as client:
        print(f"[DEBUG] OpenAI client session established")
        try:
            print(f"[DEBUG] Attempting to create chat completion")
            # The async client returns an awaitable; it must be awaited.
            completion = await client.chat.completions.create(
                model="gpt-4o-mini",
                messages=[{"role": "user", "content": prompt}],
            )
            print(f"[DEBUG] Chat completion successful")
            print(f"[DEBUG] Response content: {completion.choices[0].message.content}")
            return completion.choices[0].message.content
        except Exception as e:
            print(f"[DEBUG] Error occurred: {str(e)}")
            print(f"[DEBUG] Error type: {type(e)}")
            return f"Error generating output: {str(e)}"
        finally:
            print("[DEBUG] Exiting async_call_4o_mini")
class Project:
def __init__(self, project_type, session_id=None):
self.project_type = project_type
self.session_id = session_id
self.rubric = []
self.rubric_section_names = []
self.component_list = []
self.project_detail = []
# Add these new attributes
self.mandays_results = []
self.mvp_mandays_results = []
self._db_config = {}
# Initialize all prompt outputs as attributes
for config in PROMPTS.values():
for output in config.outputs:
setattr(self, output, "")
# Input mapping for prompt execution
INPUT_MAPPINGS = {
'project_detail': lambda self: self.get_project_detail(),
'generated_prd': lambda self: self.generated_prd,
'configuration_type': lambda self: self.configuration_type,
'project_detail': lambda self: self.project_detail,
'follow_up_questions': lambda self: self.follow_up_questions,
'requirements_rubric': lambda self: self.requirements_rubric,
'generated_engage_follow_up_questions': lambda self: self.generated_engage_follow_up_questions,
'generated_page_follow_up_questions': lambda self: self.generated_page_follow_up_questions,
'generated_engage_further_follow_up_questions': lambda self: self.generated_engage_further_follow_up_questions,
'generated_page_further_follow_up_questions': lambda self: self.generated_page_further_follow_up_questions,
'generated_intent_list': lambda self: self.generated_intent_list,
'generated_plan_test_components': lambda self: self.generated_plan_test_components,
'generated_page_dev_components': lambda self: self.generated_page_dev_components,
'generated_engage_dev_components': lambda self: self.generated_engage_dev_components,
'reformatted_dev_components': lambda self: self.reformatted_dev_components,
'generated_intents_csv': lambda self: self.generated_intents_csv,
'generated_plan_test_mandays': lambda self: self.generated_plan_test_mandays,
'generated_dev_mandays': lambda self: self.generated_dev_mandays,
'generated_mvp_prd': lambda self: self.generated_mvp_prd,
'combined_cost_summary': lambda self: self.combined_cost_summary,
'generated_BD_SOW': lambda self: self.generated_BD_SOW,
'generated_Tech_SOW': lambda self: self.generated_Tech_SOW,
'identified_planning_testing_components': lambda self: self.identified_planning_testing_components,
'identified_development_components': lambda self: self.identified_development_components,
'identified_mvp_intents': lambda self: self.identified_mvp_intents,
'identified_priority_components': lambda self: self.identified_priority_components,
'revised_mandays_estimates': lambda self: self.revised_mandays_estimates,
'generated_MVP_mandays': lambda self: self.generated_MVP_mandays,
}
def _build_prompt(self, config, input_variables):
"""Build the prompt string from config and input variables"""
formatted_inputs_list = []
for key in config.inputs:
value = input_variables[key]
if isinstance(value, list):
value = "".join(value)
formatted_inputs_list.append("# " + str(key) + ":\n" + str(value) + "\n")
formatted_inputs = " ".join(formatted_inputs_list)
return f"""
{config.prompt}
{formatted_inputs}
"""
def _validate_and_fill_inputs(self, config, input_variables):
"""Validate and auto-fill input variables"""
input_variables = input_variables or {}
# Auto-fill inputs from project attributes using INPUT_MAPPINGS
for input_name in config.inputs:
if input_name not in input_variables:
if input_name not in self.INPUT_MAPPINGS:
raise ValueError(f"No mapping defined for required input: {input_name}")
try:
input_variables[input_name] = self.INPUT_MAPPINGS[input_name](self)
except Exception as e:
raise ValueError(f"Failed to get value for input {input_name}: {str(e)}")
# Validate all required inputs are available and not empty
missing_inputs = []
for input_name in config.inputs:
if input_name not in input_variables or not input_variables[input_name]:
missing_inputs.append(input_name)
if missing_inputs:
raise ValueError(f"Missing or empty required inputs: {missing_inputs}")
return input_variables
def _store_outputs(self, config, result):
"""Store outputs in project attributes"""
for output in config.outputs:
if hasattr(self, output):
setattr(self, output, result)
print(f"Stored output {output} with value: {result}")
def execute_prompt(self, prompt_name: str, input_variables: Dict[str, Any] = None) -> str:
"""Execute a prompt with given input variables synchronously"""
print(f"Attempting to execute prompt: {prompt_name}")
# if prompt_name not in PROMPTS:
if prompt_name not in self._db_config:
raise ValueError(f"Unknown prompt: {prompt_name}")
# config = PROMPTS[prompt_name]
config = self._db_config[prompt_name]
input_variables = self._validate_and_fill_inputs(config, input_variables)
prompt = self._build_prompt(config, input_variables)
print(f"Final prompt to be executed: {prompt}")
# Execute prompt with appropriate model
result = (
call_4o_mini(prompt)
if config.model == ModelType.GPT_4O_MINI
else call_o1_mini(prompt)
)
print(f"Result from executing prompt: {result[:800]}")
self._store_outputs(config, result)
return result
async def async_execute_prompt(self, prompt_name: str, input_variables: Dict[str, Any] = None) -> str:
"""Execute a prompt with given input variables asynchronously"""
print(f"Attempting to execute prompt: {prompt_name}")
# if prompt_name not in PROMPTS:
if prompt_name not in self._db_config:
raise ValueError(f"Unknown prompt: {prompt_name}")
# config = PROMPTS[prompt_name]
config = self._db_config[prompt_name]
input_variables = self._validate_and_fill_inputs(config, input_variables)
prompt = self._build_prompt(config, input_variables)
# print(f"Final prompt to be executed: {prompt}")
# Execute prompt with appropriate model
result = (
await async_call_o1_mini(prompt)
if config.model == ModelType.O1_MINI
else await async_call_4o_mini(prompt)
)
# print(f"Result from executing prompt: {result[:800]}")
self._store_outputs(config, result)
return result
    def get_db_config(self):
        """Get the configuration fetched from the database"""
        # Mapping of prompt name -> parsed prompt config (see load_config_from_db).
        return self._db_config
    def set_db_config(self, value):
        """Set the configuration fetched from the database"""
        # value: mapping of prompt name -> parsed prompt config.
        self._db_config = value
def load_config_from_db(self):
"""Load and parse the latest configuration from the database"""
try:
raw_config = get_latest_prompt_from_db()
parser = ConfigParser(raw_config)
parsed_config = parser.parse_config()
self._db_config = parsed_config
if parsed_config:
msg = f"Successfully reloaded {len(parsed_config)} prompts from database"
else:
msg = "No prompts found in database config"
print(msg)
return msg
except Exception as e:
print(f"Error loading config from database: {str(e)}")
raise
#Functions to interact with common_functions_v4.py#
    def set_rubric(self, rubric_list):
        """Set the rubric for the project"""
        # rubric_list: list of rubric entries; replaces any existing rubric.
        self.rubric = rubric_list
    def set_rubric_section_names(self, section_names):
        """Set the rubric section names for the project"""
        # section_names: list of section-name strings; replaces the old list.
        self.rubric_section_names = section_names
    def set_component_list(self, component_list):
        """Set the component list for the project"""
        # component_list: list of component entries; replaces the old list.
        self.component_list = component_list
def get_project_detail(self):
"""Get the project details as a formatted string"""
return "\n".join(self.project_detail) if self.project_detail else ""
def add_project_detail(self, detail):
"""Add a new project detail"""
if detail:
self.project_detail.append(detail)
def reset_project(self):
"""Reset all project attributes"""
print("Resetting project")
self.project_detail = []
self.load_config_from_db()
# for config in PROMPTS.values():
for config in self._db_config.values():
for output in config.outputs:
setattr(self, output, "")
    async def generate_client_initial_question(self):
        """Generate follow-up questions after initial client response"""
        # No LLM call here: the initial question is served verbatim from the
        # DB-loaded prompt config.
        # return PROMPTS["client_initial_question"].prompt
        return self._db_config["client_initial_question"].prompt
async def generate_client_follow_up(self):
"""Generate follow-up questions after initial client response"""
return await self.async_execute_prompt(
"generate_client_follow_up",
{
"project_detail": self.get_project_detail()
}
)
#TODO: To change
async def gather_project_input(self, prompt_name):
"""Generate context-aware questions to gather project requirements"""
return await self.async_execute_prompt(
f"{prompt_name}",
{
"project_detail": self.get_project_detail()
}
)
async def generate_general_questions(self):
"""Review project input and generate general deployment / intergretion questions to address gaps"""
return await self.async_execute_prompt(
"generate_general_questions",
{
"project_detail": self.get_project_detail()
}
)
async def generate_further_follow_up_questions(self):
"""Review project input and generate follow-up questions to address gaps"""
return await self.async_execute_prompt(
"generate_further_follow_up_questions",
{
"project_detail": self.get_project_detail()
}
)
##########################################################
def _parse_json_response(self, response: str) -> Any:
try:
# If response is not a string, return as-is
if not isinstance(response, str):
return response
# Extract JSON from code blocks if present
if "```json" in response:
response = response.split("```json")[1].split("```")[0].strip()
elif "```" in response:
response = response.split("```")[1].split("```")[0].strip()
return json.loads(response)
except json.JSONDecodeError:
return response
    def _get_input_from_previous_results(self) -> Any:
        """Get input value from previous results"""
        # Placeholder: nothing reads prior results yet, so this always yields None.
        # For step_1 results (PRD generation))
        return None
## Generate PRD and components from project details ##
def generate_prd_and_components(self, progress=gr.Progress()):
"""Generate PRD and components from project details, streaming results"""
results = []
# Generate PRD
yield "Generating PRD...", results
prd_response = self.execute_prompt(
"generate_prd",
{"project_detail": self.get_project_detail()}
)
log_prompt(PROMPTS['generate_prd'].step,
PROMPTS['generate_prd'].description,
PROMPTS["generate_prd"].prompt,
prd_response)
# log_prompt(self._db_config["generate_prd"].step,
# self._db_config["generate_prd"].description,
# self._db_config["generate_prd"].prompt,
# prd_response)
# Parse and format the PRD response
try:
prd_json = self._parse_json_response(prd_response)
# Extract PRD content
if isinstance(prd_json, dict):
if "detailed_breakdown" in prd_json:
self.generated_prd = prd_json["detailed_breakdown"]
formatted_prd = {
"function_name": "generate_prd",
"result": {
"detailed_breakdown": prd_json["detailed_breakdown"],
"summary": prd_json.get("summary", "")
}
}
else:
self.generated_prd = prd_json
formatted_prd = {
"function_name": "generate_prd",
"result": prd_json
}
else:
self.generated_prd = str(prd_json)
formatted_prd = {
"function_name": "generate_prd",
"result": str(prd_json)
}
# Add formatted PRD to results
results.append(formatted_prd)
yield "PRD generation complete", results
except Exception as e:
print(f"Warning: Could not parse PRD: {str(e)}")
self.generated_prd = prd_response
results.append({
"function_name": "generate_prd",
"result": prd_response
})
yield "PRD generation complete", results
try:
yield "Analyzing configuration with component agent...", results
configuration_output = self.execute_prompt(
"component_agent",
{"generated_prd": self.generated_prd}
)
log_prompt(PROMPTS['component_agent'].step,
PROMPTS['component_agent'].description,
PROMPTS["component_agent"].prompt,
configuration_output)
# Parse and format configuration output
try:
config = self._parse_json_response(configuration_output)
self.config = config
formatted_config = {
"function_name": "component_agent",
"result": json.dumps(config, indent=2)
}
results.append(formatted_config)
selected_functions = config[0]["selected_functions"]
yield f"Selected {len(selected_functions)} components to generate", results
except (KeyError, IndexError) as e:
yield f"Warning: Could not parse configuration output ({str(e)})", results
return
except Exception as e:
yield f"Error in analyzing configuration: {str(e)}", results
return
# Execute each function and stream results
for i, function_name in enumerate(selected_functions, 1):
try:
yield f"Generating component {i}/{len(selected_functions)}: {function_name}...", results
result = self.execute_prompt(function_name)
log_prompt(PROMPTS[function_name].step,
PROMPTS[function_name].description,
PROMPTS[function_name].prompt,
result)
# Format the component result
try:
parsed_result = self._parse_json_response(result)
formatted_result = {
"function_name": function_name,
"result": json.dumps(parsed_result, indent=2) if isinstance(parsed_result, (dict, list)) else str(parsed_result)
}
results.append(formatted_result)
yield f"Successfully generated {function_name}", results
except Exception as e:
print(f"Warning: Error formatting result for {function_name}: {str(e)}")
results.append({
"function_name": function_name,
"result": str(result)
})
yield f"Generated {function_name} (raw format)", results
except Exception as e:
yield f"Error executing {function_name}: {str(e)}", results
continue
yield "All components generated successfully!", results
def generate_mandays_estimate(self, progress=gr.Progress()):
"""Generate mandays estimation based on configuration type and selected functions, streaming results"""
results = []
try:
if not hasattr(self, 'config'):
yield "Configuration not found. Please run 'generate_prd_and_components' first.", results , None
return
config = self.config
configuration_type = config[0]["configuration_type"]
yield f"Configuration type detected: {configuration_type}", results , None
# Map configuration type to enum
try:
config_enum = ConfigurationType(configuration_type)
except ValueError:
yield f"Unsupported configuration type: {configuration_type}", results , None
return
# Get functions to execute based on configuration type
functions_to_execute = CONFIGURATION_TYPE_FUNCTIONS.get(config_enum, [])
if not functions_to_execute:
yield f"No functions defined for configuration type: {configuration_type}", results , None
return
# Execute each function and stream results
for function_name in functions_to_execute:
try:
yield f"Executing {function_name}...", results , None
# Execute the prompt with gathered input variables
result = self.execute_prompt(function_name)
log_prompt(PROMPTS[function_name].step,
PROMPTS[function_name].description,
PROMPTS[function_name].prompt,
result)
# Process CSV sections if there's a section break
if "----SECTION BREAK----" in result:
sections = result.split("----SECTION BREAK----")
sections = [section.strip().replace('```csv', '').replace('```', '') for section in sections]
processed_result = {}
doc_csv = StringIO(sections[0].strip())
try:
doc_df = pd.read_csv(doc_csv, keep_default_na=False)
except pd.errors.ParserError as e:
print(f"Error processing Document Extraction CSV: {str(e)}")
continue
chat_csv = StringIO(sections[1].strip())
try:
chat_df = pd.read_csv(chat_csv, keep_default_na=False)
except pd.errors.ParserError as e:
print(f"Error processing Chatbot CSV: {str(e)}")
continue
processed_result = {
f"{function_name}": pd.concat([
doc_df.assign(section='Document Extraction'),
chat_df.assign(section='Chatbot')
]).to_dict('records')
}
else:
# Single CSV processing with error handling
try:
# Clean up the CSV data
clean_result = (result
.replace('```csv', '')
.replace('```', '')
.strip()
.replace('\r\n', '\n')
.replace('\r', '\n')
)
csv_data = StringIO(clean_result)
df = pd.read_csv(csv_data,
keep_default_na=False,
quoting=csv.QUOTE_ALL,
escapechar='\\',
on_bad_lines='warn'
)
processed_result = {
f"{function_name}": df.to_dict('records')
}
except Exception as e:
print(f"Error processing CSV: {str(e)}")
continue
# Format and store results
formatted_result = {
"function_name": function_name,
"result": processed_result
}
results.append(formatted_result)
yield f"Successfully completed {function_name}", results , None
except Exception as e:
print(f"Error executing {function_name}: {str(e)}")
yield f"Error in {function_name}: {str(e)}", results , None
continue
total_mandays, total_cost, estimated_months = calculate_mandays_and_costs(results)
general_cost_summary = f"""Original Estimate:
Total Mandays: {total_mandays:.2f}
Total Cost: ${total_cost:,.2f}
({estimated_months:.2f} months)"""
self.general_cost_summary = general_cost_summary
# Store the results for later recalculation
self.mandays_results = results
yield "Mandays estimation completed!", results, general_cost_summary
except Exception as e:
print(f"Error in generate_mandays_estimate: {str(e)}")
yield f"Error during mandays estimation: {str(e)}", results , None
def analyze_mvp_components(self, progress=gr.Progress()):
"""Analyze MVP components based on configuration type and selected functions, streaming results"""
results = []
try:
if not hasattr(self, 'config'):
yield "Configuration not found. Please run 'generate_prd_and_components' first.", results
return
config = self.config
configuration_type = config[0]["configuration_type"]
yield f"Configuration type detected: {configuration_type}", results
# Map configuration type to enum
try:
config_enum = ConfigurationType(configuration_type)
except ValueError:
yield f"Unsupported configuration type: {configuration_type}", results
return
# Get functions to execute based on configuration type
functions_to_execute = ANALYZE_COMPONENTS_FUNCTIONS.get(config_enum, [])
if not functions_to_execute:
yield f"No functions defined for configuration type: {configuration_type}", results
return
# Execute each function and stream results
for function_name in functions_to_execute:
try:
yield f"Executing {function_name}...", results
# Execute the prompt with gathered input variables
result = self.execute_prompt(function_name)
log_prompt(PROMPTS[function_name].step,
PROMPTS[function_name].description,
PROMPTS[function_name].prompt,
result)
# Clean up the CSV data
clean_result = (result
.replace('```csv', '')
.replace('```', '')
.strip()
.replace('\r\n', '\n')
.replace('\r', '\n')
)
csv_data = StringIO(clean_result)
df = pd.read_csv(csv_data,
keep_default_na=False,
quoting=csv.QUOTE_ALL,
escapechar='\\',
on_bad_lines='warn'
)
processed_result = {
f"{function_name}": df.to_dict('records')
}
# Format and store results
formatted_result = {
"function_name": function_name,
"result": processed_result
}
results.append(formatted_result)
yield f"Successfully completed {function_name}", results
except Exception as e:
print(f"Error executing {function_name}: {str(e)}")
yield f"Error in {function_name}: {str(e)}", results
continue
except Exception as e:
print(f"Error in analyzing_mvp_components: {str(e)}")
yield f"Error during mvp components analysis: {str(e)}", results
def recalculate_mvp_mandays(self, progress=gr.Progress()):
"""Recalculate MVP Mandays based on configuration type and selected functions, streaming results"""
results = []
try:
if not hasattr(self, 'config'):
yield "Configuration not found. Please run 'generate_prd_and_components' first.", results
return
config = self.config
configuration_type = config[0]["configuration_type"]
yield f"Configuration type detected: {configuration_type}", results
# Map configuration type to enum
try:
config_enum = ConfigurationType(configuration_type)
except ValueError:
yield f"Unsupported configuration type: {configuration_type}", results
return
# Get functions to execute based on configuration type
functions_to_execute = RECALCULATE_MVP_MANDAYS_FUNCTIONS.get(config_enum, [])
if not functions_to_execute:
yield f"No functions defined for configuration type: {configuration_type}", results
return
# Execute each function and stream results
for function_name in functions_to_execute:
try:
yield f"Executing {function_name}...", results
result = self.execute_prompt(function_name)
log_prompt(PROMPTS[function_name].step,
PROMPTS[function_name].description,
PROMPTS[function_name].prompt,
result)
# Format the component result
try:
formatted_result = {
"function_name": function_name,
"result": result
}
results.append(formatted_result)
yield f"Successfully generated {function_name}", results
except Exception as e:
print(f"Warning: Error formatting result for {function_name}: {str(e)}")
results.append({
"function_name": function_name,
"result": str(result)
})
yield f"Generated {function_name}", results
except Exception as e:
yield f"Error executing {function_name}: {str(e)}", results
continue
except Exception as e:
print(f"Error in recalculate_mvp_mandays: {str(e)}")
yield f"Error during mvp components analysis: {str(e)}", results
yield "Analysis completed", results
def generate_mvp_mandays(self, progress=gr.Progress()):
"""Generate MVP Mandays based on configuration type and selected functions, streaming results"""
results =[]
yield "Generating MVP Mandays...", results , None
try:
if not hasattr(self, 'config'):
yield "Configuration not found.", [] , None
return
config = self.config
configuration_type = config[0]["configuration_type"]
yield f"Configuration type detected: {configuration_type}", [] , None
# Map configuration type to enum
try:
config_enum = ConfigurationType(configuration_type)
except ValueError:
yield f"Unsupported configuration type: {configuration_type}", [] , None
return
# Get functions to execute based on configuration type
functions_to_execute = GENERATE_MVP_MANDAYS_FUNCTIONS.get(config_enum, [])
if not functions_to_execute:
yield f"No functions defined for configuration type: {configuration_type}", [] , None
return
for function_name in functions_to_execute:
try:
yield f"Executing {function_name}...", results , None
# Execute the prompt with gathered input variables
result = self.execute_prompt(function_name)
log_prompt(PROMPTS[function_name].step,
PROMPTS[function_name].description,
PROMPTS[function_name].prompt,
result)
# Process CSV sections if there's a section break
if "----SECTION BREAK----" in result:
sections = result.split("----SECTION BREAK----")
# Clean up the CSV data before parsing
sections = [section.strip().replace('```csv', '').replace('```', '') for section in sections]
processed_result = {}
# Process each section with error handling
for i, section in enumerate(sections):
try:
clean_section = (section
.replace('\r\n', '\n')
.replace('\r', '\n')
)
csv_data = StringIO(clean_section)
df = pd.read_csv(csv_data,
keep_default_na=False,
quoting=csv.QUOTE_ALL,
escapechar='\\',
on_bad_lines='warn'
)
# Use appropriate section names based on function and index
if function_name == "generate_page_MVP_mandays":
section_name = 'MVP Plan_Test' if i == 0 else 'MVP Dev'
elif function_name == "generate_engage_MVP_mandays":
section_name = 'MVP Plan_Test' if i == 0 else ('MVP Dev' if i == 1 else 'MVP Intents')
else:
# Default section names if function is not recognized
section_name = f'Section_{i}'
# Convert mandays column to float and handle any non-numeric values
df['mandays'] = pd.to_numeric(df['mandays'], errors='coerce').fillna(0)
processed_result[section_name] = df.to_dict('records')
except Exception as e:
print(f"Error processing section {i}: {str(e)}")
processed_result[f'section_{i}_error'] = str(e)
# Format and store results
formatted_result = {
"function_name": function_name,
"result": processed_result
}
results.append(formatted_result)
yield f"Successfully completed {function_name}", results, None
except Exception as e:
print(f"Error executing {function_name}: {str(e)}")
yield f"Error in {function_name}: {str(e)}", results, None
continue
total_mvp_mandays, total_mvp_cost, estimated_mvp_months = calculate_mvp_mandays_and_costs(results)
mvp_cost_summary = f"""MVP Estimate:
Total Mandays: {total_mvp_mandays:.2f}
Total Cost: ${total_mvp_cost:,.2f}
({estimated_mvp_months:.2f} months)"""
self.mvp_cost_summary = mvp_cost_summary
if hasattr(self, 'general_cost_summary'):
self.combined_cost_summary = f"""
{self.general_cost_summary}
{mvp_cost_summary}"""
else:
self.combined_cost_summary = mvp_cost_summary
# Store the results for later recalculation
self.mvp_mandays_results = results
yield "MVP Mandays generation completed!", results, self.combined_cost_summary
except Exception as e:
print(f"Error in generating MVP mandays: {str(e)}")
yield f"Error in generating MVP mandays: {str(e)}", []
def generate_final_documentation(self, progress=gr.Progress()):
"""Generate Final Documentation based on configuration type and selected functions, streaming results"""
results = []
try:
if not hasattr(self, 'config'):
yield "Configuration not found.", []
return
config = self.config
configuration_type = config[0]["configuration_type"]
yield f"Configuration type detected: {configuration_type}", []
# Map configuration type to enum
try:
config_enum = ConfigurationType(configuration_type)
except ValueError:
yield f"Unsupported configuration type: {configuration_type}", []
return
# Get functions to execute based on configuration type
functions_to_execute = GENERATE_FINAL_DOCUMENT_FUNCTIONS.get(config_enum, [])
if not functions_to_execute:
yield f"No functions defined for configuration type: {configuration_type}", []
return
for function_name in functions_to_execute:
try:
yield f"Executing {function_name}...", results
result = self.execute_prompt(function_name)
log_prompt(PROMPTS[function_name].step,
PROMPTS[function_name].description,
PROMPTS[function_name].prompt,
result)
# Parse JSON with improved handling
try:
if isinstance(result, str):
# Remove any JSON code block markers
clean_result = re.sub(r'```json\s*|\s*```', '', result.strip())
# Parse the JSON
parsed_result = json.loads(clean_result)
# Directly use the values from the parsed JSON as markdown
formatted_result = {
"function_name": function_name,
"result": parsed_result["scope_summary"] + "\n\n" +
parsed_result["modules_and_functional_requirements"] + "\n\n" +
parsed_result["out_of_scope"] + "\n\n" +
parsed_result["system_flow"]
}
else:
formatted_result = {
"function_name": function_name,
"result": result
}
results.append(formatted_result)
except json.JSONDecodeError:
results.append({
"function_name": function_name,
"result": result
})
except Exception as e:
print(f"Error executing {function_name}: {str(e)}")
yield f"Error in {function_name}: {str(e)}", results
continue
yield f"Successfully completed {function_name}", results
yield "Final Documentation Generation completed!", results
except Exception as e:
print(f"Error in generating Final Documentation: {str(e)}")
yield f"Error in generating Final Documentation: {str(e)}", []
    def recalculate_mandays_costs(self, progress=gr.Progress()):
        """Recalculate mandays and costs for both original and MVP estimates"""
        # Rebuilds the results lists from previously stored prompt outputs, then
        # re-runs the cost calculators and refreshes the combined summary text.
        try:
            # Recalculate original estimate
            original_results = []
            if hasattr(self, 'generated_plan_test_mandays') and self.generated_plan_test_mandays:
                try:
                    plan_test_data = self.generated_plan_test_mandays
                    if isinstance(plan_test_data, dict):
                        # Find the single generate_*_plan_test_mandays key and wrap
                        # it in the shape calculate_mandays_and_costs expects.
                        for function_name in plan_test_data.keys():
                            if function_name.startswith('generate_') and function_name.endswith('_plan_test_mandays'):
                                original_results.append({
                                    'function_name': function_name,
                                    'result': plan_test_data
                                })
                                break
                except Exception as e:
                    print(f"Error processing plan test mandays: {str(e)}")
            if hasattr(self, 'generated_dev_mandays') and self.generated_dev_mandays:
                try:
                    dev_data = self.generated_dev_mandays
                    if isinstance(dev_data, dict) and dev_data.get('generate_dev_mandays'):
                        original_results.append({
                            'function_name': 'generate_dev_mandays',
                            'result': dev_data
                        })
                except Exception as e:
                    print(f"Error processing dev mandays: {str(e)}")
            # Calculate original estimate
            total_mandays, total_cost, estimated_months = calculate_mandays_and_costs(original_results)
            # Format original estimate summary
            self.general_cost_summary = f"""Original Estimate:
Total Mandays: {total_mandays:.2f}
Total Cost: ${total_cost:,.2f}
({estimated_months:.2f} months)"""
            # Recalculate MVP estimate
            mvp_results = []
            if hasattr(self, 'generated_MVP_mandays') and self.generated_MVP_mandays:
                try:
                    mvp_data = self.generated_MVP_mandays
                    if isinstance(mvp_data, dict):
                        mvp_results.append({
                            'function_name': 'generate_MVP_mandays',
                            'result': mvp_data
                        })
                except Exception as e:
                    print(f"Error processing MVP mandays: {str(e)}")
            # Calculate MVP estimate
            total_mvp_mandays, total_mvp_cost, estimated_mvp_months = calculate_mvp_mandays_and_costs(mvp_results)
            # Format MVP estimate summary
            self.mvp_cost_summary = f"""MVP Estimate:
Total MVP Mandays: {total_mvp_mandays:.2f}
Total MVP Cost: ${total_mvp_cost:,.2f}
({estimated_mvp_months:.2f} months)"""
            # Combine both summaries
            self.combined_cost_summary = f"""
{self.general_cost_summary}
{self.mvp_cost_summary}"""
            return self.combined_cost_summary
        except Exception as e:
            # Best-effort: callers display whatever string comes back.
            print(f"Error in recalculating mandays and costs: {str(e)}")
            return "Error recalculating estimates"