import evoagentx.workflow.operators as operator
import examples.aflow.humaneval.optimized.round_20.prompt as prompt_custom
from evoagentx.models.model_configs import LLMConfig
from evoagentx.benchmark.benchmark import Benchmark
from evoagentx.models.model_utils import create_llm_instance


class Workflow:

    def __init__(
        self,
        name: str,
        llm_config: LLMConfig,
        benchmark: Benchmark
    ):
        self.name = name
        self.llm = create_llm_instance(llm_config)
        self.benchmark = benchmark
        self.custom = operator.Custom(self.llm)
        self.custom_code_generate = operator.CustomCodeGenerate(self.llm)
        self.test = operator.Test(self.llm)
        self.sc_ensemble = operator.ScEnsemble(self.llm)

    async def __call__(self, problem: str, entry_point: str):
        solutions = []  # Candidate solutions for the self-consistency ensemble
        for _ in range(3):  # Generate three variations of the solution
            try:
                solution = await self.custom_code_generate(
                    problem=problem,
                    entry_point=entry_point,
                    instruction=prompt_custom.GENERATE_PYTHON_CODE_PROMPT,
                )
                # Keep the candidate only if the response is present and non-empty
                if 'response' in solution and solution['response'].strip():
                    solutions.append(solution['response'])
                else:
                    solutions.append("Generated code is empty or invalid.")
            except Exception as e:
                # Record the failure so the ensemble still receives three entries
                solutions.append(f"Error generating code: {str(e)}")

        # Self-consistency ensemble: select the best candidate among the three
        selected_solution = await self.sc_ensemble(solutions=solutions, problem=problem)

        # Validate the selected solution against the benchmark's test cases
        validation = await self.test(
            problem=problem,
            solution=selected_solution['response'],
            entry_point=entry_point,
            benchmark=self.benchmark,
        )

        if validation['result']:
            return selected_solution['response']
        else:
            # Tests failed: ask the LLM to repair the code using the failure details
            modified_solution = await self.custom(
                input=problem + f" with issues: {validation['solution']}",
                instruction=prompt_custom.GENERATE_PYTHON_CODE_PROMPT,
            )
            return modified_solution['response']
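

# --- Usage sketch (illustrative, not part of the optimized workflow) -------
# A minimal driver showing how the Workflow above is invoked. It assumes the
# caller already has a concrete LLMConfig and a HumanEval-style Benchmark
# instance; both are passed in rather than constructed here, since their
# concrete constructors are not shown in this file. The `problem` string,
# `entry_point`, and workflow name below are hypothetical examples.

async def run_example(llm_config: LLMConfig, benchmark: Benchmark) -> str:
    """Run the workflow on a toy HumanEval-style problem and return the code."""
    workflow = Workflow(
        name="humaneval_round_20",  # hypothetical name
        llm_config=llm_config,
        benchmark=benchmark,
    )
    return await workflow(
        problem='def add(a: int, b: int) -> int:\n    """Return the sum of a and b."""\n',
        entry_point="add",
    )

# To execute, given real config and benchmark instances:
#   import asyncio
#   print(asyncio.run(run_example(my_llm_config, my_benchmark)))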