import evoagentx.workflow.operators as operator
import examples.aflow.mbpp_new_full.optimized.round_3.prompt as prompt_custom
from evoagentx.models.model_configs import LLMConfig
from evoagentx.benchmark.benchmark import Benchmark
from evoagentx.models.model_utils import create_llm_instance


class Workflow:
    def __init__(
        self,
        name: str,
        llm_config: LLMConfig,
        benchmark: Benchmark
    ):
        self.name = name
        self.llm = create_llm_instance(llm_config)
        self.benchmark = benchmark
        self.custom = operator.Custom(self.llm)
        self.custom_code_generate = operator.CustomCodeGenerate(self.llm)
        self.test = operator.Test(self.llm)
        self.sc_ensemble = operator.ScEnsemble(self.llm)

    async def __call__(self, problem: str, entry_point: str):
        """
        Implementation of the workflow: generate code, test it, and attempt
        one ensemble-guided revision if the initial solution fails.
        """
        # Generate an initial candidate solution for the problem.
        solution = await self.custom_code_generate(
            problem=problem,
            entry_point=entry_point,
            instruction=prompt_custom.GENERATE_PYTHON_CODE_PROMPT,
        )
        # Run the benchmark tests against the initial candidate.
        test_result = await self.test(
            problem=problem,
            solution=solution['response'],
            entry_point=entry_point,
            benchmark=self.benchmark,
        )

        if not test_result['result']:
            # The initial solution failed: generate alternative solutions,
            alternate_solutions = await self.custom(
                input=problem,
                instruction=prompt_custom.ALTERNATE_SOLUTIONS_PROMPT,
            )
            # select the most consistent candidate via ensemble voting,
            selected_solution = await self.sc_ensemble(
                solutions=alternate_solutions['response'],
                problem=problem,
            )
            # and revise the selected candidate.
            revision_response = await self.custom(
                input=problem + f" Current Solution: {selected_solution['response']}",
                instruction=prompt_custom.REVISE_PROMPT,
            )
            # Retest the revised solution to ensure correctness.
            test_result_revised = await self.test(
                problem=problem,
                solution=revision_response['response'],
                entry_point=entry_point,
                benchmark=self.benchmark,
            )
            if test_result_revised['result']:
                # Return the revised and validated solution.
                return revision_response['response']
            else:
                # Inform the caller that the revised solution also failed testing.
                return "Revised solution still failed the tests."

        # The initial solution passed the tests; return it unchanged.
        return solution['response']
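

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the optimized round): given an
# already-constructed LLMConfig and Benchmark, the workflow instance is simply
# awaited. `run_example`, the problem text, and the entry point below are
# hypothetical placeholders, not names from this repository.
# ---------------------------------------------------------------------------
async def run_example(llm_config: LLMConfig, benchmark: Benchmark) -> str:
    workflow = Workflow(name="mbpp_round_3", llm_config=llm_config, benchmark=benchmark)
    # The return value is either validated code or the failure message above.
    return await workflow(
        problem="Write a function to sum a list of integers.",
        entry_point="sum_list",
    )
    # e.g. driven from synchronous code with:
    #   asyncio.run(run_example(my_llm_config, my_benchmark))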