import evoagentx.workflow.operators as operator
import examples.aflow.pertqa.optimized_adamson_update.round_20.prompt as prompt_custom
from evoagentx.models.model_configs import LLMConfig
from evoagentx.benchmark.benchmark import Benchmark
from evoagentx.models.model_utils import create_llm_instance


class Workflow:
    """Multi-stage QA workflow: generate an answer, review it, ensemble the
    candidates, then run consistency and final-review passes over the result."""

    def __init__(
        self,
        name: str,
        llm_config: LLMConfig,
        benchmark: Benchmark
    ):
        self.name = name
        self.llm = create_llm_instance(llm_config)
        self.benchmark = benchmark
        # Operators share the single LLM instance built above.
        self.custom = operator.Custom(self.llm)
        self.answer_generate = operator.AnswerGenerate(self.llm)
        self.qas_ensemble = operator.QAScEnsemble(self.llm)

    async def __call__(self, problem: str):
        """Run the generate → review → ensemble → check pipeline for *problem*
        and return the final reviewed answer string."""
        generated = await self.answer_generate(input=problem)
        candidates = [generated['answer']]

        # First review pass over the raw generated answer; the reviewed text
        # becomes a second candidate for the ensemble.
        reviewed = await self.custom(
            input=problem + " Review the answer: " + generated['answer'],
            instruction=prompt_custom.Review_PROMPT,
        )
        candidates.append(reviewed['response'])

        # Self-consistency ensemble over both candidate answers.
        ensembled = await self.qas_ensemble(solutions=candidates)

        # Two further passes on the ensembled answer: a consistency check,
        # then a final review whose output is returned to the caller.
        checked = await self.custom(
            input=problem + " Check the consistency of the ensemble answer: " + ensembled['response'],
            instruction=prompt_custom.Review_PROMPT,
        )
        final = await self.custom(
            input=problem + " Final review of the ensemble answer: " + checked['response'],
            instruction=prompt_custom.Review_PROMPT,
        )
        return final['response']