import evoagentx.workflow.operators as operator
import examples.aflow.pertqa.optimized_adamson_update.round_18.prompt as prompt_custom
from evoagentx.models.model_configs import LLMConfig
from evoagentx.benchmark.benchmark import Benchmark
from evoagentx.models.model_utils import create_llm_instance


class Workflow:
    """Multi-sample QA workflow.

    Pipeline: sample several candidate answers for a problem, have the LLM
    review each candidate, ensemble the reviewed candidates into one answer,
    then run a final review pass over the ensembled answer and return it.
    """

    def __init__(
        self,
        name: str,
        llm_config: LLMConfig,
        benchmark: Benchmark,
    ):
        self.name = name
        self.llm = create_llm_instance(llm_config)
        self.benchmark = benchmark
        # Operator instances share the single LLM created above.
        self.custom = operator.Custom(self.llm)
        self.answer_generate = operator.AnswerGenerate(self.llm)
        self.qas_ensemble = operator.QAScEnsemble(self.llm)

    async def __call__(self, problem: str):
        """Run the workflow on one problem string.

        Returns the text of the final reviewed answer (the 'response' field
        of the last Custom-operator call).
        """
        # Sample three independent candidate answers (self-consistency style).
        # Calls are sequential; each operator returns a dict with an 'answer' key.
        solutions_list = []
        for _ in range(3):
            solution = await self.answer_generate(input=problem)
            solutions_list.append(solution['answer'])

        # Ask the LLM to review each candidate answer against the problem.
        review_responses = []
        for answer in solutions_list:
            review_response = await self.custom(
                input=problem + " Review the answer: " + answer,
                instruction=prompt_custom.Review_PROMPT,
            )
            review_responses.append(review_response['response'])

        # Ensemble the reviewed candidates into a single best answer.
        ensemble_response = await self.qas_ensemble(solutions=review_responses)

        # NOTE(review): the original code ran an extra "consistency check" LLM
        # call here when the ensemble output was not in solutions_list, and
        # appended its result to solutions_list — but solutions_list is never
        # read after this point, so the branch had no effect on the returned
        # value and only added API cost and latency. It has been removed.

        # Final review pass over the ensembled answer; return its text.
        final_review_response = await self.custom(
            input=problem + " Final review of the ensemble answer: " + ensemble_response['response'],
            instruction=prompt_custom.Review_PROMPT,
        )
        return final_review_response['response']