| import evoagentx.workflow.operators as operator | |
| import examples.aflow.pertqa.optimized_reploge.round_7.prompt as prompt_custom | |
| from evoagentx.models.model_configs import LLMConfig | |
| from evoagentx.benchmark.benchmark import Benchmark | |
| from evoagentx.models.model_utils import create_llm_instance | |
class Workflow:
    """Three-stage QA workflow: refine the question, answer it, ensemble.

    Pipeline: a Custom operator rewrites the problem for clarity, an
    AnswerGenerate operator produces an answer from the refined text, and a
    QAScEnsemble operator selects the final response from the candidate list.
    """

    def __init__(
        self,
        name: str,
        llm_config: LLMConfig,
        benchmark: Benchmark
    ):
        """Build the LLM instance and the three operators used by __call__.

        Args:
            name: Human-readable identifier for this workflow instance.
            llm_config: Configuration passed to create_llm_instance.
            benchmark: Benchmark object; stored for use by the harness.
        """
        self.name = name
        self.llm = create_llm_instance(llm_config)
        self.benchmark = benchmark
        # Operators all share the single LLM instance created above.
        self.custom = operator.Custom(self.llm)
        self.answer_generate = operator.AnswerGenerate(self.llm)
        self.ensemble = operator.QAScEnsemble(self.llm)

    async def __call__(self, problem: str):
        """Run refine -> answer -> ensemble on *problem* and return the result.

        Returns:
            The 'response' field of the ensemble operator's output.
        """
        # Step 1: rewrite the raw problem statement for clarity.
        refinement = await self.custom(
            input=problem,
            instruction="Refine the problem statement for clarity.",
        )
        # Step 2: generate an answer from the refined statement.
        generated = await self.answer_generate(input=refinement['response'])
        # Step 3: ensemble over the candidate answers (currently a single
        # candidate; the list form matches the QAScEnsemble interface).
        candidates = [generated['answer']]
        consensus = await self.ensemble(solutions=candidates)
        return consensus['response']