File size: 2,222 Bytes
5374a2d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
import evoagentx.workflow.operators as operator
import examples.aflow.scicode_full.optimized.round_7.prompt as prompt_custom
from evoagentx.models.model_configs import LLMConfig
from evoagentx.benchmark.benchmark import Benchmark
from evoagentx.models.model_utils import create_llm_instance

class Workflow:

    def __init__(
        self,
        name: str,
        llm_config: LLMConfig,
        benchmark: Benchmark
    ):
        """Build the workflow: one LLM instance shared by all operators."""
        self.name = name
        self.llm = create_llm_instance(llm_config)
        self.benchmark = benchmark
        # All operators are bound to the single LLM created above.
        self.custom = operator.Custom(self.llm)
        self.custom_code_generate = operator.CustomCodeGenerate(self.llm)
        self.test_operator = operator.Test(self.llm)

    async def __call__(self, problem: str, entry_point: str):
        """
        Generate a code solution, verify it, and retry once on failure.

        Flow: generate -> test; if the test fails, generate a second,
        independent candidate with the same prompt and test that one.

        Returns a dict: {"success": True, "final_solution": str} when either
        attempt passes verification, otherwise
        {"success": False, "current_solution": str}.
        """
        first = await self.custom_code_generate(
            problem=problem,
            entry_point=entry_point,
            instruction=prompt_custom.GENERATE_PYTHON_CODE_PROMPT,
        )
        first_check = await self.test_operator.test(
            problem=problem,
            solution=first['response'],
            entry_point=entry_point,
            benchmark=self.benchmark,
        )
        if first_check['result']:
            # First candidate passed verification — done.
            return {"success": True, "final_solution": first['response']}

        # First attempt failed: sample one more candidate to improve the
        # chance of success (LLM sampling is non-deterministic).
        retry = await self.custom_code_generate(
            problem=problem,
            entry_point=entry_point,
            instruction=prompt_custom.GENERATE_PYTHON_CODE_PROMPT,
        )
        retry_check = await self.test_operator.test(
            problem=problem,
            solution=retry['response'],
            entry_point=entry_point,
            benchmark=self.benchmark,
        )
        if retry_check['result']:
            return {"success": True, "final_solution": retry['response']}

        # Both attempts failed: report the first test's 'solution' field.
        # NOTE(review): this intentionally mirrors the original, which returns
        # first_check['solution'] (not the retry's output) — presumably the
        # Test operator echoes or repairs the solution it was given; confirm
        # against operator.Test's return schema.
        return {"success": False, "current_solution": first_check['solution']}