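"""AFlow-optimized workflow (round 4) for the MBPP code-generation benchmark.

The workflow generates three candidate solutions, selects one by
self-consistency ensembling, runs the benchmark's tests on it, and asks the
LLM for a single revision if the tests fail.
"""
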
import evoagentx.workflow.operators as operator
import examples.aflow.mbpp_new_full.optimized.round_4.prompt as prompt_custom
from evoagentx.models.model_configs import LLMConfig
from evoagentx.benchmark.benchmark import Benchmark
from evoagentx.models.model_utils import create_llm_instance

class Workflow:
    """Generate three candidates, ensemble, test, and revise on failure."""

    def __init__(
        self,
        name: str,
        llm_config: LLMConfig,
        benchmark: Benchmark
    ):
        self.name = name
        self.llm = create_llm_instance(llm_config)
        self.benchmark = benchmark
        self.custom = operator.Custom(self.llm)  # generic LLM operator (not used in __call__)
        self.custom_code_generate = operator.CustomCodeGenerate(self.llm)  # generates Python code for a problem
        self.test = operator.Test(self.llm)  # runs the benchmark's tests against a solution
        self.sc_ensemble = operator.ScEnsemble(self.llm)  # self-consistency ensemble over candidates
        self.revise_custom = operator.Custom(self.llm)  # revision pass for failing solutions
        self.logging_enabled = True  # toggle for the simple print-based logging below

    async def log(self, message: str):
        if self.logging_enabled:
            print(message)  # simple stdout logging; can be replaced with a structured logger

    async def __call__(self, problem: str, entry_point: str):
        await self.log(f"Starting to generate solutions for the problem: {problem}")

        # Generate three candidate solutions independently.
        solution_candidates = []
        for _ in range(3):
            await self.log("Generating solution candidate...")
            solution = await self.custom_code_generate(problem=problem, entry_point=entry_point, instruction=prompt_custom.GENERATE_PYTHON_CODE_PROMPT)
            solution_candidates.append(solution['response'])
            await self.log(f"Generated solution candidate: {solution['response']}")

        # Pick the most consistent candidate via self-consistency ensembling.
        final_solution = await self.sc_ensemble(solutions=solution_candidates, problem=problem)
        await self.log(f"Final solution selected: {final_solution['response']}")

        # Run the benchmark's tests against the selected solution.
        test_result = await self.test(problem=problem, solution=final_solution['response'], entry_point=entry_point, benchmark=self.benchmark)

        # On test failure, ask the LLM for a single revision and return it.
        if not test_result['result']:
            await self.log("Test failed, revising solution...")
            revision_response = await self.revise_custom(input=problem + " Current Solution: " + final_solution['response'], instruction=prompt_custom.REVISE_PROMPT)
            await self.log(f"Revised solution: {revision_response['response']}")
            return revision_response['response']

        return final_solution['response']
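

if __name__ == "__main__":
    # Minimal usage sketch, not part of the original workflow. The import
    # locations, constructor parameters, and data path below are assumptions
    # about this version of evoagentx; adjust them to match your checkout.
    import asyncio

    from evoagentx.models import OpenAILLMConfig  # assumed location
    from evoagentx.benchmark import MBPP  # assumed location

    llm_config = OpenAILLMConfig(model="gpt-4o-mini", openai_key="sk-...")  # hypothetical parameters
    benchmark = MBPP("data/mbpp")  # hypothetical data path
    workflow = Workflow(name="mbpp_round_4", llm_config=llm_config, benchmark=benchmark)

    # Example task drawn from MBPP.
    problem = "Write a function to find the shared elements from the given two lists."
    code = asyncio.run(workflow(problem=problem, entry_point="similar_elements"))
    print(code)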