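"""AFlow-optimized workflow for MBPP (round 9): generate several candidate
solutions, validate each against the benchmark tests, ensemble the validated
ones, and revise the result if it still fails."""
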
import evoagentx.workflow.operators as operator
import examples.aflow.mbpp_new_full.optimized.round_9.prompt as prompt_custom
from evoagentx.models.model_configs import LLMConfig
from evoagentx.benchmark.benchmark import Benchmark
from evoagentx.models.model_utils import create_llm_instance

class Workflow:
    
    def __init__(
        self,
        name: str,
        llm_config: LLMConfig,
        benchmark: Benchmark
    ):
        self.name = name
        self.llm = create_llm_instance(llm_config)
        self.benchmark = benchmark
        self.custom = operator.Custom(self.llm)
        self.custom_code_generate = operator.CustomCodeGenerate(self.llm)  # prompt-driven code generation
        self.test = operator.Test(self.llm)  # runs a solution against the benchmark tests
        self.sc_ensemble = operator.ScEnsemble(self.llm)  # self-consistency ensemble over candidates
        self.revise_custom = operator.Custom(self.llm)  # Custom operator reused for contextual revision of failing solutions

    async def __call__(self, problem: str, entry_point: str) -> str:
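        """Solve `problem`: generate three candidates, keep those that pass the
        benchmark tests, ensemble the survivors, and revise the result if it
        still fails. Returns the final solution string."""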
        solution_candidates = []
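        # Generate three independent candidate solutions for the ensemble.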
        for _ in range(3):  
            try:
                solution = await self.custom_code_generate(problem=problem, entry_point=entry_point, instruction=prompt_custom.GENERATE_PYTHON_CODE_PROMPT)
                solution_candidates.append(solution['response'])
            except Exception as e:
                # Handle or log error details as needed
                print(f"Error during code generation: {e}")

        # Validate each solution candidate before proceeding to ensemble
        validated_solutions = []
        for candidate in solution_candidates:
            test_result = await self.test(problem=problem, solution=candidate, entry_point=entry_point, benchmark=self.benchmark)
            if test_result['result']:
                validated_solutions.append(candidate)

        # Fall back to the raw candidates if none passed validation, so the
        # ensemble always has input; abort if generation produced nothing.
        if not validated_solutions:
            if not solution_candidates:
                raise RuntimeError("All code generation attempts failed")
            validated_solutions = solution_candidates

        final_solution = await self.sc_ensemble(solutions=validated_solutions, problem=problem)

        # Re-test the ensembled solution; if it still fails, revise it with the problem statement as context.
        final_test = await self.test(problem=problem, solution=final_solution['response'], entry_point=entry_point, benchmark=self.benchmark)
        if not final_test['result']:
            revision_response = await self.revise_custom(input=problem + " Current Solution: " + final_solution['response'], instruction=prompt_custom.REVISE_PROMPT)
            return revision_response['response']
        return final_solution['response']
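
# Minimal usage sketch (the config values and benchmark instance below are
# assumptions; substitute your own LLMConfig parameters and Benchmark subclass):
#
#   import asyncio
#   workflow = Workflow(name="mbpp_round_9", llm_config=my_llm_config, benchmark=my_benchmark)
#   solution = asyncio.run(workflow(problem="Write a function that ...", entry_point="solve"))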