File size: 2,447 Bytes
5374a2d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
import evoagentx.workflow.operators as operator
import examples.aflow.humaneval.optimized.round_20.prompt as prompt_custom
from evoagentx.models.model_configs import LLMConfig
from evoagentx.benchmark.benchmark import Benchmark
from evoagentx.models.model_utils import create_llm_instance

class Workflow:
    """Candidate-generate / ensemble-select / test / repair workflow for HumanEval.

    Pipeline per problem:
      1. Generate ``NUM_CANDIDATES`` code solutions with ``CustomCodeGenerate``.
      2. Pick the best candidate via self-consistency ensemble (``ScEnsemble``).
      3. Validate it against the benchmark (``Test``).
      4. If validation fails, ask the LLM to repair the solution using the
         failure feedback (``Custom``).
    """

    # Number of candidate solutions generated per problem.
    NUM_CANDIDATES = 3

    def __init__(
        self,
        name: str,
        llm_config: LLMConfig,
        benchmark: Benchmark
    ):
        """Build the LLM instance and the operator set used by ``__call__``.

        Args:
            name: Human-readable workflow name.
            llm_config: Configuration used to instantiate the LLM.
            benchmark: Benchmark used by the ``Test`` operator for validation.
        """
        self.name = name
        self.llm = create_llm_instance(llm_config)
        self.benchmark = benchmark
        self.custom = operator.Custom(self.llm)
        self.custom_code_generate = operator.CustomCodeGenerate(self.llm)
        self.test = operator.Test(self.llm)
        self.sc_ensemble = operator.ScEnsemble(self.llm)

    async def __call__(self, problem: str, entry_point: str):
        """Solve one HumanEval problem and return the final code string.

        Args:
            problem: The problem statement / function specification.
            entry_point: Name of the function the solution must define.

        Returns:
            The selected (and possibly repaired) solution code as a string.
        """
        solutions = []  # valid candidate solutions only
        errors = []     # diagnostics from failed/empty generations

        for _ in range(self.NUM_CANDIDATES):
            try:  # best-effort: one failed generation must not abort the run
                solution = await self.custom_code_generate(
                    problem=problem,
                    entry_point=entry_point,
                    instruction=prompt_custom.GENERATE_PYTHON_CODE_PROMPT,
                )
                # Only keep non-empty responses — error strings must never be
                # handed to the ensemble as if they were candidate code.
                if 'response' in solution and solution['response'].strip():
                    solutions.append(solution['response'])
                else:
                    errors.append("Generated code is empty or invalid.")
            except Exception as e:
                errors.append(f"Error generating code: {str(e)}")

        if not solutions:
            # Every generation failed: fall back to one direct generation
            # attempt instead of ensembling over error messages.
            fallback = await self.custom(
                input=problem + f" (previous attempts failed: {'; '.join(errors)})",
                instruction=prompt_custom.GENERATE_PYTHON_CODE_PROMPT,
            )
            return fallback['response']

        # Self-consistency ensemble picks the most agreed-upon candidate.
        selected_solution = await self.sc_ensemble(solutions=solutions, problem=problem)

        validation = await self.test(
            problem=problem,
            solution=selected_solution['response'],
            entry_point=entry_point,
            benchmark=self.benchmark,
        )

        if validation['result']:
            return selected_solution['response']

        # Validation failed: repair the solution using the test feedback.
        modified_solution = await self.custom(
            input=problem + f" with issues: {validation['solution']}",
            instruction=prompt_custom.GENERATE_PYTHON_CODE_PROMPT,
        )
        return modified_solution['response']