import evoagentx.workflow.operators as operator
import examples.aflow.humaneval.optimized.round_7.prompt as prompt_custom
from evoagentx.models.model_configs import LLMConfig
from evoagentx.benchmark.benchmark import Benchmark
from evoagentx.models.model_utils import create_llm_instance

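# AFlow-optimized workflow (round 7) for the HumanEval benchmark, per the
# prompt import path above: generate candidates, ensemble, validate, test.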
class Workflow:

    def __init__(
        self,
        name: str,
        llm_config: LLMConfig,
        benchmark: Benchmark
    ):
        self.name = name
        self.llm = create_llm_instance(llm_config)
        self.benchmark = benchmark
        # Operators available to this workflow round.
        self.custom = operator.Custom(self.llm)
        self.custom_code_generate = operator.CustomCodeGenerate(self.llm)
        self.test = operator.Test(self.llm)
        self.sc_ensemble = operator.ScEnsemble(self.llm)
        self.validate_solution = operator.Custom(self.llm)

    async def __call__(self, problem: str, entry_point: str):
        """
        Generate several candidate solutions, select the most consistent one,
        validate it, and run it against the benchmark's tests.
        """
        # Step 1: generate three independent candidate solutions.
        solutions = []
        for _ in range(3):
            solution = await self.custom_code_generate(problem=problem, entry_point=entry_point, instruction=prompt_custom.GENERATE_PYTHON_CODE_PROMPT)
            solutions.append(solution['response'])

        # Step 2: pick the best candidate via self-consistency ensembling.
        best_solution = await self.sc_ensemble(solutions=solutions, problem=problem)

        # Step 3: validate the chosen candidate. The instruction pins the reply
        # format so the exact string comparison below is reliable.
        validation_result = await self.validate_solution(input=best_solution['response'], instruction="Validate this Python code. Respond with exactly 'Valid' or 'Invalid'.")
        if validation_result['response'].strip() == "Valid":
            # Step 4: run the validated solution against the benchmark tests.
            test_result = await self.test(problem=problem, solution=best_solution['response'], entry_point=entry_point, benchmark=self.benchmark)
            return test_result['solution']
        else:
            # Surface the failure rather than returning unvalidated code.
            return "Best solution failed validation."
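
# Usage sketch (illustrative addition, not part of the optimized round): one
# way to drive this workflow directly. `OpenAILLMConfig` and the `HumanEval`
# benchmark class are assumptions about EvoAgentX's API surface; swap in the
# config and benchmark classes your installation actually provides.
if __name__ == "__main__":
    import asyncio
    import os
    from evoagentx.models.model_configs import OpenAILLMConfig  # assumed location
    from evoagentx.benchmark.humaneval import HumanEval  # assumed location

    async def main():
        llm_config = OpenAILLMConfig(model="gpt-4o-mini", openai_key=os.environ["OPENAI_API_KEY"])
        workflow = Workflow(name="round_7", llm_config=llm_config, benchmark=HumanEval())
        # A toy HumanEval-style prompt: function signature plus docstring.
        problem = 'def add(a: int, b: int) -> int:\n    """Return the sum of a and b."""\n'
        print(await workflow(problem=problem, entry_point="add"))

    asyncio.run(main())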