File size: 2,124 Bytes
fdf190d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
import json
import multiprocessing
import os

from evalplus._experimental.type_mut_for_eff import TypedMutEffGen
from evalplus.data import HUMANEVAL_PLUS_INPUTS_PATH, get_human_eval_plus

# Output directory for the generated "big input" JSON files (one per task).
# NOTE(review): hard-coded absolute path to one user's machine — consider
# making this configurable (env var / CLI flag) before wider use.
HUMANEVAL_PLUS_BIG_INPUTS_PATH = "/home/yuyao/eval-plus/HumanEvalPlusBigInputs"


def main():
    """Generate "big" stress-test inputs for every HumanEval+ problem.

    For each problem, a ``TypedMutEffGen`` generator is run in a separate
    process (input generation can hang, so it is bounded by a 5-minute
    timeout) and the resulting inputs plus their fluctuation ratios are
    written to ``<HUMANEVAL_PLUS_BIG_INPUTS_PATH>/<task_id>.json``.
    Tasks whose output file already exists are skipped, so the script is
    resumable; failed tasks are appended to ``fail.txt``.
    """
    problems = get_human_eval_plus()
    for p in problems:
        print(f"{p['task_id']}...")
        # Task ids look like "HumanEval/0"; '/' is not filesystem-safe.
        filename = p["task_id"].replace("/", "_")
        # Bug fix: the path previously used a literal placeholder instead of
        # the per-task `filename`, so every task mapped to the same file and
        # the resume check below skipped all but the first task.
        big_input_path = os.path.join(
            HUMANEVAL_PLUS_BIG_INPUTS_PATH, f"{filename}.json"
        )

        # Resume support: skip tasks that already have generated inputs.
        if os.path.exists(big_input_path):
            continue
        inputs = p["base_input"]
        signature = p["entry_point"]
        contract_code = p["prompt"] + p["contract"] + p["canonical_solution"]

        def input_generation(inputs, signature, contract_code):
            # Runs in a child process; `results` is a manager-backed proxy
            # list, so appends are visible to the parent process.
            try:
                gen = TypedMutEffGen(inputs, signature, contract_code)
                new_inputs = gen.generate()
                results.append(new_inputs)
            except Exception:
                # Was a bare `except:`, which also swallowed SystemExit /
                # KeyboardInterrupt. Log the failure and push a sentinel so
                # the parent knows generation did not succeed.
                with open("fail.txt", "a") as f:
                    f.write(f"{signature} failed\n")  # newline: one entry per line
                results.append("fail")

        manager = multiprocessing.Manager()
        results = manager.list()
        proc = multiprocessing.Process(
            target=input_generation, args=(inputs, signature, contract_code)
        )
        proc.start()
        proc.join(timeout=300)  # generation may hang; bound it at 5 minutes
        if proc.is_alive():
            proc.terminate()
            proc.kill()
            proc.join()  # reap the killed child so it doesn't linger as a zombie
            continue
        # Empty list or the "fail" sentinel means generation did not succeed.
        if len(results) == 0 or isinstance(results[0], str):
            continue
        new_inputs = results[0]

        new_input_dict = {
            "task_id": p["task_id"],
            "inputs": [item.inputs for item in new_inputs],
            "sd": [item.fluctuate_ratio for item in new_inputs],
        }
        # Reuse the path computed above (it was previously rebuilt inline,
        # with the same placeholder bug).
        with open(big_input_path, "w") as f:
            json.dump(new_input_dict, f)


if __name__ == "__main__":
    main()