MunanNing commited on
Commit
f8863d7
·
verified ·
1 Parent(s): 75cafd9

upload evaluate code

Browse files
Evaluate Codes/1-Qwen-7b.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# %%
# Evaluation setup for Qwen-7B-Chat: load tokenizer and model once at module
# import so generate_output() below can reuse them for every task file.
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
import torch
import json
import os
from tqdm import tqdm
from functools import partial

# trust_remote_code is required: Qwen ships custom modeling/tokenizer code.
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-7B-Chat", trust_remote_code=True)

# fp16=True loads half-precision weights on a single GPU;
# .eval() disables dropout for deterministic inference.
model = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen-7B-Chat",
    device_map="cuda:0",
    trust_remote_code=True, fp16=True
).eval()
19
def generate_output(file_path, max_new_tokens=128, prefix=""):
    """Run Qwen-7B-Chat over every record in a JSON task file.

    Args:
        file_path: path to a JSON list of {"instruction", "question", "answer"}
            records.
        max_new_tokens: generation budget forwarded to model.chat. Bug fix:
            this parameter was previously ignored in favour of a hard-coded 64.
        prefix: reserved; currently unused — TODO confirm intended use.

    Returns:
        dict keyed by record index (as str) with origin_prompt / prediction /
        refr (reference answer) entries.
    """
    with open(file_path, 'r', encoding='utf-8') as file:
        json_data = json.load(file)
    output = {}
    for index in tqdm(range(len(json_data)), desc=file_path):
        item = json_data[index]
        instruction = item.get("instruction")
        question = item.get("question")
        answer = item.get("answer")

        # Cap the prompt at 2000 characters to stay inside the context window.
        chat_input = instruction + "\n" + question
        chat_input = chat_input[:2000]
        # Honour the max_new_tokens parameter (was a hard-coded 64).
        prediction, history = model.chat(tokenizer, chat_input, history=[],
                                         max_new_tokens=max_new_tokens,
                                         do_sample=False, use_cache=True)
        output[str(index)] = {
            "origin_prompt": chat_input,
            "prediction": prediction,
            "refr": answer
        }
    return output
40
+
41
+
42
# Driver: walk ../national_test, run every known task file through the model,
# and persist predictions as one JSON file per task.
folder_path = "../national_test"
output_path = "./Qwen-7B-Chat"
# exist_ok=True replaces the racy exists()+mkdir() pair and the redundant
# second check that previously sat inside the loop.
os.makedirs(output_path, exist_ok=True)
# All five national-exam tasks share the same generation routine.
id_to_task = {
    "4-1": generate_output,
    "4-2": generate_output,
    "4-3": generate_output,
    "4-4": generate_output,
    "4-5": generate_output,
}
for filename in os.listdir(folder_path):
    if filename.endswith('.json'):
        task_name = filename.split(".")[0]
        if task_name not in id_to_task:
            continue
        file_path = os.path.join(folder_path, filename)
        # Resume support: skip tasks whose output file already exists.
        if os.path.exists(os.path.join(output_path, filename)):
            continue
        output = id_to_task[task_name](file_path)
        with open(os.path.join(output_path, filename), "w", encoding='utf-8') as file:
            json.dump(output, file, ensure_ascii=False)

# %%
Evaluate Codes/2-InternLM2-7b.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# %%
# Evaluation setup for InternLM2-7B-Chat: load tokenizer and model once at
# module import so generate_output() below can reuse them for every task file.
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
import torch
import json
import os
from tqdm import tqdm
from functools import partial

# trust_remote_code is required: InternLM2 ships custom modeling code.
tokenizer = AutoTokenizer.from_pretrained("internlm/internlm2-chat-7b", trust_remote_code=True)

# Half-precision weights on GPU 1; .eval() disables dropout for inference.
model = AutoModelForCausalLM.from_pretrained(
    "internlm/internlm2-chat-7b",
    device_map="cuda:1",
    trust_remote_code=True, torch_dtype=torch.float16
).eval()
19
def generate_output(file_path, max_new_tokens=128, prefix=""):
    """Run InternLM2-7B-Chat over every record in a JSON task file.

    Args:
        file_path: path to a JSON list of {"instruction", "question", "answer"}
            records.
        max_new_tokens: generation budget forwarded to model.chat. Bug fix:
            this parameter was previously ignored in favour of a hard-coded 64.
        prefix: reserved; currently unused — TODO confirm intended use.

    Returns:
        dict keyed by record index (as str) with origin_prompt / prediction /
        refr (reference answer) entries.
    """
    with open(file_path, 'r', encoding='utf-8') as file:
        json_data = json.load(file)
    output = {}
    for index in tqdm(range(len(json_data)), desc=file_path):
        item = json_data[index]
        instruction = item.get("instruction")
        question = item.get("question")
        answer = item.get("answer")

        # Cap the prompt at 2000 characters to stay inside the context window.
        chat_input = instruction + "\n" + question
        chat_input = chat_input[:2000]
        # Honour the max_new_tokens parameter (was a hard-coded 64).
        prediction, history = model.chat(tokenizer, chat_input, history=[],
                                         max_new_tokens=max_new_tokens,
                                         do_sample=False, use_cache=True)
        output[str(index)] = {
            "origin_prompt": chat_input,
            "prediction": prediction,
            "refr": answer
        }
    return output
40
+
41
+
42
# Driver: walk ../national_test, run every known task file through the model,
# and persist predictions as one JSON file per task.
folder_path = "../national_test"
output_path = "./InternLM2-7B-Chat"
# exist_ok=True replaces the racy exists()+mkdir() pair and the redundant
# second check that previously sat inside the loop.
os.makedirs(output_path, exist_ok=True)
# All five national-exam tasks share the same generation routine.
id_to_task = {
    "4-1": generate_output,
    "4-2": generate_output,
    "4-3": generate_output,
    "4-4": generate_output,
    "4-5": generate_output,
}
for filename in os.listdir(folder_path):
    if filename.endswith('.json'):
        task_name = filename.split(".")[0]
        if task_name not in id_to_task:
            continue
        file_path = os.path.join(folder_path, filename)
        # Resume support: skip tasks whose output file already exists.
        if os.path.exists(os.path.join(output_path, filename)):
            continue
        output = id_to_task[task_name](file_path)
        with open(os.path.join(output_path, filename), "w", encoding='utf-8') as file:
            json.dump(output, file, ensure_ascii=False)

# %%
Evaluate Codes/3-Baichuan2-7b.py ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# %%
# Evaluation setup for Baichuan2-7B-Chat: load tokenizer and model once at
# module import so generate_output() below can reuse them for every task file.
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
import torch
import json
import os
from tqdm import tqdm
from functools import partial

# trust_remote_code is required: Baichuan2 ships custom modeling code.
tokenizer = AutoTokenizer.from_pretrained("baichuan-inc/Baichuan2-7B-Chat", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    "baichuan-inc/Baichuan2-7B-Chat",
    device_map="cuda:2",
    trust_remote_code=True, torch_dtype=torch.float16
).eval()
# Baichuan's chat() reads generation settings from model.generation_config,
# so load the model card's published config explicitly.
model.generation_config = GenerationConfig.from_pretrained("baichuan-inc/Baichuan2-7B-Chat")
19
def generate_output(file_path, max_new_tokens=128, prefix=""):
    """Evaluate Baichuan2-7B-Chat on one JSON task file.

    Reads a list of {"instruction", "question", "answer"} records, queries the
    globally loaded model once per record, and returns an index-keyed dict of
    prompts, predictions and reference answers. max_new_tokens and prefix are
    accepted for interface parity with the other evaluation scripts but are
    not forwarded to the model (generation length comes from
    model.generation_config).
    """
    with open(file_path, 'r', encoding='utf-8') as src:
        records = json.load(src)

    results = {}
    for idx, record in enumerate(tqdm(records, desc=file_path)):
        reference = record.get("answer")

        # Join instruction and question, then cap at 2000 characters.
        prompt = record.get("instruction") + "\n" + record.get("question")
        prompt = prompt[:2000]

        # Baichuan's chat API takes an OpenAI-style message list.
        reply = model.chat(tokenizer, [{"role": "user", "content": prompt}])

        results[str(idx)] = {
            "origin_prompt": prompt,
            "prediction": reply,
            "refr": reference,
        }
    return results
43
+
44
+
45
# Driver: walk ../national_test, run every known task file through the model,
# and persist predictions as one JSON file per task.
folder_path = "../national_test"
output_path = "./Baichuan2-7B-Chat"
# exist_ok=True replaces the racy exists()+mkdir() pair and the redundant
# second check that previously sat inside the loop.
os.makedirs(output_path, exist_ok=True)
# All five national-exam tasks share the same generation routine.
id_to_task = {
    "4-1": generate_output,
    "4-2": generate_output,
    "4-3": generate_output,
    "4-4": generate_output,
    "4-5": generate_output,
}
for filename in os.listdir(folder_path):
    if filename.endswith('.json'):
        task_name = filename.split(".")[0]
        if task_name not in id_to_task:
            continue
        file_path = os.path.join(folder_path, filename)
        # Resume support: skip tasks whose output file already exists.
        if os.path.exists(os.path.join(output_path, filename)):
            continue
        output = id_to_task[task_name](file_path)
        with open(os.path.join(output_path, filename), "w", encoding='utf-8') as file:
            json.dump(output, file, ensure_ascii=False)

# %%
Evaluate Codes/4-ChatGLM2-6b.py ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# %%
# Evaluation setup for ChatGLM2-6B: load tokenizer and model once at module
# import so generate_output() below can reuse them for every task file.
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
import torch
import json
import os
from tqdm import tqdm
from functools import partial

# trust_remote_code is required: ChatGLM2 ships custom modeling code.
tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm2-6b", trust_remote_code=True)
# Half-precision weights on GPU 4; .eval() disables dropout for inference.
model = AutoModelForCausalLM.from_pretrained(
    "THUDM/chatglm2-6b",
    device_map="cuda:4",
    trust_remote_code=True, torch_dtype=torch.float16
).eval()
18
def generate_output(file_path, max_new_tokens=128, prefix=""):
    """Evaluate ChatGLM2-6B on one JSON task file.

    Reads a list of {"instruction", "question", "answer"} records, queries the
    globally loaded model once per record, and returns an index-keyed dict of
    prompts, predictions and reference answers. max_new_tokens and prefix are
    accepted for interface parity with the other evaluation scripts but are
    not forwarded to the model.
    """
    with open(file_path, 'r', encoding='utf-8') as src:
        records = json.load(src)

    results = {}
    for idx, record in enumerate(tqdm(records, desc=file_path)):
        reference = record.get("answer")

        # Join instruction and question, then cap at 2000 characters.
        prompt = record.get("instruction") + "\n" + record.get("question")
        prompt = prompt[:2000]

        # Fresh history per record: each question is answered independently.
        reply, _history = model.chat(tokenizer, prompt, history=[])

        results[str(idx)] = {
            "origin_prompt": prompt,
            "prediction": reply,
            "refr": reference,
        }
    return results
38
+
39
+
40
# Driver: walk ../national_test, run every known task file through the model,
# and persist predictions as one JSON file per task.
folder_path = "../national_test"
output_path = "./ChatGLM2-6B-Chat"
# exist_ok=True replaces the racy exists()+mkdir() pair and the redundant
# second check that previously sat inside the loop.
os.makedirs(output_path, exist_ok=True)
# All five national-exam tasks share the same generation routine.
id_to_task = {
    "4-1": generate_output,
    "4-2": generate_output,
    "4-3": generate_output,
    "4-4": generate_output,
    "4-5": generate_output,
}
for filename in os.listdir(folder_path):
    if filename.endswith('.json'):
        task_name = filename.split(".")[0]
        if task_name not in id_to_task:
            continue
        file_path = os.path.join(folder_path, filename)
        # Resume support: skip tasks whose output file already exists.
        if os.path.exists(os.path.join(output_path, filename)):
            continue
        output = id_to_task[task_name](file_path)
        with open(os.path.join(output_path, filename), "w", encoding='utf-8') as file:
            json.dump(output, file, ensure_ascii=False)

# %%
Evaluate Codes/5-fuzimingcha-7b.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# %%
# Evaluation setup for fuzi-mingcha-v1_0 (a ChatGLM-based legal model):
# configure proxies/GPU, then load tokenizer and model once at module import.
from transformers import AutoModelForCausalLM, AutoTokenizer, AutoModel
from transformers.generation import GenerationConfig
import torch
import json
import os
from tqdm import tqdm
from functools import partial

# Set the HTTP proxy
os.environ['http_proxy'] = 'http://127.0.0.1:7890'

# Set the HTTPS proxy
os.environ['https_proxy'] = 'http://127.0.0.1:7890'

# Select the GPU
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

# Verify that the environment variables were set successfully
print(os.environ['http_proxy'])
print(os.environ['https_proxy'])

# AutoModel (not AutoModelForCausalLM) matches the ChatGLM-style remote code;
# .half().cuda() puts fp16 weights on the GPU chosen via CUDA_VISIBLE_DEVICES.
# NOTE(review): unlike the sibling scripts, .eval() is not called here —
# confirm whether eval mode is set inside the remote code.
tokenizer = AutoTokenizer.from_pretrained("SDUIRLab/fuzi-mingcha-v1_0", trust_remote_code=True)
model = AutoModel.from_pretrained("SDUIRLab/fuzi-mingcha-v1_0", trust_remote_code=True).half().cuda()
27
def generate_output(file_path, max_new_tokens=128, prefix=""):
    """Evaluate fuzi-mingcha on one JSON task file.

    Reads a list of {"instruction", "question", "answer"} records, queries the
    globally loaded model once per record, and returns an index-keyed dict of
    prompts, predictions and reference answers. max_new_tokens and prefix are
    accepted for interface parity with the other evaluation scripts but are
    not forwarded to the model.
    """
    with open(file_path, 'r', encoding='utf-8') as src:
        records = json.load(src)

    results = {}
    for idx, record in enumerate(tqdm(records, desc=file_path)):
        reference = record.get("answer")

        # Join instruction and question, then cap at 2000 characters.
        prompt = record.get("instruction") + "\n" + record.get("question")
        prompt = prompt[:2000]

        # Fresh history per record: each question is answered independently.
        reply, _history = model.chat(tokenizer, prompt, history=[])

        results[str(idx)] = {
            "origin_prompt": prompt,
            "prediction": reply,
            "refr": reference,
        }
    return results
46
+
47
+
48
# Driver: walk ../national_test, run every known task file through the model,
# and persist predictions as one JSON file per task.
folder_path = "../national_test"
output_path = "./Fuzi-Mingcha"
# exist_ok=True replaces the racy exists()+mkdir() pair and the redundant
# second check that previously sat inside the loop.
os.makedirs(output_path, exist_ok=True)
# All five national-exam tasks share the same generation routine.
id_to_task = {
    "4-1": generate_output,
    "4-2": generate_output,
    "4-3": generate_output,
    "4-4": generate_output,
    "4-5": generate_output,
}
for filename in os.listdir(folder_path):
    if filename.endswith('.json'):
        task_name = filename.split(".")[0]
        if task_name not in id_to_task:
            continue
        file_path = os.path.join(folder_path, filename)
        # Resume support: skip tasks whose output file already exists.
        if os.path.exists(os.path.join(output_path, filename)):
            continue
        output = id_to_task[task_name](file_path)
        with open(os.path.join(output_path, filename), "w", encoding='utf-8') as file:
            json.dump(output, file, ensure_ascii=False)

# %%