moshouxiaomu committed on
Commit
47383d6
·
verified ·
1 Parent(s): ff481a9

Upload 10 files

Browse files
eval_pat_1.py ADDED
@@ -0,0 +1,192 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pandas as pd
2
+ import re
3
+ from rdkit import Chem
4
+ from rdkit.Chem import AllChem
5
+ from rdkit import DataStructs
6
+ from tqdm import tqdm
7
+ import ast
8
+ import openai_api
9
+ import datetime
10
+ import time
11
+ from zoneinfo import ZoneInfo
12
+ import argparse
13
+ # import llama3_8b_instruct as llama
14
+
15
def canonical_smiles(smiles):
    """Return the canonical form of a SMILES string, or None if unparsable.

    Args:
        smiles: SMILES string to canonicalize.

    Returns:
        Canonical SMILES string, or None when RDKit cannot parse the input.
    """
    # BUG FIX: a bare ``except`` would also swallow KeyboardInterrupt and
    # SystemExit; catching Exception is sufficient for RDKit parse errors.
    try:
        mol = Chem.MolFromSmiles(smiles)
        return Chem.MolToSmiles(mol) if mol else None
    except Exception:
        return None
22
+
23
def parse_last_smiles(response):
    """Extract the payload of the last <SMILES>...</SMILES> tag in a reply.

    Returns the stripped tag content, or None when no tag is present.
    """
    found = re.findall(r'<SMILES>(.*?)</SMILES>', response, re.DOTALL)
    if not found:
        return None
    return found[-1].strip()
27
+
28
def calculate_metrics(pred_smiles, true_smiles_list):
    """Score a predicted SMILES against a list of acceptable answers.

    Args:
        pred_smiles: predicted SMILES string.
        true_smiles_list: iterable of reference SMILES strings; the best
            score over all references is reported.

    Returns:
        dict with 'Valid' (0/1: prediction parses), 'Exact_match' (0/1:
        canonical form equals any reference) and 'FTS' (max Morgan/Tanimoto
        similarity over references).
    """
    mol_pred = Chem.MolFromSmiles(pred_smiles)
    valid = 1 if mol_pred else 0

    max_fts = 0.0
    exact_match = 0

    # Only score when the prediction itself is valid.
    if valid:
        pred_canon = Chem.MolToSmiles(mol_pred)
        fp_pred = AllChem.GetMorganFingerprintAsBitVect(mol_pred, 2, nBits=2048)

        for true_smiles in true_smiles_list:
            try:
                mol_true = Chem.MolFromSmiles(true_smiles)
                if mol_true is None:
                    continue  # skip unparsable reference answers
                # BUG FIX: canonicalize the reference before comparing; the
                # original compared against the raw answer string, so an
                # equivalent but differently written answer was missed.
                if pred_canon == Chem.MolToSmiles(mol_true):
                    exact_match = 1
                # Keep the highest similarity over all references.
                fp_true = AllChem.GetMorganFingerprintAsBitVect(mol_true, 2, nBits=2048)
                max_fts = max(max_fts, DataStructs.TanimotoSimilarity(fp_pred, fp_true))
            except Exception:
                # Best-effort: one bad reference must not abort scoring.
                continue

    return {
        'Valid': valid,
        'Exact_match': exact_match,
        'FTS': max_fts
    }
67
+
68
def evaluate_dataset(input_path, output_path, model_name):
    """Run the full single-SMILES evaluation: query the model for every row
    of the benchmark CSV, score the predicted SMILES and write per-row
    results plus an on-screen summary.

    Relies on the module-level globals ``Eval_LLM`` and ``system_prompt``
    assigned in the ``__main__`` section.

    Args:
        input_path: benchmark CSV; 'Intermediate' is a Python-literal list
            of acceptable answer SMILES, 'Question_std' is the prompt text.
        output_path: destination CSV for per-row results.
        model_name: model identifier passed to the API client.
    """
    # Load the data; parse the answer lists from their string form.
    df = pd.read_csv(input_path)
    df['Intermediate'] = df['Intermediate'].apply(ast.literal_eval)
    results = []
    total_samples_processed = 0
    valid_sum = 0
    exact_match_sum = 0
    fts_sum = 0
    # Iterate over every sample.
    for index, row in tqdm(df.iterrows(), total=len(df), miniters=10):
        try:
            # Reference answers for this row.
            true_smiles = row['Intermediate']

            # Standard-prompt query.
            response_std = Eval_LLM.attempt_api_call(system_prompt, row['Question_std'], model=model_name)
            pred_smiles_std = parse_last_smiles(response_std)
            if pred_smiles_std == None:
                pred_smiles_std = response_std  # no <SMILES> tag: fall back to the raw reply
            metrics_std = {'Valid': 0, 'Exact_match': 0, 'FTS': 0.0}
            # Score the prediction.
            if pred_smiles_std:
                metrics_std = calculate_metrics(pred_smiles_std, true_smiles)

            # Build an independent record for this row.
            result_std = row.to_dict()
            result_std.update({
                'Model_response': response_std,
                'Predicted_SMILES': pred_smiles_std,
                'Prompt_Type': 'Standard',
                **metrics_std
            })
            results.append(result_std)

            # Update the running counters.
            total_samples_processed += 1
            valid_sum += metrics_std['Valid']
            exact_match_sum += metrics_std['Exact_match']
            fts_sum += metrics_std['FTS']

            # CoT prompting (currently disabled).
            # response_CoT = Eval_LLM.attempt_api_call(system_prompt, row['Question_CoT'], model=model_name)
            # pred_smiles_CoT = parse_last_smiles(response_CoT)
            # metrics_CoT = {'Valid': 0, 'Exact_match': 0, 'FTS': 0.0}
            # if pred_smiles_CoT:
            #     metrics_CoT = calculate_metrics(pred_smiles_CoT, true_smiles)

            # result_cot = row.to_dict()
            # result_cot.update({
            #     'Model_response': response_CoT,
            #     'Predicted_SMILES': pred_smiles_CoT,
            #     'Prompt_Type': 'CoT',
            #     **metrics_CoT
            # })
            # results.append(result_cot)
        except Exception as e:
            print(f"[ERROR] Error processing sample {index}: {e}")
            # NOTE(review): indentation of the progress report below was lost
            # in the source dump; it is reconstructed inside the error path
            # ("continue with the next sample") — confirm against the original.
            print(f"[INFO] Processed {total_samples_processed} samples so far.")
            if total_samples_processed > 0:
                avg_valid = valid_sum / total_samples_processed
                avg_exact = exact_match_sum / total_samples_processed
                avg_fts = fts_sum / total_samples_processed
                print(f"[INFO] Avg Valid: {avg_valid:.4f}, Exact: {avg_exact:.4f}, FTS: {avg_fts:.4f}")
            # Continue with the next sample.

    # Persist per-row results.
    result_df = pd.DataFrame(results)
    result_df.to_csv(output_path, index=False)

    # Aggregate metrics per prompt type.
    summary = result_df.groupby('Prompt_Type').agg({
        'Valid': 'mean',
        'Exact_match': 'mean',
        'FTS': 'mean'
    }).reset_index()

    print("\nEvaluation Summary:")
    for _, row in summary.iterrows():
        print(f"\nPrompt Type: {row['Prompt_Type']}")
        print(f"Validity_rate: {row['Valid']:.4f}")
        print(f"Exact_match_rate: {row['Exact_match']:.4f}")
        print(f"Average_FTS: {row['FTS']:.4f}")
155
+
156
def wait_until_target_time():
    """Sleep until the next 00:25 Beijing time (Asia/Shanghai)."""
    beijing = ZoneInfo("Asia/Shanghai")
    now = datetime.datetime.now(beijing)

    # Today's 00:25 in Beijing time.
    target_time = now.replace(hour=0, minute=25, second=0, microsecond=0)

    # Already past 00:25 today -> wait for tomorrow's 00:25 instead.
    if now >= target_time:
        target_time += datetime.timedelta(days=1)

    sleep_seconds = (target_time - now).total_seconds()

    print(f"距离0:25北京时间还剩{sleep_seconds:.2f}秒,开始休眠...")
    time.sleep(sleep_seconds)
174
+
175
+ # 使用示例
176
# Script entry point: evaluate `model_name` on pattern-1 benchmark,
# producing one result CSV per run index in [start_index, end_index).
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Evaluate models on chemical dataset.')
    parser.add_argument('--model_name', type=str, required=True, help='Name of the model to evaluate (e.g., gpt-4o, deepseek).')
    parser.add_argument('--start_index', type=int, default=1, help='Start index for output files (default: 1).')
    parser.add_argument('--end_index', type=int, default=6, help='End index for output files (exclusive, default: 6).')
    # BUG FIX: the help text was a copy-paste of --end_index's description.
    parser.add_argument('--api_from', type=str, default="xunfei", help='API provider to use (default: xunfei).')
    args = parser.parse_args()

    # Shared client and prompt used by evaluate_dataset via module globals.
    Eval_LLM = openai_api.OpenAIClient(api_from=args.api_from)
    system_prompt = 'You are an expert in chemistry.'

    # wait_until_target_time()
    input_path = '/home/xshe/KG/eval/benchmark/new_benchmark/new_pattern_1.csv'
    output_base_path = f'/home/xshe/KG/eval/benchmark/new_results/pat1_results_{args.model_name}_'
    # One full evaluation run per output-file index (end_index exclusive).
    for i in range(args.start_index, args.end_index):
        output_path = f"{output_base_path}{i}.csv"
        evaluate_dataset(input_path, output_path, args.model_name)
eval_pat_2.py ADDED
@@ -0,0 +1,233 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pandas as pd
2
+ import re
3
+ from rdkit import Chem
4
+ from rdkit.Chem import AllChem
5
+ from rdkit import DataStructs
6
+ from tqdm import tqdm
7
+ import ast
8
+ import openai_api
9
+ import datetime
10
+ import time
11
+ from zoneinfo import ZoneInfo
12
+ # import ChemDFM
13
+ import argparse
14
+
15
def canonical_smiles(smiles):
    """Canonicalize a SMILES string; return None when parsing fails."""
    try:
        parsed = Chem.MolFromSmiles(smiles)
        if parsed is None:
            return None
        return Chem.MolToSmiles(parsed)
    except:
        return None
22
+
23
def parse_two_smiles(response):
    """Return the last two <SMILES>...</SMILES> payloads from a reply.

    Returns [second_to_last, last] when two or more tags are present,
    [only_one, None] for a single tag, and [None, None] otherwise.
    """
    hits = re.findall(r'<SMILES>(.*?)</SMILES>', response, re.DOTALL)
    count = len(hits)
    if count == 0:
        return [None, None]
    if count == 1:
        return [hits[0].strip(), None]
    return [hits[-2].strip(), hits[-1].strip()]
37
+
38
def evaluate_prediction_against_answer(pred_smiles, answer):
    """Score one predicted SMILES against one reference answer.

    Returns a dict with 'Valid' (prediction parses), 'Exact_match'
    (canonical forms are equal) and 'FTS' (Morgan/Tanimoto similarity).
    When ``answer`` is None only validity is scored.

    Raises:
        ValueError: if the reference answer itself is not valid SMILES.
    """
    if answer is None:
        # No reference: only check that the prediction parses.
        if not pred_smiles:
            return {'Valid': 0, 'Exact_match': 0, 'FTS': 0.0}
        pred_mol = Chem.MolFromSmiles(pred_smiles)
        if pred_mol is None:
            return {'Valid': 0, 'Exact_match': 0, 'FTS': 0.0}
        else:
            return {'Valid': 1, 'Exact_match': 0, 'FTS': 0.0}

    # Validate and canonicalize the reference answer.
    ans_mol = Chem.MolFromSmiles(answer)
    if ans_mol is None:
        raise ValueError(f"答案 {answer} 无效")
    ans_canon = Chem.MolToSmiles(ans_mol)

    # Validate and canonicalize the prediction.
    if not pred_smiles:
        return {'Valid': 0, 'Exact_match': 0, 'FTS': 0.0}
    pred_mol = Chem.MolFromSmiles(pred_smiles)
    if pred_mol is None:
        return {'Valid': 0, 'Exact_match': 0, 'FTS': 0.0}
    pred_canon = Chem.MolToSmiles(pred_mol)

    # Exact match on canonical SMILES.
    exact_match = 1 if pred_canon == ans_canon else 0

    # Fingerprint Tanimoto similarity (Morgan radius 2, 2048 bits).
    fp_pred = AllChem.GetMorganFingerprintAsBitVect(pred_mol, 2, nBits=2048)
    fp_ans = AllChem.GetMorganFingerprintAsBitVect(ans_mol, 2, nBits=2048)
    fts = DataStructs.TanimotoSimilarity(fp_pred, fp_ans)

    return {
        'Valid': 1,
        'Exact_match': exact_match,
        'FTS': fts
    }
76
+
77
def evaluate_two_predictions(pred_smiles_1, pred_smiles_2, answer_list):
    """Pick, over all candidate answer pairs, the scoring that maximises the
    average exact-match (ties broken by average FTS) and return the two
    per-prediction metric dicts.

    Args:
        pred_smiles_1, pred_smiles_2: predicted SMILES (may be None).
        answer_list: iterable of (answer_for_pred1, answer_for_pred2) pairs.

    Returns:
        Tuple (metrics_for_pred1, metrics_for_pred2).
    """
    max_em, max_fts = -1, -1
    # BUG FIX: start from zero-valued metric dicts instead of empty dicts so
    # an empty ``answer_list`` cannot leak ``{}`` to callers that index
    # 'Valid' / 'Exact_match' / 'FTS'.
    best_metric_1 = {'Valid': 0, 'Exact_match': 0, 'FTS': 0.0}
    best_metric_2 = {'Valid': 0, 'Exact_match': 0, 'FTS': 0.0}
    for answer in answer_list:
        metric_1 = evaluate_prediction_against_answer(pred_smiles_1, answer[0])
        metric_2 = evaluate_prediction_against_answer(pred_smiles_2, answer[1])
        avg_em = (metric_1['Exact_match'] + metric_2['Exact_match']) / 2
        avg_fts = (metric_1['FTS'] + metric_2['FTS']) / 2
        # Prefer higher exact-match; break ties on similarity.
        if (avg_em > max_em) or \
           (avg_em == max_em and avg_fts > max_fts):
            max_em = avg_em
            max_fts = avg_fts
            best_metric_1 = metric_1
            best_metric_2 = metric_2
    return best_metric_1, best_metric_2
93
+
94
def evaluate_dataset(input_path, output_path, model_name):
    """Run the paired-SMILES evaluation: query the model for every row,
    score the two predicted SMILES against the answer pairs and write
    per-row results plus a console summary.

    Relies on module-level globals ``Eval_LLM`` and ``system_prompt``.

    Args:
        input_path: benchmark CSV; 'Intermediate' holds a Python-literal
            list of answer pairs, 'Question_std' the prompt text.
        output_path: destination CSV for per-row results.
        model_name: model identifier passed to the API client.

    Returns:
        dict with mean validity, exact-match and FTS over scored rows.
    """
    df = pd.read_csv(input_path)
    results = []

    for index, row in tqdm(df.iterrows(), total=len(df)):
        answer_list = ast.literal_eval(row['Intermediate'])
        try:
            # Standard prompt: expect two <SMILES> tags in the reply.
            response_std = Eval_LLM.attempt_api_call(system_prompt, row['Question_std'], model=model_name, temperature=0.2)
            pred_smiles_std_1, pred_smiles_std_2 = parse_two_smiles(response_std)
            metrics_std_1, metrics_std_2 = evaluate_two_predictions(
                pred_smiles_std_1, pred_smiles_std_2, list(answer_list)
            )
            # Row-level scores are the mean over the two predictions.
            valid_std = (metrics_std_1['Valid'] + metrics_std_2['Valid']) / 2
            exact_std = (metrics_std_1['Exact_match'] + metrics_std_2['Exact_match']) / 2
            fts_std = (metrics_std_1['FTS'] + metrics_std_2['FTS']) / 2

            result = row.to_dict()
            result.update({
                'Model_response_std': response_std,
                # Standard-prompt prediction results.
                'Pred_SMILES_std_1': pred_smiles_std_1,
                'Pred_SMILES_std_2': pred_smiles_std_2,
                'Valid_std_1': metrics_std_1['Valid'],
                'Exact_match_std_1': metrics_std_1['Exact_match'],
                'FTS_std_1': metrics_std_1['FTS'],
                'Valid_std_2': metrics_std_2['Valid'],
                'Exact_match_std_2': metrics_std_2['Exact_match'],
                'FTS_std_2': metrics_std_2['FTS'],
                'Valid_std': valid_std,
                'Exact_match_std': exact_std,
                'FTS_std': fts_std,
            })
            results.append(result)
        except Exception as e:
            # BUG FIX: the original bare ``except`` printed a fixed message
            # and discarded the exception (it would even swallow
            # KeyboardInterrupt); report which row failed and why.
            print(f"出现错误 (sample {index}): {e}")
            continue

    # Persist per-row results.
    result_df = pd.DataFrame(results)
    result_df.to_csv(output_path, index=False)

    # ROBUSTNESS: if every row failed the DataFrame has no columns and the
    # original code raised KeyError; return a zeroed summary instead.
    if result_df.empty:
        print("[WARN] no samples were scored")
        return {'Validity_rate_std': 0.0, 'Exact_match_rate_std': 0.0, 'Average_FTS_std': 0.0}

    # Aggregate metrics.
    summary = {
        'Validity_rate_std': result_df['Valid_std'].mean(),
        'Exact_match_rate_std': result_df['Exact_match_std'].mean(),
        'Average_FTS_std': result_df['FTS_std'].mean()
    }

    print("\nEvaluation Summary:")
    print("Standard Questions:")
    print(f"Validity Rate: {summary['Validity_rate_std']:.4f}")
    print(f"Exact Match Rate: {summary['Exact_match_rate_std']:.4f}")
    print(f"Average FTS: {summary['Average_FTS_std']:.4f}")
    return summary
183
+
184
def wait_until_target_time():
    """Block until the next 00:25 Beijing time (Asia/Shanghai)."""
    # Beijing timezone.
    tz = ZoneInfo("Asia/Shanghai")
    now = datetime.datetime.now(tz)

    # Today's 00:25 as the target.
    target_time = now.replace(hour=0, minute=25, second=0, microsecond=0)

    # If 00:25 has already passed today, aim for tomorrow's 00:25.
    # (The original comment said 23:00, but the code targets 00:25.)
    if now >= target_time:
        target_time += datetime.timedelta(days=1)

    # Time left until the target.
    delta = target_time - now
    sleep_seconds = delta.total_seconds()

    print(f"距离0:25北京时间还剩{sleep_seconds:.2f}秒,开始休眠...")
    time.sleep(sleep_seconds)
202
+
203
+ # 使用示例
204
# Usage example / script entry point.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Evaluate models on chemical dataset.')
    parser.add_argument('--model_name', type=str, required=True, help='Name of the model to evaluate (e.g., gpt-4o, deepseek).')
    parser.add_argument('--start_index', type=int, default=1, help='Start index for output files (default: 1).')
    parser.add_argument('--end_index', type=int, default=6, help='End index for output files (exclusive, default: 6).')
    # NOTE(review): the help text below is a copy-paste of --end_index's;
    # this flag actually selects the API provider (default: xunfei).
    parser.add_argument('--api_from', type=str, default="xunfei", help='End index for output files (exclusive, default: 6).')
    args = parser.parse_args()

    # Shared client and prompt used by evaluate_dataset via module globals.
    Eval_LLM = openai_api.OpenAIClient(api_from=args.api_from)
    system_prompt = 'You are an expert in chemistry.'

    # wait_until_target_time()
    input_path = '/home/xshe/KG/eval/benchmark/new_benchmark/new_pattern_5.csv'
    output_base_path = f'/home/xshe/KG/eval/benchmark/new_results/pat5_results_{args.model_name}_'
    all_summary = []
    # One evaluation run per output-file index (end_index is exclusive).
    for i in range(args.start_index, args.end_index):
        output_path = f"{output_base_path}{i}.csv"
        summary = evaluate_dataset(input_path, output_path, args.model_name)
        all_summary.append(summary)

    # Re-print every run's summary at the end.
    for summary in all_summary:
        print("\nEvaluation Summary:")
        print("Standard Questions:")
        print(f"Validity Rate: {summary['Validity_rate_std']:.4f}")
        print(f"Exact Match Rate: {summary['Exact_match_rate_std']:.4f}")
        print(f"Average FTS: {summary['Average_FTS_std']:.4f}")
        # print("\nCoT Questions:")
        # print(f"Validity Rate: {summary['Validity_rate_cot']:.4f}")
        # print(f"Exact Match Rate: {summary['Exact_match_rate_cot']:.4f}")
        # print(f"Average FTS: {summary['Average_FTS_cot']:.4f}")
eval_pat_3.py ADDED
@@ -0,0 +1,251 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pandas as pd
2
+ import re
3
+ import ast
4
+ from rdkit import Chem
5
+ from rdkit.Chem import AllChem
6
+ from rdkit import DataStructs
7
+ from tqdm import tqdm
8
+ import itertools
9
+ import openai_api
10
+ import datetime
11
+ import time
12
+ from zoneinfo import ZoneInfo
13
+ import argparse
14
+
15
def canonical_smiles(smiles):
    """Canonicalize a SMILES string; None when RDKit cannot parse it."""
    try:
        molecule = Chem.MolFromSmiles(smiles)
        if molecule is None:
            return None
        return Chem.MolToSmiles(molecule)
    except:
        return None
22
+
23
def parse_two_smiles(response):
    """Extract up to two <SMILES>...</SMILES> payloads from a reply.

    With two or more tags returns [last, second_to_last] (note the
    reversed order); with exactly one returns [that_one, None]; with
    none returns [None, None].
    """
    tags = re.findall(r'<SMILES>(.*?)</SMILES>', response, re.DOTALL)
    if not tags:
        return [None, None]
    if len(tags) == 1:
        return [tags[0].strip(), None]
    return [tags[-1].strip(), tags[-2].strip()]
37
+
38
def evaluate_prediction_against_answer(pred_smiles, answer):
    """Score one predicted SMILES against one reference answer.

    Returns a dict with 'Valid' (prediction parses), 'Exact_match'
    (canonical forms are equal) and 'FTS' (Morgan/Tanimoto similarity).
    When ``answer`` is None only validity is scored.

    Raises:
        ValueError: if the reference answer itself is not valid SMILES.
    """
    if answer is None:
        # No reference: only check that the prediction parses.
        if not pred_smiles:
            return {'Valid': 0, 'Exact_match': 0, 'FTS': 0.0}
        pred_mol = Chem.MolFromSmiles(pred_smiles)
        if pred_mol is None:
            return {'Valid': 0, 'Exact_match': 0, 'FTS': 0.0}
        else:
            return {'Valid': 1, 'Exact_match': 0, 'FTS': 0.0}

    # Validate and canonicalize the reference answer.
    ans_mol = Chem.MolFromSmiles(answer)
    if ans_mol is None:
        raise ValueError(f"答案 {answer} 无效")
    ans_canon = Chem.MolToSmiles(ans_mol)

    # Validate and canonicalize the prediction.
    if not pred_smiles:
        return {'Valid': 0, 'Exact_match': 0, 'FTS': 0.0}
    pred_mol = Chem.MolFromSmiles(pred_smiles)
    if pred_mol is None:
        return {'Valid': 0, 'Exact_match': 0, 'FTS': 0.0}
    pred_canon = Chem.MolToSmiles(pred_mol)

    # Exact match on canonical SMILES.
    exact_match = 1 if pred_canon == ans_canon else 0

    # Fingerprint Tanimoto similarity (Morgan radius 2, 2048 bits).
    fp_pred = AllChem.GetMorganFingerprintAsBitVect(pred_mol, 2, nBits=2048)
    fp_ans = AllChem.GetMorganFingerprintAsBitVect(ans_mol, 2, nBits=2048)
    fts = DataStructs.TanimotoSimilarity(fp_pred, fp_ans)

    return {
        'Valid': 1,
        'Exact_match': exact_match,
        'FTS': fts
    }
76
+
77
def evaluate_two_predictions(pred1, pred2, answer_list):
    """Find, over all ordered pairs of distinct answers, the assignment of
    answers to the two predictions that maximises total exact-match (ties
    broken by average FTS).

    Args:
        pred1, pred2: predicted SMILES strings (may be None).
        answer_list: list of candidate answer SMILES; must contain >= 2.

    Returns:
        (metrics_for_pred1, metrics_for_pred2, best_total_exact_match,
         best_average_fts)

    Raises:
        ValueError: if answer_list contains exactly one element.
    """
    best_metrics1 = {'Valid': 0, 'Exact_match': 0, 'FTS': 0.0}
    best_metrics2 = {'Valid': 0, 'Exact_match': 0, 'FTS': 0.0}
    best_total_em = -1
    best_avg_fts = -1.0

    # No answers at all: only validity can be scored.
    if not answer_list:
        m1 = evaluate_prediction_against_answer(pred1, None)
        m2 = evaluate_prediction_against_answer(pred2, None)
        return m1, m2, 0, 0.0

    possible_pairs = []
    if len(answer_list) >= 2:
        # Try every ordered pairing of two distinct answers.
        possible_pairs.extend(itertools.permutations(answer_list, 2))
    else:
        raise ValueError("标准答案只有一个")

    for ans1, ans2 in possible_pairs:
        m1 = evaluate_prediction_against_answer(pred1, ans1)
        m2 = evaluate_prediction_against_answer(pred2, ans2)
        total_em = m1['Exact_match'] + m2['Exact_match']
        avg_fts = (m1['FTS'] + m2['FTS']) / 2

        # Keep the best assignment: exact-match first, then similarity.
        if (total_em > best_total_em) or \
           (total_em == best_total_em and avg_fts > best_avg_fts):
            best_total_em = total_em
            best_avg_fts = avg_fts
            best_metrics1 = m1
            best_metrics2 = m2

    return best_metrics1, best_metrics2, best_total_em, best_avg_fts
109
+
110
+
111
def evaluate_dataset(input_path, output_path, model_name):
    """Run the two-reactant evaluation: for every row, ask the model for two
    SMILES, score them against the 'Answer' list with the optimal pairing,
    save per-row results and return a summary dict.

    Relies on module-level globals ``Eval_LLM`` and ``system_prompt``.
    """
    df = pd.read_csv(input_path)
    results = []

    for _, row in tqdm(df.iterrows(), total=len(df)):
        try:
            # Standard-prompt evaluation.
            response_std = Eval_LLM.attempt_api_call(system_prompt, row['Question_std'], model=model_name)
            # 1) Parse the two SMILES strings (the two reactant candidates).
            pred_smiles_std_1, pred_smiles_std_2 = parse_two_smiles(response_std)
            # 2) Parse the list of reference answers.
            answer_list = ast.literal_eval(row['Answer'])

            metrics_std_1, metrics_std_2, total_em_std, avg_fts_std = evaluate_two_predictions(
                pred_smiles_std_1, pred_smiles_std_2, list(answer_list)
            )
            # Row-level scores: mean over the two predictions.
            valid_std = (metrics_std_1['Valid'] + metrics_std_2['Valid']) / 2
            exact_std = (metrics_std_1['Exact_match'] + metrics_std_2['Exact_match']) / 2
            fts_std = (metrics_std_1['FTS'] + metrics_std_2['FTS']) / 2

            # CoT evaluation (disabled).
            # response_cot = Eval_LLM.attempt_api_call(system_prompt, row['Question_cot'], model=model_name)
            # pred_cot_1, pred_cot_2 = parse_two_smiles(response_cot)

            # metrics_cot_1, metrics_cot_2, total_em_cot, avg_fts_cot = evaluate_two_predictions(
            #     pred_cot_1, pred_cot_2, list(answer_list)
            # )
            # valid_cot = (metrics_cot_1['Valid'] + metrics_cot_2['Valid']) / 2
            # exact_cot = (metrics_cot_1['Exact_match'] + metrics_cot_2['Exact_match']) / 2
            # fts_cot = (metrics_cot_1['FTS'] + metrics_cot_2['FTS']) / 2

            result = row.to_dict()
            result.update({
                'Model_response_std': response_std,
                # 'Model_response_cot': response_cot,
                # Standard-prompt prediction results.
                'Pred_SMILES_std_1': pred_smiles_std_1,
                'Pred_SMILES_std_2': pred_smiles_std_2,
                'Valid_std_1': metrics_std_1['Valid'],
                'Exact_match_std_1': metrics_std_1['Exact_match'],
                'FTS_std_1': metrics_std_1['FTS'],
                'Valid_std_2': metrics_std_2['Valid'],
                'Exact_match_std_2': metrics_std_2['Exact_match'],
                'FTS_std_2': metrics_std_2['FTS'],
                'Valid_std': valid_std,
                'Exact_match_std': exact_std,
                'FTS_std': fts_std
                # CoT prediction results (disabled).
                # 'Pred_SMILES_cot_1': pred_cot_1,
                # 'Pred_SMILES_cot_2': pred_cot_2,
                # 'Valid_cot_1': metrics_cot_1['Valid'],
                # 'Exact_match_cot_1': metrics_cot_1['Exact_match'],
                # 'FTS_cot_1': metrics_cot_1['FTS'],
                # 'Valid_cot_2': metrics_cot_2['Valid'],
                # 'Exact_match_cot_2': metrics_cot_2['Exact_match'],
                # 'FTS_cot_2': metrics_cot_2['FTS'],
                # 'Valid_cot': valid_cot,
                # 'Exact_match_cot': exact_cot,
                # 'FTS_cot': fts_cot,
            })
            results.append(result)
        except:
            # NOTE(review): bare except silently drops the row and the
            # error details — consider ``except Exception as e``.
            print("出现错误")
            continue

    # Persist per-row results.
    result_df = pd.DataFrame(results)
    result_df.to_csv(output_path, index=False)

    # Aggregate metrics.
    summary = {
        'Validity_rate_std': result_df['Valid_std'].mean(),
        'Exact_match_rate_std': result_df['Exact_match_std'].mean(),
        'Average_FTS_std': result_df['FTS_std'].mean()
        # 'Validity_rate_cot': result_df['Valid_cot'].mean(),
        # 'Exact_match_rate_cot': result_df['Exact_match_cot'].mean(),
        # 'Average_FTS_cot': result_df['FTS_cot'].mean()
    }

    print("\nEvaluation Summary:")
    print("Standard Questions:")
    print(f"Validity Rate: {summary['Validity_rate_std']:.4f}")
    print(f"Exact Match Rate: {summary['Exact_match_rate_std']:.4f}")
    print(f"Average FTS: {summary['Average_FTS_std']:.4f}")
    # print("\nCoT Questions:")
    # print(f"Validity Rate: {summary['Validity_rate_cot']:.4f}")
    # print(f"Exact Match Rate: {summary['Exact_match_rate_cot']:.4f}")
    # print(f"Average FTS: {summary['Average_FTS_cot']:.4f}")
    return summary
202
+
203
def wait_until_target_time():
    """Block until the next 00:25 Beijing time (Asia/Shanghai)."""
    # Beijing timezone.
    tz = ZoneInfo("Asia/Shanghai")
    now = datetime.datetime.now(tz)

    # Today's 00:25 as the target.
    target_time = now.replace(hour=0, minute=25, second=0, microsecond=0)

    # If 00:25 has already passed today, aim for tomorrow's 00:25.
    # (The original comment said 23:00, but the code targets 00:25.)
    if now >= target_time:
        target_time += datetime.timedelta(days=1)

    # Time left until the target.
    delta = target_time - now
    sleep_seconds = delta.total_seconds()

    print(f"距离0:25北京时间还剩{sleep_seconds:.2f}秒,开始休眠...")
    time.sleep(sleep_seconds)
221
+ # 使用示例
222
# Usage example / script entry point.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Evaluate models on chemical dataset.')
    parser.add_argument('--model_name', type=str, default="qwen-plus-2025-04-28", required=False, help='Name of the model to evaluate (e.g., gpt-4o, deepseek).')
    parser.add_argument('--start_index', type=int, default=1, help='Start index for output files (default: 1).')
    parser.add_argument('--end_index', type=int, default=6, help='End index for output files (exclusive, default: 6).')
    # NOTE(review): the help text below is a copy-paste of --end_index's;
    # this flag actually selects the API provider (default: aliyun).
    parser.add_argument('--api_from', type=str, default="aliyun", help='End index for output files (exclusive, default: 6).')
    args = parser.parse_args()

    # Shared client and prompt used by evaluate_dataset via module globals.
    Eval_LLM = openai_api.OpenAIClient(api_from=args.api_from)
    system_prompt = 'You are an expert in chemistry.'

    # wait_until_target_time()
    # NOTE(review): this file is eval_pat_3.py but reads pattern_2 data and
    # writes pat2 results — confirm the pattern/file pairing is intended.
    input_path = '/home/xshe/KG/eval/benchmark/new_benchmark/new_pattern_2.csv'
    output_base_path = f'/home/xshe/KG/eval/benchmark/new_results/pat2_results_{args.model_name}_'
    all_summary = []
    # One evaluation run per output-file index (end_index is exclusive).
    for i in range(args.start_index, args.end_index):
        output_path = f"{output_base_path}{i}.csv"
        summary = evaluate_dataset(input_path, output_path, args.model_name)
        all_summary.append(summary)

    # Re-print every run's summary at the end.
    for summary in all_summary:
        print("\nEvaluation Summary:")
        print("Standard Questions:")
        print(f"Validity Rate: {summary['Validity_rate_std']:.4f}")
        print(f"Exact Match Rate: {summary['Exact_match_rate_std']:.4f}")
        print(f"Average FTS: {summary['Average_FTS_std']:.4f}")
        # print("\nCoT Questions:")
        # print(f"Validity Rate: {summary['Validity_rate_cot']:.4f}")
        # print(f"Exact Match Rate: {summary['Exact_match_rate_cot']:.4f}")
        # print(f"Average FTS: {summary['Average_FTS_cot']:.4f}")
eval_pat_4.py ADDED
@@ -0,0 +1,252 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pandas as pd
2
+ import re
3
+ import ast
4
+ from rdkit import Chem
5
+ from rdkit.Chem import AllChem
6
+ from rdkit import DataStructs
7
+ from tqdm import tqdm
8
+ import itertools
9
+ import openai_api
10
+ import datetime
11
+ import time
12
+ from zoneinfo import ZoneInfo
13
+ import argparse
14
+
15
def canonical_smiles(smiles):
    """Return the canonical SMILES for *smiles*, or None if unparsable."""
    try:
        mol = Chem.MolFromSmiles(smiles)
        return Chem.MolToSmiles(mol) if mol else None
    except:  # NOTE(review): bare except also catches KeyboardInterrupt
        return None
22
+
23
def parse_two_smiles(response):
    """Pull up to two <SMILES>...</SMILES> payloads out of a model reply.

    Two or more tags -> [last, second_to_last]; one tag -> [tag, None];
    no tags -> [None, None].
    """
    found = re.findall(r'<SMILES>(.*?)</SMILES>', response, re.DOTALL)
    n = len(found)
    if n >= 2:
        newest, older = found[-1].strip(), found[-2].strip()
        return [newest, older]
    if n == 1:
        return [found[0].strip(), None]
    return [None, None]
37
+
38
def evaluate_prediction_against_answer(pred_smiles, answer):
    """Score one predicted SMILES against one reference answer.

    Returns a dict with 'Valid' (prediction parses), 'Exact_match'
    (canonical forms are equal) and 'FTS' (Morgan/Tanimoto similarity).
    When ``answer`` is None only validity is scored.

    Raises:
        ValueError: if the reference answer itself is not valid SMILES.
    """
    if answer is None:
        # No reference: only check that the prediction parses.
        if not pred_smiles:
            return {'Valid': 0, 'Exact_match': 0, 'FTS': 0.0}
        pred_mol = Chem.MolFromSmiles(pred_smiles)
        if pred_mol is None:
            return {'Valid': 0, 'Exact_match': 0, 'FTS': 0.0}
        else:
            return {'Valid': 1, 'Exact_match': 0, 'FTS': 0.0}

    # Validate and canonicalize the reference answer.
    ans_mol = Chem.MolFromSmiles(answer)
    if ans_mol is None:
        raise ValueError(f"答案 {answer} 无效")
    ans_canon = Chem.MolToSmiles(ans_mol)

    # Validate and canonicalize the prediction.
    if not pred_smiles:
        return {'Valid': 0, 'Exact_match': 0, 'FTS': 0.0}
    pred_mol = Chem.MolFromSmiles(pred_smiles)
    if pred_mol is None:
        return {'Valid': 0, 'Exact_match': 0, 'FTS': 0.0}
    pred_canon = Chem.MolToSmiles(pred_mol)

    # Exact match on canonical SMILES.
    exact_match = 1 if pred_canon == ans_canon else 0

    # Fingerprint Tanimoto similarity (Morgan radius 2, 2048 bits).
    fp_pred = AllChem.GetMorganFingerprintAsBitVect(pred_mol, 2, nBits=2048)
    fp_ans = AllChem.GetMorganFingerprintAsBitVect(ans_mol, 2, nBits=2048)
    fts = DataStructs.TanimotoSimilarity(fp_pred, fp_ans)

    return {
        'Valid': 1,
        'Exact_match': exact_match,
        'FTS': fts
    }
76
+
77
def evaluate_two_predictions(pred1, pred2, answer_list):
    """Find, over all ordered pairs of distinct answers, the assignment of
    answers to the two predictions that maximises total exact-match (ties
    broken by average FTS).

    Args:
        pred1, pred2: predicted SMILES strings (may be None).
        answer_list: list of candidate answer SMILES; must contain >= 2.

    Returns:
        (metrics_for_pred1, metrics_for_pred2, best_total_exact_match,
         best_average_fts)

    Raises:
        ValueError: if answer_list contains exactly one element.
    """
    best_metrics1 = {'Valid': 0, 'Exact_match': 0, 'FTS': 0.0}
    best_metrics2 = {'Valid': 0, 'Exact_match': 0, 'FTS': 0.0}
    best_total_em = -1
    best_avg_fts = -1.0

    # No answers at all: only validity can be scored.
    if not answer_list:
        m1 = evaluate_prediction_against_answer(pred1, None)
        m2 = evaluate_prediction_against_answer(pred2, None)
        return m1, m2, 0, 0.0

    possible_pairs = []
    if len(answer_list) >= 2:
        # Try every ordered pairing of two distinct answers.
        possible_pairs.extend(itertools.permutations(answer_list, 2))
    else:
        raise ValueError("标准答案只有一个")

    for ans1, ans2 in possible_pairs:
        m1 = evaluate_prediction_against_answer(pred1, ans1)
        m2 = evaluate_prediction_against_answer(pred2, ans2)
        total_em = m1['Exact_match'] + m2['Exact_match']
        avg_fts = (m1['FTS'] + m2['FTS']) / 2

        # Keep the best assignment: exact-match first, then similarity.
        if (total_em > best_total_em) or \
           (total_em == best_total_em and avg_fts > best_avg_fts):
            best_total_em = total_em
            best_avg_fts = avg_fts
            best_metrics1 = m1
            best_metrics2 = m2

    return best_metrics1, best_metrics2, best_total_em, best_avg_fts
109
+
110
+
111
def evaluate_dataset(input_path, output_path, model_name):
    """Run the full evaluation loop over one benchmark CSV.

    For each row: query the model with the standard question
    (``Question_std``), parse up to two predicted SMILES from the
    response, score them against the row's answer list, and collect
    per-row metrics. All rows are written to *output_path*; a summary
    dict of mean metrics is returned.

    Relies on the module-level ``Eval_LLM`` client and ``system_prompt``.
    CoT-question evaluation was commented out upstream and is omitted here.

    Args:
        input_path: Path to the benchmark CSV (needs ``Question_std`` and
            ``Answer`` columns; ``Answer`` is a Python-literal list string).
        output_path: Where the per-row results CSV is written.
        model_name: Model identifier passed to the API client.

    Returns:
        Dict with ``Validity_rate_std``, ``Exact_match_rate_std`` and
        ``Average_FTS_std`` (NaN for each if no row succeeded).
    """
    df = pd.read_csv(input_path)
    results = []

    for _, row in tqdm(df.iterrows(), total=len(df)):
        try:
            # Standard-question evaluation.
            response_std = Eval_LLM.attempt_api_call(system_prompt, row['Question_std'], model=model_name)
            # 1) Parse the two predicted SMILES (two reactant combinations).
            pred_smiles_std_1, pred_smiles_std_2 = parse_two_smiles(response_std)
            # 2) Parse the reference answer list.
            answer_list = ast.literal_eval(row['Answer'])

            metrics_std_1, metrics_std_2, total_em_std, avg_fts_std = evaluate_two_predictions(
                pred_smiles_std_1, pred_smiles_std_2, list(answer_list)
            )
            # Per-row aggregates: mean over the two predictions.
            valid_std = (metrics_std_1['Valid'] + metrics_std_2['Valid']) / 2
            exact_std = (metrics_std_1['Exact_match'] + metrics_std_2['Exact_match']) / 2
            fts_std = (metrics_std_1['FTS'] + metrics_std_2['FTS']) / 2

            result = row.to_dict()
            result.update({
                'Model_response_std': response_std,
                # Standard-question prediction results.
                'Pred_SMILES_std_1': pred_smiles_std_1,
                'Pred_SMILES_std_2': pred_smiles_std_2,
                'Valid_std_1': metrics_std_1['Valid'],
                'Exact_match_std_1': metrics_std_1['Exact_match'],
                'FTS_std_1': metrics_std_1['FTS'],
                'Valid_std_2': metrics_std_2['Valid'],
                'Exact_match_std_2': metrics_std_2['Exact_match'],
                'FTS_std_2': metrics_std_2['FTS'],
                'Valid_std': valid_std,
                'Exact_match_std': exact_std,
                'FTS_std': fts_std,
            })
            results.append(result)
        except Exception as e:
            # Was a bare ``except`` printing a fixed message; keep the
            # row-skip behavior but surface the actual error for debugging.
            print(f"Error while evaluating row: {e!r}")
            continue

    # Persist per-row results.
    result_df = pd.DataFrame(results)
    result_df.to_csv(output_path, index=False)

    # Aggregate summary metrics; guard against the every-row-failed case,
    # where result_df has no columns and the lookups would KeyError.
    if result_df.empty:
        summary = {
            'Validity_rate_std': float('nan'),
            'Exact_match_rate_std': float('nan'),
            'Average_FTS_std': float('nan'),
        }
    else:
        summary = {
            'Validity_rate_std': result_df['Valid_std'].mean(),
            'Exact_match_rate_std': result_df['Exact_match_std'].mean(),
            'Average_FTS_std': result_df['FTS_std'].mean(),
        }

    print("\nEvaluation Summary:")
    print("Standard Questions:")
    print(f"Validity Rate: {summary['Validity_rate_std']:.4f}")
    print(f"Exact Match Rate: {summary['Exact_match_rate_std']:.4f}")
    print(f"Average FTS: {summary['Average_FTS_std']:.4f}")
    return summary
202
+
203
def wait_until_target_time(hour=0, minute=25):
    """Sleep until the next occurrence of *hour*:*minute* Beijing time.

    Defaults reproduce the original hard-coded target of 00:25
    Asia/Shanghai. (The original comment claimed 23:00, which did not
    match the code; the code's 00:25 behavior is kept.)

    Args:
        hour: Target hour in 24-hour Beijing time (default 0).
        minute: Target minute (default 25).
    """
    tz = ZoneInfo("Asia/Shanghai")
    now = datetime.datetime.now(tz)

    # Build today's target moment.
    target_time = now.replace(hour=hour, minute=minute, second=0, microsecond=0)

    # If today's target has already passed, roll over to tomorrow.
    if now >= target_time:
        target_time += datetime.timedelta(days=1)

    sleep_seconds = (target_time - now).total_seconds()

    print(f"Sleeping {sleep_seconds:.2f}s until {hour:02d}:{minute:02d} Beijing time...")
    time.sleep(sleep_seconds)
221
+
222
# CLI entry point: evaluate one model several times on the pattern-3 benchmark.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Evaluate models on chemical dataset.')
    parser.add_argument('--model_name', type=str, required=True, help='Name of the model to evaluate (e.g., gpt-4o, deepseek).')
    parser.add_argument('--start_index', type=int, default=1, help='Start index for output files (default: 1).')
    parser.add_argument('--end_index', type=int, default=6, help='End index for output files (exclusive, default: 6).')
    # Fixed: help text was a copy-paste of --end_index's description.
    parser.add_argument('--api_from', type=str, default="xunfei", help='API provider to use (default: xunfei).')
    args = parser.parse_args()

    Eval_LLM = openai_api.OpenAIClient(api_from=args.api_from)
    system_prompt = 'You are an expert in chemistry.'

    # wait_until_target_time()
    # NOTE(review): paths are hard-coded to one machine — consider CLI flags.
    input_path = '/home/xshe/KG/eval/benchmark/new_benchmark/new_pattern_3.csv'
    output_base_path = f'/home/xshe/KG/eval/benchmark/new_results/pat3_results_{args.model_name}_'
    all_summary = []
    # Run the benchmark (end_index - start_index) times, one output file per run.
    for i in range(args.start_index, args.end_index):
        output_path = f"{output_base_path}{i}.csv"
        summary = evaluate_dataset(input_path, output_path, args.model_name)
        all_summary.append(summary)

    # Re-print every run's summary at the end for easy comparison.
    for summary in all_summary:
        print("\nEvaluation Summary:")
        print("Standard Questions:")
        print(f"Validity Rate: {summary['Validity_rate_std']:.4f}")
        print(f"Exact Match Rate: {summary['Exact_match_rate_std']:.4f}")
        print(f"Average FTS: {summary['Average_FTS_std']:.4f}")
eval_pat_5.py ADDED
@@ -0,0 +1,252 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pandas as pd
2
+ import re
3
+ import ast
4
+ from rdkit import Chem
5
+ from rdkit.Chem import AllChem
6
+ from rdkit import DataStructs
7
+ from tqdm import tqdm
8
+ import itertools
9
+ import openai_api
10
+ import datetime
11
+ import time
12
+ from zoneinfo import ZoneInfo
13
+ import argparse
14
+
15
def canonical_smiles(smiles):
    """Return the RDKit-canonical form of *smiles*, or None if unparsable.

    ``MolFromSmiles`` returns None (rather than raising) for invalid
    SMILES; the except clause only guards against RDKit raising on
    pathological input (e.g. a non-string argument).
    """
    try:
        mol = Chem.MolFromSmiles(smiles)
        return Chem.MolToSmiles(mol) if mol else None
    except Exception:  # was a bare except, which also swallowed KeyboardInterrupt/SystemExit
        return None
22
+
23
def parse_two_smiles(response):
    """Extract up to two <SMILES>...</SMILES> payloads from *response*.

    Returns a two-element list ordered [last match, second-to-last
    match]. With a single match the second slot is None; with no match
    both slots are None.
    """
    hits = [m.strip() for m in re.findall(r'<SMILES>(.*?)</SMILES>', response, re.DOTALL)]
    if not hits:
        return [None, None]
    if len(hits) == 1:
        return [hits[0], None]
    return [hits[-1], hits[-2]]
37
+
38
def evaluate_prediction_against_answer(pred_smiles, answer):
    """Score one predicted SMILES against one reference answer.

    Returns a dict with 'Valid' (0/1 parsability of the prediction),
    'Exact_match' (0/1 canonical-SMILES equality) and 'FTS' (Tanimoto
    similarity of Morgan fingerprints, radius 2, 2048 bits). When
    *answer* is None only validity can be judged; exact match and FTS
    are 0. Raises ValueError if *answer* is a non-parsable SMILES.
    """
    def _zero_metrics():
        return {'Valid': 0, 'Exact_match': 0, 'FTS': 0.0}

    # No reference answer: judge only whether the prediction parses.
    if answer is None:
        mol = Chem.MolFromSmiles(pred_smiles) if pred_smiles else None
        if mol is None:
            return _zero_metrics()
        return {'Valid': 1, 'Exact_match': 0, 'FTS': 0.0}

    # The answer must be a valid SMILES; anything else is a data error.
    ans_mol = Chem.MolFromSmiles(answer)
    if ans_mol is None:
        raise ValueError(f"答案 {answer} 无效")
    ans_canon = Chem.MolToSmiles(ans_mol)

    # Empty or invalid prediction scores zero across the board.
    if not pred_smiles:
        return _zero_metrics()
    pred_mol = Chem.MolFromSmiles(pred_smiles)
    if pred_mol is None:
        return _zero_metrics()
    pred_canon = Chem.MolToSmiles(pred_mol)

    # Fingerprint similarity (Morgan radius-2, 2048-bit).
    fp_pred = AllChem.GetMorganFingerprintAsBitVect(pred_mol, 2, nBits=2048)
    fp_ans = AllChem.GetMorganFingerprintAsBitVect(ans_mol, 2, nBits=2048)

    return {
        'Valid': 1,
        'Exact_match': 1 if pred_canon == ans_canon else 0,
        'FTS': DataStructs.TanimotoSimilarity(fp_pred, fp_ans),
    }
76
+
77
def evaluate_two_predictions(pred1, pred2, answer_list):
    """Find the best assignment of two predictions to two reference answers.

    Scores every ordered pair of distinct answers against (pred1, pred2)
    and keeps the assignment that maximizes total exact matches, breaking
    ties by average FTS (first maximal assignment wins ties, as in the
    original strict-improvement loop).

    Returns (metrics_for_pred1, metrics_for_pred2, best_total_em,
    best_avg_fts). Raises ValueError when only one answer is provided.
    """
    # No answers at all: validity-only scoring, zero match/FTS.
    if not answer_list:
        m1 = evaluate_prediction_against_answer(pred1, None)
        m2 = evaluate_prediction_against_answer(pred2, None)
        return m1, m2, 0, 0.0

    if len(answer_list) < 2:
        raise ValueError("标准答案只有一个")

    scored = []
    for ans_a, ans_b in itertools.permutations(answer_list, 2):
        metrics_a = evaluate_prediction_against_answer(pred1, ans_a)
        metrics_b = evaluate_prediction_against_answer(pred2, ans_b)
        em_total = metrics_a['Exact_match'] + metrics_b['Exact_match']
        fts_avg = (metrics_a['FTS'] + metrics_b['FTS']) / 2
        scored.append((em_total, fts_avg, metrics_a, metrics_b))

    # Lexicographic max over (exact matches, avg FTS); max() returns the
    # first occurrence of the maximum, matching the original tie-breaking.
    best_em, best_fts, best_m1, best_m2 = max(scored, key=lambda t: (t[0], t[1]))
    return best_m1, best_m2, best_em, best_fts
109
+
110
+
111
def evaluate_dataset(input_path, output_path, model_name):
    """Run the full evaluation loop over one benchmark CSV.

    For each row: query the model with the standard question
    (``Question_std``), parse up to two predicted SMILES from the
    response, score them against the row's answer list, and collect
    per-row metrics. All rows are written to *output_path*; a summary
    dict of mean metrics is returned.

    Relies on the module-level ``Eval_LLM`` client and ``system_prompt``.
    CoT-question evaluation was commented out upstream and is omitted here.

    Args:
        input_path: Path to the benchmark CSV (needs ``Question_std`` and
            ``Answer`` columns; ``Answer`` is a Python-literal list string).
        output_path: Where the per-row results CSV is written.
        model_name: Model identifier passed to the API client.

    Returns:
        Dict with ``Validity_rate_std``, ``Exact_match_rate_std`` and
        ``Average_FTS_std`` (NaN for each if no row succeeded).
    """
    df = pd.read_csv(input_path)
    results = []

    for _, row in tqdm(df.iterrows(), total=len(df)):
        try:
            # Standard-question evaluation.
            response_std = Eval_LLM.attempt_api_call(system_prompt, row['Question_std'], model=model_name)
            # 1) Parse the two predicted SMILES (two reactant combinations).
            pred_smiles_std_1, pred_smiles_std_2 = parse_two_smiles(response_std)
            # 2) Parse the reference answer list.
            answer_list = ast.literal_eval(row['Answer'])

            metrics_std_1, metrics_std_2, total_em_std, avg_fts_std = evaluate_two_predictions(
                pred_smiles_std_1, pred_smiles_std_2, list(answer_list)
            )
            # Per-row aggregates: mean over the two predictions.
            valid_std = (metrics_std_1['Valid'] + metrics_std_2['Valid']) / 2
            exact_std = (metrics_std_1['Exact_match'] + metrics_std_2['Exact_match']) / 2
            fts_std = (metrics_std_1['FTS'] + metrics_std_2['FTS']) / 2

            result = row.to_dict()
            result.update({
                'Model_response_std': response_std,
                # Standard-question prediction results.
                'Pred_SMILES_std_1': pred_smiles_std_1,
                'Pred_SMILES_std_2': pred_smiles_std_2,
                'Valid_std_1': metrics_std_1['Valid'],
                'Exact_match_std_1': metrics_std_1['Exact_match'],
                'FTS_std_1': metrics_std_1['FTS'],
                'Valid_std_2': metrics_std_2['Valid'],
                'Exact_match_std_2': metrics_std_2['Exact_match'],
                'FTS_std_2': metrics_std_2['FTS'],
                'Valid_std': valid_std,
                'Exact_match_std': exact_std,
                'FTS_std': fts_std,
            })
            results.append(result)
        except Exception as e:
            # Was a bare ``except`` printing a fixed message; keep the
            # row-skip behavior but surface the actual error for debugging.
            print(f"Error while evaluating row: {e!r}")
            continue

    # Persist per-row results.
    result_df = pd.DataFrame(results)
    result_df.to_csv(output_path, index=False)

    # Aggregate summary metrics; guard against the every-row-failed case,
    # where result_df has no columns and the lookups would KeyError.
    if result_df.empty:
        summary = {
            'Validity_rate_std': float('nan'),
            'Exact_match_rate_std': float('nan'),
            'Average_FTS_std': float('nan'),
        }
    else:
        summary = {
            'Validity_rate_std': result_df['Valid_std'].mean(),
            'Exact_match_rate_std': result_df['Exact_match_std'].mean(),
            'Average_FTS_std': result_df['FTS_std'].mean(),
        }

    print("\nEvaluation Summary:")
    print("Standard Questions:")
    print(f"Validity Rate: {summary['Validity_rate_std']:.4f}")
    print(f"Exact Match Rate: {summary['Exact_match_rate_std']:.4f}")
    print(f"Average FTS: {summary['Average_FTS_std']:.4f}")
    return summary
202
+
203
def wait_until_target_time(hour=0, minute=25):
    """Sleep until the next occurrence of *hour*:*minute* Beijing time.

    Defaults reproduce the original hard-coded target of 00:25
    Asia/Shanghai. (The original comment claimed 23:00, which did not
    match the code; the code's 00:25 behavior is kept.)

    Args:
        hour: Target hour in 24-hour Beijing time (default 0).
        minute: Target minute (default 25).
    """
    tz = ZoneInfo("Asia/Shanghai")
    now = datetime.datetime.now(tz)

    # Build today's target moment.
    target_time = now.replace(hour=hour, minute=minute, second=0, microsecond=0)

    # If today's target has already passed, roll over to tomorrow.
    if now >= target_time:
        target_time += datetime.timedelta(days=1)

    sleep_seconds = (target_time - now).total_seconds()

    print(f"Sleeping {sleep_seconds:.2f}s until {hour:02d}:{minute:02d} Beijing time...")
    time.sleep(sleep_seconds)
221
+
222
# CLI entry point: evaluate one model several times on the pattern-4 benchmark.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Evaluate models on chemical dataset.')
    parser.add_argument('--model_name', type=str, required=True, help='Name of the model to evaluate (e.g., gpt-4o, deepseek).')
    parser.add_argument('--start_index', type=int, default=1, help='Start index for output files (default: 1).')
    parser.add_argument('--end_index', type=int, default=6, help='End index for output files (exclusive, default: 6).')
    # Fixed: help text was a copy-paste of --end_index's description.
    parser.add_argument('--api_from', type=str, default="xunfei", help='API provider to use (default: xunfei).')
    args = parser.parse_args()

    Eval_LLM = openai_api.OpenAIClient(api_from=args.api_from)
    system_prompt = 'You are an expert in chemistry.'

    # wait_until_target_time()
    # NOTE(review): paths are hard-coded to one machine — consider CLI flags.
    input_path = '/home/xshe/KG/eval/benchmark/new_benchmark/new_pattern_4.csv'
    output_base_path = f'/home/xshe/KG/eval/benchmark/new_results/pat4_results_{args.model_name}_'
    all_summary = []
    # Run the benchmark (end_index - start_index) times, one output file per run.
    for i in range(args.start_index, args.end_index):
        output_path = f"{output_base_path}{i}.csv"
        summary = evaluate_dataset(input_path, output_path, args.model_name)
        all_summary.append(summary)

    # Re-print every run's summary at the end for easy comparison.
    for summary in all_summary:
        print("\nEvaluation Summary:")
        print("Standard Questions:")
        print(f"Validity Rate: {summary['Validity_rate_std']:.4f}")
        print(f"Exact Match Rate: {summary['Exact_match_rate_std']:.4f}")
        print(f"Average FTS: {summary['Average_FTS_std']:.4f}")
new_pattern_1.csv ADDED
The diff for this file is too large to render. See raw diff
 
new_pattern_2.csv ADDED
The diff for this file is too large to render. See raw diff
 
new_pattern_3.csv ADDED
The diff for this file is too large to render. See raw diff
 
new_pattern_4.csv ADDED
The diff for this file is too large to render. See raw diff
 
new_pattern_5.csv ADDED
The diff for this file is too large to render. See raw diff