# from googlesearch import search
# from typing import List
# def google_search(query: str, num_results: int = 10) -> List[str]:
# """
# Execute a Google search and return a list of URLs.
# Args:
# query (str): The search query to submit to Google.
# num_results (int, optional): The number of search results to return. Default is 10.
# Returns:
# List[str]: A list of URLs matching the search query.
# """
# links = list(search(query, num_results=10))
# return links
# res = google_search("when does season 14 of grey's anatomy come out -site:en.wikipedia.org")
# print(res)
# def run_evaluation_for_eval(filtered_data, input_list, output_list, dataset_name, output_dir, total_time, split, apply_backoff=False):
# if dataset_name != 'eval':
# raise ValueError(f"Unsupported dataset: {dataset_name}")
# else:
# # Existing evaluation for other datasets
# avg_em, avg_acc, avg_f1, avg_math = [], [], [], []
# num_valid_answer = 0
# # If the dataset is eval, track metrics per source
# source_metrics = {}
# for item, input_prompt, result in zip(filtered_data, input_list, output_list):
# if type(result) == str:
# item['Output'] = result
# else:
# item['Output'] = result.outputs[0].text
# if dataset_name in ['gpqa', 'medmcqa']:
# labeled_answer = item["Correct Choice"]
# # labeled_choice_answer = item["Correct Answer"]
# mode = 'choose'
# elif dataset_name in ['math500', 'aime', 'amc']:
# labeled_answer = item["answer"]
# mode = 'gen'
# elif dataset_name in ['eval', 'chinese_simpleqa', 'simpleqa', 'nq', 'triviaqa', 'hotpotqa', 'musique', 'bamboogle', '2wiki']:
# labeled_answer = item["answer"]
# mode = 'qa'
# elif dataset_name in ['pubhealth']:
# labeled_answer = item["answer"]
# mode = 'choose'
# else:
# raise ValueError(f"Unknown dataset_name: {dataset_name}")
# metric, pred_answer = evaluate_predictions(output=item['Output'], labeled_answer=labeled_answer, mode=mode)
# item['Pred_Answer'] = pred_answer
# item['Metrics'] = metric
# item['Question'] = input_prompt
# # Determine the validity of the predicted answer
# my_method_valid = (pred_answer != '' and not (mode == 'choose' and dataset_name == 'gpqa' and len(pred_answer) > 1))
# avg_em.append(metric['em'])
# avg_acc.append(metric['acc'])
# avg_f1.append(metric['f1'])
# avg_math.append(metric['math_equal'])
# if my_method_valid:
# num_valid_answer += 1
#                 # If the dataset is eval, attempt to track metrics per source
# if dataset_name == 'eval':
# source = item.get("source", "Unknown")
# if source not in source_metrics:
# source_metrics[source] = {'em': [], 'acc': [], 'f1': [], 'math_equal': [], 'num_valid_answer': 0, 'total_num': 0}
# source_metrics[source]['total_num'] += 1
# source_metrics[source]['em'].append(metric['em'])
# source_metrics[source]['acc'].append(metric['acc'])
# source_metrics[source]['f1'].append(metric['f1'])
# source_metrics[source]['math_equal'].append(metric['math_equal'])
# if my_method_valid:
# source_metrics[source]['num_valid_answer'] += 1
# t = time.localtime()
# result_json_name = f'{split}.{t.tm_mon}.{t.tm_mday},{t.tm_hour}:{t.tm_min}.json'
# metrics_json_name = f'{split}.{t.tm_mon}.{t.tm_mday},{t.tm_hour}:{t.tm_min}.metrics.json'
# # Compute overall metrics
# overall_results = {
# 'em': np.mean(avg_em) if len(avg_em) > 0 else 0.0,
# 'acc': np.mean(avg_acc) if len(avg_acc) > 0 else 0.0,
# 'f1': np.mean(avg_f1) if len(avg_f1) > 0 else 0.0,
#             'math_equal': np.mean(avg_math) if len(avg_math) > 0 else 0.0,
# 'num_valid_answer': f'{num_valid_answer} of {len(input_list)}',
# 'query_latency': f'{(total_time / len(input_list) * 1000):.0f} ms',
# }
# # If the dataset is eval, output average metrics per source
# source_avg_metrics = {}
# if dataset_name == 'eval':
# for dm, m in source_metrics.items():
# source_avg_metrics[dm] = {
# 'em': np.mean(m['em']) if len(m['em']) > 0 else 0,
# 'acc': np.mean(m['acc']) if len(m['acc']) > 0 else 0,
# 'f1': np.mean(m['f1']) if len(m['f1']) > 0 else 0,
# 'math_equal': np.mean(m['math_equal']) if len(m['math_equal']) > 0 else 0,
# 'num_valid_answer': f'{m["num_valid_answer"]} of {m["total_num"]}'
# }
#         # Save the overall and per-source metrics
# final_metrics = {'overall': overall_results}
# if dataset_name == 'eval':
# final_metrics['per_source'] = source_avg_metrics
# t = time.localtime()
# result_json_name = f'{split}.{t.tm_mon}.{t.tm_mday},{t.tm_hour}:{t.tm_min}.json'
# metrics_json_name = f'{split}.{t.tm_mon}.{t.tm_mday},{t.tm_hour}:{t.tm_min}.metrics.json'
# if apply_backoff:
# result_json_name = output_dir
# metrics_json_name = output_dir.replace('.json', '.metrics.backoff.json')
# # Save prediction results and metrics
# with open(os.path.join(output_dir, result_json_name), mode='w', encoding='utf-8') as json_file:
# json.dump(filtered_data, json_file, indent=4, ensure_ascii=False)
# with open(os.path.join(output_dir, metrics_json_name), mode='w', encoding='utf-8') as json_file:
# json.dump(final_metrics, json_file, indent=4, ensure_ascii=False)
import json

# Default location of the URL cache produced by the search pipeline.
CACHE_PATH = '/share/project/sunshuang/deep_search/search_o1/cache_simpleqa_exurls_qwq/url_cache.json'
# Sample key used for a quick sanity check of the cache contents.
SAMPLE_KEY = 'https://www.transparency.org/en/cpi/2024'


def inspect_url_cache(path: str = CACHE_PATH, key: str = SAMPLE_KEY) -> dict:
    """Load a URL-cache JSON file, print its size and one sample entry.

    Args:
        path (str): Path to the JSON cache file (a dict keyed by URL).
        key (str): URL key whose cached value should be displayed.

    Returns:
        dict: The parsed cache contents.
    """
    # Explicit UTF-8: cached web pages may contain non-ASCII text.
    with open(path, 'r', encoding='utf-8') as f:
        data = json.load(f)
    print(f"data len: {len(data)}")
    # Label the actual key being shown (the old message said "data[0]"
    # while indexing a URL key); .get avoids a KeyError on a stale cache.
    print(f"data[{key!r}]: {data.get(key, '<missing>')}")
    return data


if __name__ == "__main__":
    inspect_url_cache()