# File size: 8,515 Bytes
# commit: 4ac1fc5
import json
from collections import defaultdict,OrderedDict, Counter
from tqdm import tqdm
import random
import matplotlib.pyplot as plt
def analyze_and_visualize_sources(final_dataset, output_image_path="source_distribution_selected_data.png", output_json_path="source_counts.json"):
    """Summarize the ``source`` field distribution of *final_dataset*.

    Writes the per-source counts to *output_json_path* as JSON and saves a
    pie chart of the distribution to *output_image_path*.

    :param final_dataset: list of dict records; items without a "source"
        key are ignored
    :param output_image_path: path for the pie-chart image file
    :param output_json_path: path for the JSON counts file
    """
    # Count only items that actually carry a "source" field.
    source_counts = Counter(item["source"] for item in final_dataset if "source" in item)
    # Persist the raw counts so they can be inspected without re-running.
    with open(output_json_path, "w", encoding="utf-8") as f:
        json.dump(source_counts, f, ensure_ascii=False, indent=4)
    print(f"Source 分布已保存到 {output_json_path}")
    # Render the distribution as a pie chart.
    plt.figure(figsize=(8, 8))
    plt.pie(
        list(source_counts.values()),
        labels=list(source_counts.keys()),
        autopct='%1.1f%%',
        startangle=140,
    )
    plt.title("Source Distribution")
    plt.axis('equal')  # force a circular pie
    plt.savefig(output_image_path)
    plt.close()
    print(f"Source 分布饼图已保存到 {output_image_path}")
# 读取原始数据
# Load the raw tagged dataset.
with open('/share/project/sunshuang/deep_search/data_for_rl/musique_tagged/musique_tagged_domain_keypoints_keywords_count.json') as f:
    data = json.load(f)
# Mirror "question" under a capitalized "Question" key, which the selection
# code below reads.  (The original enumerate index was unused and dropped.)
for item in data:
    if "question" in item:
        item["Question"] = item["question"]
# step 1: pre-process domain labels — merge rare domains into "other".
print("step 1: preprocess domain data")
# Count the raw domain distribution.
domain_counter = Counter(item["domain_keypoints"]["domain"] for item in data)
# A domain is kept only if it has at least 500 items; all smaller domains
# are folded into a catch-all "other" bucket below.
valid_domains = {d for d, cnt in domain_counter.items() if cnt >= 500}
other_domains = [d for d, cnt in domain_counter.items() if cnt < 500]
print(f"valid_domains: {len(valid_domains)}")
print(f"other_domains: {len(other_domains)}")
# Rebuild the dataset with small domains renamed to "other".  Each item is
# shallow-copied so the original `data` records stay untouched.
processed_data = []
for item in data:
    original_domain = item["domain_keypoints"]["domain"]
    new_item = {
        **item,
        "domain_keypoints": {
            **item["domain_keypoints"],
            "domain": original_domain if original_domain in valid_domains else "other",
        },
    }
    processed_data.append(new_item)
# step 2: compute per-domain sample quotas.
print("step 2: calculate domain distribution")
# Domain distribution after the "other" merge.
new_domain_counts = Counter(item["domain_keypoints"]["domain"] for item in processed_data)
# Final list of domains to sample from; "other" is appended only if it
# actually received any items.
final_domains = list(valid_domains)
if new_domain_counts.get("other", 0) > 0:
    final_domains.append("other")
print(f"new_domain_counts: {len(new_domain_counts)}")
print(f"final_domains: {len(final_domains)}")
# Split the 4000-sample budget evenly across domains; the first `remainder`
# domains each absorb one extra item so the quotas sum exactly to the total.
total_samples = 4000
num_domains = len(final_domains)
base_quota = total_samples // num_domains
remainder = total_samples % num_domains
print(f"base_quota: {base_quota}")
print(f"remainder: {remainder}")
domain_quotas = {
    domain: base_quota + (1 if idx < remainder else 0)
    for idx, domain in enumerate(final_domains)
}
# step 3: count key-point frequencies per domain.
print("step 3 calculating keypoints distribution")
domain_keypoint_dist = defaultdict(lambda: defaultdict(int))
for item in tqdm(processed_data, desc="Calculating keypoints distribution"):
    info = item["domain_keypoints"]
    counts = domain_keypoint_dist[info["domain"]]
    for kp in info["key_points"]:
        counts[kp] += 1
# Within each domain, order the key points by descending frequency.
for domain in list(domain_keypoint_dist):
    ranked = sorted(domain_keypoint_dist[domain].items(), key=lambda kv: kv[1], reverse=True)
    domain_keypoint_dist[domain] = OrderedDict(ranked)
# Persist the distribution for later inspection.
with open("domain_keypoints_distribution_without_remove_dup.json", "w") as f:
    json.dump(domain_keypoint_dist, f, indent=4, ensure_ascii=False)
# step 4: stratified selection with de-duplication.
print("step 4: select data")
final_dataset = []
question_set = set()    # questions already selected, shared across all domains
real_selected_cnt = 0
for domain in tqdm(final_domains, desc="Processing domains"):
    # All items belonging to the current domain.
    domain_data = [item for item in processed_data if item["domain_keypoints"]["domain"] == domain]
    print(f"-------------- process {domain}")
    print(f"data: {len(domain_data)}")
    keypoints = list(domain_keypoint_dist[domain].keys())
    print(f"key points {len(keypoints)}")
    print(f"domain_quotas: {domain_quotas[domain]}")
    # The quota is far smaller than the number of key points, so instead of
    # per-key-point quotas we rank items by total keyword count and sweep
    # the ranking repeatedly: within one sweep no two selected items may
    # share a key point, and a question is never selected twice overall.
    sorted_domain_data = sorted(domain_data, key=lambda x: x["keywords_count"]["total"], reverse=True)
    quota = domain_quotas[domain]
    selected_cnt = 0
    cycle_cnt = 0
    selected_index = set()  # indices into sorted_domain_data already taken (set: O(1) lookup)
    while selected_cnt < quota:
        before_cycle = selected_cnt
        cycle_cnt += 1
        selected_keypoints = set()  # reset each sweep: key points de-dup is per-sweep only
        for idx, item in enumerate(sorted_domain_data):
            if selected_cnt >= quota:
                break
            if idx in selected_index:
                continue
            kps = item["domain_keypoints"]["key_points"]
            # Skip items whose key points collide with ones picked this sweep.
            if any(kp in selected_keypoints for kp in kps):
                continue
            if item["Question"] not in question_set:
                final_dataset.append(item)
                question_set.add(item["Question"])
                selected_keypoints.update(kps)
                selected_index.add(idx)
                selected_cnt += 1
        if selected_cnt == before_cycle:
            # A full sweep added nothing, so the per-sweep state resets
            # identically and no later sweep can ever make progress (e.g.
            # only duplicate questions remain).  Without this guard the
            # original loop would spin forever; stop with what we have.
            print(f"warning: quota not reachable for {domain}, stopping at {selected_cnt}/{quota}")
            break
    real_selected_cnt += selected_cnt
    print(f"cycle_cnt: {cycle_cnt}")
    print(f"real_selected_cnt: {real_selected_cnt}")
# (The earlier per-key-point stratified-sampling approach was removed: with
# quotas far below the key-point count it could not fill the quotas and
# produced many duplicate questions.)
print(f"final_dataset len: {len(final_dataset)}")
# Report the source distribution of the selected data (JSON + pie chart).
analyze_and_visualize_sources(final_dataset)
# step 5: save the final selection.
with open("final_selected_dataset.json", "w") as f:
    json.dump(final_dataset, f, indent=4, ensure_ascii=False)