|
|
import json |
|
|
from collections import defaultdict,OrderedDict, Counter |
|
|
from tqdm import tqdm |
|
|
import random |
|
|
import matplotlib.pyplot as plt |
|
|
|
|
|
|
|
|
|
|
|
def analyze_and_visualize_sources(final_dataset, output_image_path="source_distribution_selected_data.png", output_json_path="source_counts.json"):
    """
    Tally the distribution of the "source" field across final_dataset,
    save the counts as a JSON file, and render a pie chart of the result.

    :param final_dataset: list of dicts; items without a "source" key are skipped
    :param output_image_path: path for the pie-chart image
                              (default "source_distribution_selected_data.png")
    :param output_json_path: path for the JSON counts file
                             (default "source_counts.json")
    """
    # Only items that actually carry a "source" key contribute to the tally.
    sources = [item["source"] for item in final_dataset if "source" in item]

    source_counts = Counter(sources)

    # Counter is a dict subclass, so it serializes directly to a JSON object.
    with open(output_json_path, "w", encoding="utf-8") as f:
        json.dump(source_counts, f, ensure_ascii=False, indent=4)
    print(f"Source 分布已保存到 {output_json_path}")

    labels = list(source_counts.keys())
    counts = list(source_counts.values())

    plt.figure(figsize=(8, 8))
    plt.pie(counts, labels=labels, autopct='%1.1f%%', startangle=140)
    plt.title("Source Distribution")
    plt.axis('equal')  # equal aspect ratio keeps the pie circular

    plt.savefig(output_image_path)
    plt.close()
    print(f"Source 分布饼图已保存到 {output_image_path}")
|
|
|
|
|
|
|
|
# Load the tagged MuSiQue dataset (domain / keypoints / keyword-count annotations).
with open('/share/project/sunshuang/deep_search/data_for_rl/musique_tagged/musique_tagged_domain_keypoints_keywords_count.json') as f:
    data = json.load(f)

# The selection loop below keys on "Question" (capitalized); mirror the
# lowercase field so both spellings exist on every item that has one.
# (The original enumerate() index was unused, so it is dropped.)
for item in data:
    if "question" in item:
        item["Question"] = item["question"]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Fix: original message read "preprocss".
print("step 1: preprocess domain data")

# Count how many items fall into each raw domain label.
domain_counter = Counter(item["domain_keypoints"]["domain"] for item in data)

# Domains with at least 500 items keep their label; everything else is
# collapsed into a single "other" bucket.
valid_domains = {d for d, cnt in domain_counter.items() if cnt >= 500}
other_domains = [d for d, cnt in domain_counter.items() if cnt < 500]
print(f"valid_domains: {len(valid_domains)}")
print(f"other_domains: {len(other_domains)}")

# Rebuild each item with the (possibly collapsed) domain label; the
# dict spreads leave the original records untouched.
processed_data = []
for item in data:
    original_domain = item["domain_keypoints"]["domain"]
    new_domain = original_domain if original_domain in valid_domains else "other"
    processed_data.append({
        **item,
        "domain_keypoints": {
            **item["domain_keypoints"],
            "domain": new_domain,
        },
    })
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
print("step 2: calculate domain distribution")

# Tally items per (collapsed) domain label.
new_domain_counts = defaultdict(int)
for record in processed_data:
    new_domain_counts[record["domain_keypoints"]["domain"]] += 1

# Final domain list: every high-frequency domain, plus "other" when the
# collapse actually produced any such items.
final_domains = list(valid_domains)
if new_domain_counts.get("other", 0) > 0:
    final_domains.append("other")

print(f"new_domain_counts: {len(new_domain_counts)}")
print(f"final_domains: {len(final_domains)}")

# Split the sample budget evenly across domains; the first `remainder`
# domains absorb one extra item so the quotas sum exactly to total_samples.
total_samples = 4000
num_domains = len(final_domains)
base_quota, remainder = divmod(total_samples, num_domains)
print(f"base_quota: {base_quota}")
print(f"remainder: {remainder}")

domain_quotas = {
    name: base_quota + (1 if pos < remainder else 0)
    for pos, name in enumerate(final_domains)
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
print("step 3 calculating keypoints distribution")

# Per-domain frequency table of individual key points.
domain_keypoint_dist = defaultdict(lambda: defaultdict(int))

for record in tqdm(processed_data, desc="Calculating keypoints distribution"):
    dk = record["domain_keypoints"]
    for kp in dk["key_points"]:
        domain_keypoint_dist[dk["domain"]][kp] += 1

# Reorder every per-domain table from most- to least-frequent key point.
# Only the values are replaced (keys unchanged), so assigning while
# iterating the dict is safe.
for dom in domain_keypoint_dist:
    by_freq = sorted(domain_keypoint_dist[dom].items(), key=lambda kv: kv[1], reverse=True)
    domain_keypoint_dist[dom] = OrderedDict(by_freq)

# Persist the raw (pre-deduplication) distribution for inspection.
# Fix: explicit UTF-8 — ensure_ascii=False emits raw non-ASCII characters,
# which can raise UnicodeEncodeError under a non-UTF-8 default locale
# (the other JSON write in this file already passes encoding="utf-8").
with open("domain_keypoints_distribution_without_remove_dup.json", "w", encoding="utf-8") as f:
    json.dump(domain_keypoint_dist, f, indent=4, ensure_ascii=False)
|
|
|
|
|
|
|
|
|
|
|
print("step 4: select data")
final_dataset = []
question_set = set()  # global de-dup on the "Question" text across all domains

real_selected_cnt = 0
for domain in tqdm(final_domains, desc="Processing domains"):

    domain_data = [item for item in processed_data if item["domain_keypoints"]["domain"] == domain]
    print(f"-------------- process {domain}")
    print(f"data: {len(domain_data)}")

    keypoints = list(domain_keypoint_dist[domain].keys())

    print(f"key points {len(keypoints)}")
    print(f"domain_quotas: {domain_quotas[domain]}")

    # Prefer items with the highest total keyword count.
    sorted_domain_data = sorted(domain_data, key=lambda x: x["keywords_count"]["total"], reverse=True)

    selected_cnt = 0
    cycle_cnt = 0
    selected_index = set()  # set: O(1) membership (original used a list, O(n) per check)
    while selected_cnt < domain_quotas[domain]:
        cycle_cnt += 1
        picked_this_cycle = 0

        # Key points only need to be unique within a single pass, so the
        # seen-set resets on every cycle.
        selected_keypoints = set()
        for idx, item in enumerate(sorted_domain_data):
            if selected_cnt >= domain_quotas[domain]:
                break
            if idx in selected_index:
                continue
            kps = item["domain_keypoints"]["key_points"]
            # Skip items sharing a key point with something already
            # picked in this cycle.
            if any(kp in selected_keypoints for kp in kps):
                continue
            if item["Question"] not in question_set:
                final_dataset.append(item)
                question_set.add(item["Question"])
                selected_keypoints.update(kps)
                selected_index.add(idx)
                selected_cnt += 1
                picked_this_cycle += 1

        # Fix: if a full pass selects nothing, no later pass can ever
        # succeed either (question_set / selected_index only grow, and the
        # per-cycle keypoint set resets), so the original code would loop
        # forever whenever a domain cannot fill its quota. Break instead.
        if picked_this_cycle == 0:
            break
    real_selected_cnt += selected_cnt
    print(f"cycle_cnt: {cycle_cnt}")

print(f"real_selected_cnt: {real_selected_cnt}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Report the size of the balanced selection, visualize its source mix,
# and persist it.
print(f"final_dataset len: {len(final_dataset)}")

analyze_and_visualize_sources(final_dataset)

# Fix: explicit UTF-8 — ensure_ascii=False emits raw non-ASCII characters,
# which can raise UnicodeEncodeError under a non-UTF-8 default locale.
with open("final_selected_dataset.json", "w", encoding="utf-8") as f:
    json.dump(final_dataset, f, indent=4, ensure_ascii=False)