# SunSec — "Add files using upload-large-folder tool" (commit 6ab6bc3, verified)
import json
from tqdm import tqdm
import os
import json
import matplotlib.pyplot as plt
import seaborn as sns
from collections import Counter
import numpy as np
# def plot_distribution(data, title="Distribution of First Reason Length", bins=40, name="url", xlabel="First Reason Length", ylabel="Frequency", output_path=None):
# """
# 绘制 first_reason_length 的分布图。
# 参数:
# data (list): 包含 first_reason_length 的列表。
# title (str): 图表标题,默认为 "Distribution of First Reason Length"。
# bins (int): 直方图的柱子数量,默认为 20。
# """
# # 设置绘图风格
# sns.set(style="whitegrid")
# # 创建直方图和核密度估计图
# plt.figure(figsize=(10, 6))
# sns.histplot(data, kde=True, bins=bins, color="skyblue", edgecolor="black")
# # 添加标题和标签
# plt.title(title, fontsize=16)
# plt.xlabel(xlabel, fontsize=14)
# plt.ylabel(ylabel, fontsize=14)
# # 显示图形
# plt.show()
# # plt.savefig(f"{name}.pdf", dpi=300, bbox_inches="tight")
# if output_path:
# plt.savefig(f"{output_path}/{name}.png", dpi=300, bbox_inches="tight")
# else:
# plt.savefig(f"{name}.png", dpi=300, bbox_inches="tight")
# quantiles = np.arange(0.8, 1.0, 0.03) # 0.1 到 0.9 的分位点
# quantile_values = np.quantile(data, quantiles) # 分位点对应的值
# total_count = len(data) # 数据总数
# print(f"NAME: {name}")
# print("分位点统计:")
# for q, value in zip(quantiles, quantile_values):
# count_below = np.sum(np.array(data) <= value) # 小于等于当前分位点的数量
# percentage = count_below / total_count * 100 # 占比
# print(f"分位点 {q:.2f}: "
# f"值 = {value:.2f}, "
# f"数量 = {count_below}, "
# f"占比 = {percentage:.2f}%")
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import json
import os
def plot_distribution(data, title="Distribution of First Reason Length", bins=40, name="url", xlabel="First Reason Length", ylabel="Frequency", output_path=None):
    """Plot a histogram (with KDE) of `data` and save quantile stats as JSON.

    Two files are produced: `{name}.png` (the figure) and
    `{name}_quantiles.json` (quantile statistics).

    Args:
        data (list): numeric values to plot (e.g. first_reason_length per item).
        title (str): figure title.
        bins (int): number of histogram bins, default 40.
        name (str): base name for the output files.
        xlabel (str): x-axis label.
        ylabel (str): y-axis label.
        output_path (str | None): output directory (created if missing);
            when None, files are written to the current working directory.
    """
    sns.set(style="whitegrid")
    plt.figure(figsize=(10, 6))
    sns.histplot(data, kde=True, bins=bins, color="skyblue", edgecolor="black")
    plt.title(title, fontsize=16)
    plt.xlabel(xlabel, fontsize=14)
    plt.ylabel(ylabel, fontsize=14)
    plt.tight_layout()
    if output_path:
        os.makedirs(output_path, exist_ok=True)
        fig_path = os.path.join(output_path, f"{name}.png")
        json_path = os.path.join(output_path, f"{name}_quantiles.json")
    else:
        fig_path = f"{name}.png"
        json_path = f"{name}_quantiles.json"
    plt.savefig(fig_path, dpi=300, bbox_inches="tight")
    plt.show()
    # Bug fix: close the figure so repeated calls (main invokes this 6 times)
    # don't accumulate open figures — matplotlib warns and leaks memory.
    plt.close()
    # Quantile grid: 0.00, 0.03, ..., 0.99 (upper bound exclusive).
    quantiles = np.arange(0.0, 1.0, 0.03)
    quantile_values = np.quantile(data, quantiles)
    total_count = len(data)
    # Hoisted out of the loop: the original rebuilt np.array(data) per quantile.
    arr = np.asarray(data)
    quantile_stats = {
        "name": name,
        "quantiles": []
    }
    for q, value in zip(quantiles, quantile_values):
        count_below = int(np.sum(arr <= value))  # items at or below this quantile value
        percentage = float(count_below / total_count * 100)
        quantile_stats["quantiles"].append({
            "quantile": round(float(q), 2),
            "value": round(float(value), 2),
            "count_below": count_below,
            "percentage": round(percentage, 2)
        })
    # Persist the quantile statistics as UTF-8 JSON.
    with open(json_path, "w", encoding="utf-8") as f:
        json.dump(quantile_stats, f, indent=2, ensure_ascii=False)
    print(f"分位点统计已保存至: {json_path}")
def process_and_visualize(data_list, filename):
    """Count occurrences of each value in `data_list`, save the counts as JSON,
    and plot a bar chart of the counts.

    Args:
        data_list (list): input values to tally.
        filename (str): base name (no extension) for the output JSON/PNG files.

    Returns:
        None
    """
    # 1. Count each value's occurrences.
    counts = Counter(data_list)
    # 2. Sort by count, descending.
    sorted_counts = dict(sorted(counts.items(), key=lambda item: item[1], reverse=True))
    # 3. Save as a JSON file.
    # Bug fix: the output names previously contained the garbled literal
    # "(unknown)" instead of interpolating the `filename` parameter.
    json_filename = f"{filename}.json"
    with open(json_filename, 'w', encoding='utf-8') as json_file:
        json.dump(sorted_counts, json_file, ensure_ascii=False, indent=4)
    print(f"统计结果已保存到 {json_filename}")
    # 4. Draw the bar chart.
    if sorted_counts:
        labels, values = zip(*sorted_counts.items())
    else:
        # Robustness: zip(*...) would raise ValueError on an empty input.
        labels, values = [], []
    plt.figure(figsize=(10, 6))
    plt.bar(labels, values, color='skyblue')
    plt.xlabel('number')
    plt.ylabel('count')
    plt.title(filename)
    plt.xticks(rotation=45, ha='right')  # rotate labels for readability
    plt.tight_layout()  # auto-adjust layout to avoid overlap
    # 5. Save the chart as an image file.
    plot_filename = f"{filename}.png"
    plt.savefig(plot_filename)
    print(f"直方图已保存到 {plot_filename}")
    # 6. Show the chart (optional).
    plt.show()
def load_json(file_path):
    """Read a UTF-8 JSON file, log how many items it holds, and return the parsed object."""
    with open(file_path, "r", encoding="utf-8") as fp:
        loaded = json.load(fp)
    print(f"Loaded {len(loaded)} items from {file_path}")
    return loaded
def save_json(data, file_path):
    """Serialize `data` to `file_path` as pretty-printed UTF-8 JSON and log the item count."""
    serialized = json.dumps(data, ensure_ascii=False, indent=4)
    with open(file_path, "w", encoding="utf-8") as fp:
        fp.write(serialized)
    print(f"Saved {len(data)} items to {file_path}")
def find_len(id, input_str):
    """Count whitespace-separated tokens in `input_str`; more than 3000 is flagged as an error."""
    tokens = input_str.split()
    return {
        "output_len": len(tokens),
        "len_error": len(tokens) > 3000,
    }
def find_output_wo_ana_len(id, input_str):
    """Count whitespace-separated tokens of the output without analysis; never flags an error."""
    token_count = len(input_str.split())
    return {
        "output_wo_ana_len": token_count,
        "output_wo_ana_error": False,
    }
def find_boxed(id, input_str):
    """Count 'boxed{' occurrences before the first '</think>'; any occurrence there is an error."""
    pre_think = input_str.split("</think>")[0]
    n_boxed = pre_think.count("boxed{")
    return {
        "boxed_cnt": n_boxed,
        "boxed_error": n_boxed != 0,
    }
def find_special_words(id, input_str):
    """Count filler words ('alternatively', 'wait') case-insensitively as substrings;
    more than three occurrences of 'alternatively' is flagged as an error."""
    lowered = input_str.lower()
    word_counts = {w: lowered.count(w) for w in ("alternatively", "wait")}
    return {
        "words_cnt": word_counts,
        # Only 'alternatively' contributes to the error flag.
        "words_error": word_counts["alternatively"] > 3,
    }
def find_lang(id, input_str):
    """Collect CJK unified-ideograph characters (U+4E00..U+9FFF) in `input_str`;
    any occurrence is flagged as an error."""
    cjk_chars = [ch for ch in input_str if '\u4e00' <= ch <= '\u9fff']
    return {
        "count": len(cjk_chars),
        "chinese_chars": cjk_chars,
        "chinese_error": len(cjk_chars) > 0,
    }
def find_ans(id, item):
    """Check answer-metric consistency: acc == 1 with em == 0 marks an exact-match error.

    Bug fix: the original body was entirely commented out, so the function
    raised NameError on the undefined `ans_stats`; the commented-out logic
    is restored here.

    Args:
        id: item index (unused, kept for signature consistency with siblings).
        item (dict): expects item['metric']['acc'], item['metric']['em'] and
            item['answer'].

    Returns:
        dict: {"answer": ..., "ans_em_error": bool}
    """
    if item['metric']['acc'] == 1 and item['metric']['em'] == 0:
        ans_stats = {
            "answer": item['answer'],
            "ans_em_error": True,
        }
    else:
        ans_stats = {
            "answer": item['answer'],
            "ans_em_error": False,
        }
    return ans_stats
def find_last_reason_len(id, input_str):
    """Length of the last reasoning segment, counted by splitting on single spaces
    (consecutive spaces produce empty fields that still count); > 500 is an error."""
    n_parts = len(input_str.split(" "))
    return {
        "last_reason_len": n_parts,
        "last_reason_error": n_parts > 500,
    }
def find_first_reason_len(id, input_str):
    """Length of the first reasoning segment, counted by splitting on single spaces
    (consecutive spaces produce empty fields that still count); > 500 is an error."""
    n_parts = len(input_str.split(" "))
    return {
        "first_reason_len": n_parts,
        "first_reason_error": n_parts > 500,
    }
def find_ans_format(id, input_str):
    """Validate the final-answer format: the output must end with '}' AND
    contain the closing-think + boxed-answer marker; otherwise it is an error."""
    well_formed = input_str.endswith("}") and "\n</think>\n\n\\boxed{" in input_str
    return {"ans_format_error": not well_formed}
def find_item(id, data):
    """Return the first item in `data` whose 'id' field equals `id`, or None when absent."""
    return next((entry for entry in data if entry["id"] == id), None)
if __name__ == "__main__":
    # Entry point: quality-check each model-output JSON file. Pass 1 tags every
    # item in place with per-check stats dicts; pass 2 aggregates error counts
    # and averages, plots distributions, and saves the aggregate stats to a
    # "stats_1" folder next to the input file.
    input_files = [
        # "/opt/aps/workdir/sunshuang/deep_search/math_data/selected_data_871.json"
        "/opt/aps/workdir/sunshuang/deep_search/math_data/math_qwq_4524_add_prompt_token_4524.json"
    ]
    for input_file in input_files:
        format_error_data = []
        error_ids = []
        # input_file = "/opt/aps/workdir/sunshuang/deep_search/search_o1/output/output_eval_sft/qwq_sft_871_ckpt_41/eval/turn_12.json"
        data = load_json(input_file)
        search_counts = []
        # Pass 1: walk the data and tag each item's errors.
        # NOTE(review): `id` shadows the builtin; each item is expected to
        # carry an "output" string field — confirm against the input schema.
        for id, item in tqdm(enumerate(data), total=len(data)):
            output_text = item["output"]
            item["id"] = id
            # search_counts.append(item["search_count"])
            # search_cnt = 0
            item['first_reason_stats'] = find_first_reason_len(item["id"],output_text)
            item['last_reason_stats'] = find_last_reason_len(item["id"],output_text)
            # for idx, turn in enumerate(item["output"]):
            #     for key, value in turn.items():
            #         # if "search" in key:
            #         #     search_cnt += 1
            #         # if key in ["gen", "doc_gen"]:
            #         if
            #         if idx == 0:
            #             # item["first_reason_length"] = len(value.split(" "))
            #             item['first_reason_stats'] = find_first_reason_len(item["id"],value)
            #         elif idx == len(item["output"]) -1:
            #             # item["last_reason_length"] = len(value.split(" "))
            #             item['last_reason_stats'] = find_last_reason_len(item["id"],value)
            #         # if idx == len(item["output"]) -1:
            #         #     value = process_string(value)
            #         # output_text += value
            # item["output_text"] = output_text
            item['boxed_stats'] = find_boxed(item["id"], output_text)
            item['words_stats'] = find_special_words(item["id"], output_text)
            item['lang_stats'] = find_lang(item["id"], output_text)
            # item['ans_stats'] = find_ans(item["id"], item)
            item['len_stats'] = find_len(item["id"], output_text)
            # item['search_cnt'] = search_cnt
            item['ans_format_stats'] = find_ans_format(item["id"], output_text)
        # Pass 2: split the tagged data into clean vs. error items.
        filtered_data = []
        error_data = []
        # Tally, per error type, how many items trip that error.
        error_count = {
            "boxed_error": 0,
            "words_error": 0,
            "chinese_error": 0,
            # "ans_em_error": 0,
            "len_error": 0,
            "last_reason_error": 0,
            "first_reason_error": 0,
            "ans_format_error": 0
        }
        # Collect the first/last reason lengths and output lengths for plotting.
        len_first_reason_len = []
        len_last_reason_len = []
        len_output_len = []
        len_output_wo_ana_len = []
        # cnt_search = []
        cnt_words_alternatively = []
        cnt_words_wait = []
        for item in data:
            len_first_reason_len.append(item['first_reason_stats']['first_reason_len'])
            len_last_reason_len.append(item['last_reason_stats']['last_reason_len'])
            # len_output_wo_ana_len.append(item['output_wo_ana_stats']['output_wo_ana_len'])
            len_output_len.append(item['len_stats']['output_len'])
            cnt_words_alternatively.append(item['words_stats']['words_cnt']['alternatively'])
            cnt_words_wait.append(item['words_stats']['words_cnt']['wait'])
            # cnt_search.append(item['search_cnt'])
            if item['lang_stats']['chinese_error'] or item['boxed_stats']['boxed_error'] or item['len_stats']['len_error'] or item["words_stats"]["words_error"] or item['first_reason_stats']['first_reason_error'] or item['last_reason_stats']['last_reason_error'] or item['ans_format_stats']['ans_format_error']:
                # Item has at least one error: record it and bump the counter
                # for every individual *_error flag it carries.
                error_data.append(item)
                for key, value in item.items():
                    if key.endswith("_stats"):
                        for k, v in value.items():
                            if k in error_count and v:
                                error_count[k] += 1
                # Skip the filtered list for error items.
                continue
            filtered_data.append(item)
        error_count["count_overall_error"] = len(error_data)
        # error_count["search_count"] = sum(search_counts) / len(search_counts)
        # Per-item averages over the whole dataset (assumes data is non-empty).
        error_count["alternatively_count"] = sum(cnt_words_alternatively) / len(cnt_words_alternatively)
        error_count["wait_count"] = sum(cnt_words_wait) / len(cnt_words_wait)
        error_count["first_reason_len"] = sum(len_first_reason_len) / len(len_first_reason_len)
        error_count["last_reason_len"] = sum(len_last_reason_len) / len(len_last_reason_len)
        error_count["average_len"] = sum(len_output_len) / len(len_output_len)
        # Ratios for every key ending in "_error" — note "count_overall_error"
        # matches that suffix too, so the overall ratio is included here.
        error_count["error_ratios"] = [
            {key: value / len(data)} for key, value in error_count.items() if key.endswith("_error")
        ]
        print(error_count)
        # Create the output folder next to the input file.
        base_dir = os.path.join(os.path.dirname(input_file), "stats_1")
        os.makedirs(base_dir, exist_ok=True)
        # Plot the distributions of first-reason length, last-reason length and output length.
        plot_distribution(len_first_reason_len, title="Distribution of First Reason Length", bins=40, name="first_reason_length", xlabel="First Reason Length", ylabel="Frequency", output_path=base_dir)
        plot_distribution(len_last_reason_len, title="Distribution of Last Reason Length", bins=40, name="last_reason_length", xlabel="Last Reason Length", ylabel="Frequency", output_path=base_dir)
        plot_distribution(len_output_len, title="Distribution of Output Length", bins=40, name="output_length", xlabel="Output Length", ylabel="Frequency", output_path=base_dir)
        # plot_distribution(search_counts, title="Distribution of Search Count", bins=40, name="search_count", xlabel="Search Count", ylabel="Frequency", output_path=base_dir)
        plot_distribution(cnt_words_alternatively, title="Distribution of Alternatively Count", bins=100, name="alternatively_count", xlabel="Words Count", ylabel="Frequency", output_path=base_dir)
        plot_distribution(cnt_words_wait, title="Distribution of Wait Count", bins=100, name="wait_count", xlabel="Words Count", ylabel="Frequency", output_path=base_dir)
        # Save the aggregate error statistics.
        output_error_file = os.path.join(base_dir, "error_stats.json")
        save_json(error_count, output_error_file)
        # # Save the filtered data.
        # output_filtered_file = os.path.join(base_dir, "filtered_data.json")
        # save_json(filtered_data, output_filtered_file)
        # # Save the tagged data.
        # output_tagged_file = os.path.join(base_dir, "tagged_data.json")
        # save_json(data, output_tagged_file)
        # print(f"len(filtered_data): {len(filtered_data)}")