|
|
import json |
|
|
from tqdm import tqdm |
|
|
import os |
|
|
|
|
|
import json |
|
|
import matplotlib.pyplot as plt |
|
|
import seaborn as sns |
|
|
from collections import Counter |
|
|
import numpy as np |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import seaborn as sns |
|
|
import matplotlib.pyplot as plt |
|
|
import numpy as np |
|
|
import json |
|
|
import os |
|
|
|
|
|
def plot_distribution(data, title="Distribution of First Reason Length", bins=40, name="url", xlabel="First Reason Length", ylabel="Frequency", output_path=None):
    """
    Draw a histogram (with KDE overlay) of `data`, save it as a PNG, and
    dump quantile statistics to a companion JSON file.

    Args:
        data (list): numeric values to plot.
        title (str): chart title.
        bins (int): number of histogram bins.
        name (str): base name for the output files.
        xlabel (str): x-axis label.
        ylabel (str): y-axis label.
        output_path (str): destination directory (created if missing); when
            None, files are written to the current working directory.
    """
    sns.set(style="whitegrid")

    plt.figure(figsize=(10, 6))
    sns.histplot(data, kde=True, bins=bins, color="skyblue", edgecolor="black")
    plt.title(title, fontsize=16)
    plt.xlabel(xlabel, fontsize=14)
    plt.ylabel(ylabel, fontsize=14)
    plt.tight_layout()

    # Resolve output locations, falling back to the working directory.
    if output_path:
        os.makedirs(output_path, exist_ok=True)
        fig_path = os.path.join(output_path, f"{name}.png")
        json_path = os.path.join(output_path, f"{name}_quantiles.json")
    else:
        fig_path = f"{name}.png"
        json_path = f"{name}_quantiles.json"

    plt.savefig(fig_path, dpi=300, bbox_inches="tight")
    plt.show()

    # Quantile probabilities at 3% steps over [0, 1); 1.0 is excluded by arange.
    probs = np.arange(0.0, 1.0, 0.03)
    values_at_probs = np.quantile(data, probs)
    n_items = len(data)
    data_arr = np.array(data)

    quantile_stats = {"name": name, "quantiles": []}

    for prob, cutoff in zip(probs, values_at_probs):
        n_below = int(np.sum(data_arr <= cutoff))
        quantile_stats["quantiles"].append({
            "quantile": round(float(prob), 2),
            "value": round(float(cutoff), 2),
            "count_below": n_below,
            "percentage": round(float(n_below / n_items * 100), 2)
        })

    with open(json_path, "w", encoding="utf-8") as f:
        json.dump(quantile_stats, f, indent=2, ensure_ascii=False)

    print(f"分位点统计已保存至: {json_path}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def process_and_visualize(data_list, filename):
    """
    Count occurrences of each value in data_list, save the counts as JSON,
    and draw a bar chart of the distribution.

    Args:
        data_list (list): input values to tally.
        filename (str): base name (no extension) for the output JSON/PNG
            files; also used as the plot title.

    Returns:
        None
    """
    counts = Counter(data_list)

    # Most-frequent first, for readable JSON and a sorted bar chart.
    sorted_counts = dict(sorted(counts.items(), key=lambda item: item[1], reverse=True))

    # BUG FIX: output names were the literal string "(unknown)" instead of the
    # `filename` argument, so every call overwrote the same two files.
    json_filename = f"{filename}.json"
    with open(json_filename, 'w', encoding='utf-8') as json_file:
        json.dump(sorted_counts, json_file, ensure_ascii=False, indent=4)
    print(f"统计结果已保存到 {json_filename}")

    # Guard: zip(*{}.items()) would raise on empty input; nothing to plot.
    if not sorted_counts:
        return

    labels, values = zip(*sorted_counts.items())
    plt.figure(figsize=(10, 6))
    plt.bar(labels, values, color='skyblue')
    plt.xlabel('number')
    plt.ylabel('count')
    plt.title(filename)
    plt.xticks(rotation=45, ha='right')
    plt.tight_layout()

    plot_filename = f"{filename}.png"
    plt.savefig(plot_filename)
    print(f"直方图已保存到 {plot_filename}")

    plt.show()
|
|
|
|
|
def load_json(file_path):
    """Read a UTF-8 JSON file, report how many items it holds, and return it."""
    with open(file_path, "r", encoding="utf-8") as file:
        loaded = json.load(file)
    print(f"Loaded {len(loaded)} items from {file_path}")
    return loaded
|
|
|
|
|
def save_json(data, file_path):
    """Write `data` as pretty-printed UTF-8 JSON and report the item count."""
    with open(file_path, "w", encoding="utf-8") as file:
        json.dump(data, file, ensure_ascii=False, indent=4)
    print(f"Saved {len(data)} items to {file_path}")
|
|
|
|
|
|
|
|
def find_len(id, input_str):
    """Return whitespace-token count of input_str and whether it exceeds 3000."""
    token_count = len(input_str.split())
    return {
        "output_len": token_count,
        "len_error": token_count > 3000
    }
|
|
|
|
|
def find_output_wo_ana_len(id, input_str):
    """Return whitespace-token count of input_str; the error flag is always False."""
    token_count = len(input_str.split())
    return {
        "output_wo_ana_len": token_count,
        "output_wo_ana_error": False
    }
|
|
def find_boxed(id, input_str):
    """Count 'boxed{' occurrences in the reasoning part (before the first </think>).

    Any occurrence inside the reasoning is treated as an error.
    """
    reasoning_part = input_str.split("</think>")[0]
    n_boxed = reasoning_part.count("boxed{")
    return {
        "boxed_cnt": n_boxed,
        "boxed_error": n_boxed != 0
    }
|
|
|
|
|
|
|
|
def find_special_words(id, input_str):
    """Count case-insensitive occurrences of hedging words.

    Flags an error when 'alternatively' appears more than 3 times.
    """
    lowered = input_str.lower()
    word_counts = {word: lowered.count(word) for word in ("alternatively", "wait")}
    return {
        "words_cnt": word_counts,
        "words_error": word_counts["alternatively"] > 3
    }
|
|
|
|
|
def find_lang(id, input_str):
    """Collect CJK unified ideographs (U+4E00..U+9FFF); any occurrence is an error."""
    cjk_chars = [ch for ch in input_str if '\u4e00' <= ch <= '\u9fff']
    return {
        "count": len(cjk_chars),
        "chinese_chars": cjk_chars,
        "chinese_error": len(cjk_chars) > 0
    }
|
|
|
|
|
|
|
|
def find_ans(id, item):
    # NOTE(review): incomplete stub — the body that should build `ans_stats`
    # is missing, so calling this raises NameError. It is not invoked by the
    # __main__ driver in this file; implement or remove before use.
    return ans_stats
|
|
|
|
|
|
|
|
def find_last_reason_len(id, input_str):
    """Word count via split on a single space ('' counts as 1); >500 is an error."""
    n_words = len(input_str.split(" "))
    return {
        "last_reason_len": n_words,
        "last_reason_error": n_words > 500
    }
|
|
|
|
|
def find_first_reason_len(id, input_str):
    """Word count via split on a single space ('' counts as 1); >500 is an error."""
    n_words = len(input_str.split(" "))
    return {
        "first_reason_len": n_words,
        "first_reason_error": n_words > 500
    }
|
|
|
|
|
|
|
|
|
|
|
def find_ans_format(id, input_str):
    """Flag outputs that don't end with '}' or lack the closing-think boxed marker.

    Well-formed output must end with '}' AND contain '\\n</think>\\n\\n\\boxed{'.
    """
    well_formed = input_str.endswith("}") and "\n</think>\n\n\\boxed{" in input_str
    return {"ans_format_error": not well_formed}
|
|
|
|
|
|
|
|
def find_item(id, data):
    """Return the first item whose 'id' matches, or None when absent."""
    return next((entry for entry in data if entry["id"] == id), None)
|
|
|
|
|
|
|
|
|
|
|
# Driver: load each generation dump, attach per-item quality statistics,
# aggregate error counts and averages, then emit distribution plots and a
# JSON summary next to the input file.
if __name__ == "__main__":

    input_files = [
        "/opt/aps/workdir/sunshuang/deep_search/math_data/math_qwq_4524_add_prompt_token_4524.json"
    ]

    for input_file in input_files:

        # NOTE(review): collected but never populated or used below.
        format_error_data = []
        error_ids = []

        data = load_json(input_file)

        # NOTE(review): declared but never used below.
        search_counts = []

        # Pass 1: annotate every item in place with per-check stats dicts.
        for id, item in tqdm(enumerate(data), total=len(data)):
            output_text = item["output"]
            item["id"] = id

            item['first_reason_stats'] = find_first_reason_len(item["id"],output_text)
            item['last_reason_stats'] = find_last_reason_len(item["id"],output_text)

            item['boxed_stats'] = find_boxed(item["id"], output_text)
            item['words_stats'] = find_special_words(item["id"], output_text)
            item['lang_stats'] = find_lang(item["id"], output_text)

            item['len_stats'] = find_len(item["id"], output_text)

            item['ans_format_stats'] = find_ans_format(item["id"], output_text)

        filtered_data = []
        error_data = []

        # Tally of how many items tripped each individual check.
        error_count = {
            "boxed_error": 0,
            "words_error": 0,
            "chinese_error": 0,
            "len_error": 0,
            "last_reason_error": 0,
            "first_reason_error": 0,
            "ans_format_error": 0
        }

        # Raw per-item metrics gathered for the distribution plots below.
        len_first_reason_len = []
        len_last_reason_len = []
        len_output_len = []
        # NOTE(review): never filled — find_output_wo_ana_len is never called.
        len_output_wo_ana_len = []

        cnt_words_alternatively = []
        cnt_words_wait = []

        # Pass 2: split items into error_data / filtered_data, count errors.
        for item in data:
            len_first_reason_len.append(item['first_reason_stats']['first_reason_len'])
            len_last_reason_len.append(item['last_reason_stats']['last_reason_len'])

            len_output_len.append(item['len_stats']['output_len'])
            cnt_words_alternatively.append(item['words_stats']['words_cnt']['alternatively'])
            cnt_words_wait.append(item['words_stats']['words_cnt']['wait'])

            # An item is erroneous if ANY individual check flagged it.
            if item['lang_stats']['chinese_error'] or item['boxed_stats']['boxed_error'] or item['len_stats']['len_error'] or item["words_stats"]["words_error"] or item['first_reason_stats']['first_reason_error'] or item['last_reason_stats']['last_reason_error'] or item['ans_format_stats']['ans_format_error']:

                error_data.append(item)
                # Bump every tracked error counter whose flag is truthy here.
                for key, value in item.items():
                    if key.endswith("_stats"):
                        for k, v in value.items():
                            if k in error_count and v:
                                error_count[k] += 1
                continue

            filtered_data.append(item)

        error_count["count_overall_error"] = len(error_data)

        # Dataset-wide averages, computed over ALL items (including erroneous).
        error_count["alternatively_count"] = sum(cnt_words_alternatively) / len(cnt_words_alternatively)
        error_count["wait_count"] = sum(cnt_words_wait) / len(cnt_words_wait)
        error_count["first_reason_len"] = sum(len_first_reason_len) / len(len_first_reason_len)
        error_count["last_reason_len"] = sum(len_last_reason_len) / len(len_last_reason_len)
        error_count["average_len"] = sum(len_output_len) / len(len_output_len)

        # Per-check error ratios. Note "count_overall_error" also ends with
        # "_error", so the overall ratio is included in this list as well.
        error_count["error_ratios"] = [
            {key: value / len(data)} for key, value in error_count.items() if key.endswith("_error")
        ]

        print(error_count)

        # All artifacts are written to a "stats_1" folder beside the input file.
        base_dir = os.path.join(os.path.dirname(input_file), "stats_1")
        os.makedirs(base_dir, exist_ok=True)

        plot_distribution(len_first_reason_len, title="Distribution of First Reason Length", bins=40, name="first_reason_length", xlabel="First Reason Length", ylabel="Frequency", output_path=base_dir)
        plot_distribution(len_last_reason_len, title="Distribution of Last Reason Length", bins=40, name="last_reason_length", xlabel="Last Reason Length", ylabel="Frequency", output_path=base_dir)
        plot_distribution(len_output_len, title="Distribution of Output Length", bins=40, name="output_length", xlabel="Output Length", ylabel="Frequency", output_path=base_dir)

        plot_distribution(cnt_words_alternatively, title="Distribution of Alternatively Count", bins=100, name="alternatively_count", xlabel="Words Count", ylabel="Frequency", output_path=base_dir)
        plot_distribution(cnt_words_wait, title="Distribution of Wait Count", bins=100, name="wait_count", xlabel="Words Count", ylabel="Frequency", output_path=base_dir)

        output_error_file = os.path.join(base_dir, "error_stats.json")
        save_json(error_count, output_error_file)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|