# Uploaded by SunSec via the upload-large-folder tool (commit 6ab6bc3, verified).
import os
import copy
import json
import logging
from tqdm import tqdm
from dataclasses import dataclass, field
from typing import Optional, Dict, Sequence
import torch
from torch.utils.data import random_split
from torch.nn.utils.rnn import pad_sequence
import transformers
from torch.utils.data import Dataset
from transformers import Trainer
import random
from typing import List, Optional, Tuple, Union
from transformers import AutoModelForCausalLM, TrainingArguments
from datasets import load_dataset
from transformers import DataCollatorForSeq2Seq
import shutil
import json
from tqdm import tqdm
import os
import json
import matplotlib.pyplot as plt
import seaborn as sns
from collections import Counter
import numpy as np
# from liger_kernel.transformers import AutoLigerKernelForCausalLM
import matplotlib.pyplot as plt
import numpy as np
def plot_distribution(data, title="Distribution of First Reason Length", bins=40, name="url", xlabel="First Reason Length", ylabel="Frequency", output_path=None):
    """Plot a histogram (with KDE) of *data* and save quantile statistics as JSON.

    Two files are produced: ``<name>.png`` (the figure) and
    ``<name>_quantiles.json`` (quantile values at 3% steps).

    Args:
        data (list): Numeric values to plot (e.g. a length per sample).
        title (str): Figure title.
        bins (int): Number of histogram bins.
        name (str): Base name for the two output files.
        xlabel (str): X-axis label.
        ylabel (str): Y-axis label.
        output_path (str | None): Output directory, created if missing.
            When None, files are written to the current working directory.
    """
    sns.set(style="whitegrid")
    plt.figure(figsize=(10, 6))
    sns.histplot(data, kde=True, bins=bins, color="skyblue", edgecolor="black")
    plt.title(title, fontsize=16)
    plt.xlabel(xlabel, fontsize=14)
    plt.ylabel(ylabel, fontsize=14)
    plt.tight_layout()
    if output_path:
        os.makedirs(output_path, exist_ok=True)
        fig_path = os.path.join(output_path, f"{name}.png")
        json_path = os.path.join(output_path, f"{name}_quantiles.json")
    else:
        fig_path = f"{name}.png"
        json_path = f"{name}_quantiles.json"
    plt.savefig(fig_path, dpi=300, bbox_inches="tight")
    plt.show()
    plt.close()  # release the figure so repeated calls don't accumulate memory

    # Quantile statistics at 3% steps over [0.0, 0.99] (1.0 is excluded by arange).
    quantiles = np.arange(0.0, 1.0, 0.03)
    arr = np.asarray(data)  # convert once; the original rebuilt this array per quantile
    quantile_values = np.quantile(arr, quantiles)
    total_count = len(data)
    quantile_stats = {
        "name": name,
        "quantiles": []
    }
    for q, value in zip(quantiles, quantile_values):
        count_below = int(np.sum(arr <= value))
        percentage = float(count_below / total_count * 100)
        quantile_stats["quantiles"].append({
            "quantile": round(float(q), 2),
            "value": round(float(value), 2),
            "count_below": count_below,
            "percentage": round(percentage, 2)
        })
    # Persist the quantile table next to the figure.
    with open(json_path, "w", encoding="utf-8") as f:
        json.dump(quantile_stats, f, indent=2, ensure_ascii=False)
    print(f"分位点统计已保存至: {json_path}")
def save_to_json(data, filename):
    """Serialize *data* to *filename* as pretty-printed UTF-8 JSON.

    Args:
        data: Any JSON-serializable object (here: a list of samples).
        filename (str): Destination path; overwritten if it already exists.
    """
    with open(filename, 'w', encoding='utf-8') as f:
        json.dump(data, f, ensure_ascii=False, indent=4)
    # Bug fix: the message previously printed the literal "(unknown)" instead
    # of the actual destination path.
    print(f"Saved to {filename}, data length: {len(data)}")
def load_json(file_path):
    """Load and return the object stored in a UTF-8 JSON file.

    Args:
        file_path (str): Path to the JSON file.

    Returns:
        The deserialized JSON content (typically a list of samples here).
    """
    with open(file_path, "r", encoding="utf-8") as fp:
        data = json.load(fp)
    print(f"Loaded from {file_path}, data length: {len(data)}")
    return data
# Label value ignored by the loss (the default ignore_index of CrossEntropyLoss).
IGNORE_INDEX = -100
def process_math(sample, tokenizer):
    """Return the total token length of one training example.

    The example is laid out as ``<chat-templated prompt> <output> <eos>``,
    so the result is ``len(prompt_ids) + len(output_ids) + 1``.

    Args:
        sample (dict): Must contain string fields "prompt" and "output".
        tokenizer: A transformers-style tokenizer exposing
            ``apply_chat_template`` and callable tokenization.

    Returns:
        int: Number of tokens including the trailing EOS token.
    """
    prompt_text = tokenizer.apply_chat_template(
        [
            {'role': 'user', 'content': sample["prompt"]}
        ],
        tokenize=False, add_generation_prompt=True
    )
    prompt_ids = tokenizer(prompt_text, add_special_tokens=False)["input_ids"]
    output_ids = tokenizer(sample["output"], add_special_tokens=False)["input_ids"]
    # The original also built a labels list (prompt masked with IGNORE_INDEX),
    # but only the length was ever returned — that dead work is removed here.
    return len(prompt_ids) + len(output_ids) + 1  # +1 for the EOS token
# --- Script entry: annotate a math SFT dataset with tokenized sequence lengths,
# --- plot their distribution, and write filtered/annotated copies back to disk.
input_file = "/opt/aps/workdir/sunshuang/deep_search/math_data/math_qwq_4524_add_prompt.json"  # raw dataset path
# Tokenizer is used purely for length measurement; no model weights are loaded.
tokenizer = transformers.AutoTokenizer.from_pretrained(
    "/capacity/userdata/models/Qwen2.5-7B-Instruct", model_max_length=30000
)
data = load_json(input_file)
new_data = []       # every sample, annotated with its token length
cnt = []            # token lengths only, for plotting
less_than_1w = []   # samples under the 12k-token budget
# NOTE(review): the name suggests "1w" (= 10,000) but the cut-off below is 12000.
for item in tqdm(data):
    length = process_math(item, tokenizer)
    item["seq_token_len"] = length
    cnt.append(length)
    new_data.append(item)
    if length < 12000:
        less_than_1w.append(item)
plot_distribution(cnt, title="Distribution of Seq Token Length", bins=40, name="seq_token_len", xlabel="Seq Token Length", ylabel="Frequency", output_path=None)
output_file = f"/opt/aps/workdir/sunshuang/deep_search/math_data/math_qwq_4524_add_prompt_token_{len(new_data)}.json"
save_to_json(new_data, output_file)
output_file_less1w = f"/opt/aps/workdir/sunshuang/deep_search/math_data/math_qwq_4524_add_prompt_less1w_{len(less_than_1w)}.json"
save_to_json(less_than_1w, output_file_less1w)