# File size: 5,512 Bytes
# revision: 6ab6bc3
import os
import copy
import json
import logging
from tqdm import tqdm
from dataclasses import dataclass, field
from typing import Optional, Dict, Sequence
import torch
from torch.utils.data import random_split
from torch.nn.utils.rnn import pad_sequence
import transformers
from torch.utils.data import Dataset
from transformers import Trainer
import random
from typing import List, Optional, Tuple, Union
from transformers import AutoModelForCausalLM, TrainingArguments
from datasets import load_dataset
from transformers import DataCollatorForSeq2Seq
import shutil
import json
from tqdm import tqdm
import os
import json
import matplotlib.pyplot as plt
import seaborn as sns
from collections import Counter
import numpy as np
# from liger_kernel.transformers import AutoLigerKernelForCausalLM
import matplotlib.pyplot as plt
import numpy as np
def plot_distribution(data, title="Distribution of First Reason Length", bins=40, name="url", xlabel="First Reason Length", ylabel="Frequency", output_path=None):
    """Plot a histogram (with KDE) of ``data`` and dump quantile stats to JSON.

    Saves two artifacts: ``<name>.png`` (the figure) and
    ``<name>_quantiles.json`` (per-quantile value / count / percentage).

    Args:
        data (list): Numeric values to analyze (e.g. lengths per sample).
        title (str): Figure title.
        bins (int): Number of histogram bins.
        name (str): Base name for both output files.
        xlabel (str): X-axis label.
        ylabel (str): Y-axis label.
        output_path (str, optional): Output directory, created if missing.
            When ``None``, files go to the current working directory.
    """
    sns.set(style="whitegrid")
    plt.figure(figsize=(10, 6))
    sns.histplot(data, kde=True, bins=bins, color="skyblue", edgecolor="black")
    plt.title(title, fontsize=16)
    plt.xlabel(xlabel, fontsize=14)
    plt.ylabel(ylabel, fontsize=14)
    plt.tight_layout()
    if output_path:
        os.makedirs(output_path, exist_ok=True)
        fig_path = os.path.join(output_path, f"{name}.png")
        json_path = os.path.join(output_path, f"{name}_quantiles.json")
    else:
        fig_path = f"{name}.png"
        json_path = f"{name}_quantiles.json"
    plt.savefig(fig_path, dpi=300, bbox_inches="tight")
    plt.show()
    # Fix: release the figure so repeated calls in one process do not leak memory.
    plt.close()
    # Quantile levels 0.00, 0.03, ..., 0.99.
    # NOTE(review): arange's half-open range excludes the 1.0 quantile (the max);
    # kept as-is to preserve existing output — confirm this is intended.
    quantiles = np.arange(0.0, 1.0, 0.03)
    # Fix: convert to an ndarray once, instead of rebuilding np.array(data)
    # on every loop iteration below.
    arr = np.asarray(data)
    quantile_values = np.quantile(arr, quantiles)
    total_count = len(data)
    quantile_stats = {
        "name": name,
        "quantiles": []
    }
    for q, value in zip(quantiles, quantile_values):
        count_below = int(np.sum(arr <= value))
        percentage = float(count_below / total_count * 100)
        quantile_stats["quantiles"].append({
            "quantile": round(float(q), 2),
            "value": round(float(value), 2),
            "count_below": count_below,
            "percentage": round(percentage, 2)
        })
    with open(json_path, "w", encoding="utf-8") as f:
        json.dump(quantile_stats, f, indent=2, ensure_ascii=False)
    print(f"分位点统计已保存至: {json_path}")
def save_to_json(data, filename):
    """Write ``data`` to ``filename`` as pretty-printed UTF-8 JSON.

    Args:
        data: Any JSON-serializable object (typically a list of records).
        filename (str): Destination path; overwritten if it already exists.
    """
    with open(filename, 'w', encoding='utf-8') as f:
        json.dump(data, f, ensure_ascii=False, indent=4)
    # Fix: the message previously printed the literal "(unknown)" instead of
    # the actual destination path.
    print(f"Saved to {filename}, data length: {len(data)}")
def load_json(file_path):
    """Read a JSON file (UTF-8) and return its parsed contents.

    Args:
        file_path (str): Path of the JSON file to read.

    Returns:
        The deserialized object (typically a list of records).
    """
    with open(file_path, "r", encoding="utf-8") as handle:
        payload = json.load(handle)
    print(f"Loaded from {file_path}, data length: {len(payload)}")
    return payload
# Label value ignored by the loss (HuggingFace convention).
IGNORE_INDEX = -100
def process_math(sample, tokenizer):
    """Return the total token count of a chat-formatted prompt + response.

    The prompt (``sample["prompt"]``) is rendered through the tokenizer's
    chat template as a single user turn with a generation prompt, then
    tokenized; the response (``sample["output"]``) is tokenized separately.
    A label sequence is assembled the same way SFT preprocessing would
    (prompt positions masked with IGNORE_INDEX, response + EOS kept), but
    only the sequence length is returned:
    ``len(prompt tokens) + len(response tokens) + 1`` (for EOS).
    """
    prompt_text = tokenizer.apply_chat_template(
        [{'role': 'user', 'content': sample["prompt"]}],
        tokenize=False, add_generation_prompt=True
    )
    prompt_ids = tokenizer(prompt_text, add_special_tokens=False)["input_ids"]
    response_ids = tokenizer(sample["output"], add_special_tokens=False)["input_ids"]
    eos = tokenizer.eos_token_id
    input_ids = prompt_ids + response_ids + [eos]
    labels = [IGNORE_INDEX] * len(prompt_ids) + response_ids + [eos]
    return len(input_ids)
# ---- Script entry: annotate each math sample with its token length ----
# Source file of the raw math SFT data (change to your own input path).
input_file = "/opt/aps/workdir/sunshuang/deep_search/math_data/math_qwq_4524_add_prompt.json"
tokenizer = transformers.AutoTokenizer.from_pretrained(
    "/capacity/userdata/models/Qwen2.5-7B-Instruct", model_max_length=30000
)
# Load the samples, compute per-sample token lengths, and bucket the
# short ones (< 12000 tokens, despite the "1w" = 10k name used in the
# output filename).
samples = load_json(input_file)
annotated = []
token_lengths = []
short_samples = []
for sample in tqdm(samples):
    sample["seq_token_len"] = process_math(sample, tokenizer)
    token_lengths.append(sample["seq_token_len"])
    annotated.append(sample)
    if sample["seq_token_len"] < 12000:
        short_samples.append(sample)
# Visualize the length distribution and persist quantile statistics.
plot_distribution(token_lengths, title="Distribution of Seq Token Length", bins=40, name="seq_token_len", xlabel="Seq Token Length", ylabel="Frequency", output_path=None)
# Save the fully annotated set and the short subset, embedding the counts
# in the filenames.
output_file = f"/opt/aps/workdir/sunshuang/deep_search/math_data/math_qwq_4524_add_prompt_token_{len(annotated)}.json"
save_to_json(annotated, output_file)
output_file_less1w = f"/opt/aps/workdir/sunshuang/deep_search/math_data/math_qwq_4524_add_prompt_less1w_{len(short_samples)}.json"
save_to_json(short_samples, output_file_less1w)