ShijiaD commited on
Commit
502eb2c
·
verified ·
1 Parent(s): 4b10e81

Delete train_ast.py

Browse files
Files changed (1) hide show
  1. train_ast.py +0 -214
train_ast.py DELETED
@@ -1,214 +0,0 @@
1
# Notes:
# - This script fine-tunes LLaMA-3.1-8B (4-bit) with LoRA, with gradient
#   checkpointing enabled.
# - It uses a "packing-aware" estimate of gradient_accumulation_steps (GA):
#   greedy bin packing over sample token lengths estimates the effective fill
#   ratio f_pack after packing, from which the GA needed for roughly
#   50k tokens per optimizer update is derived.
# - Training statistics (total tokens / tokens per step / peak VRAM / elapsed
#   time) are written to outputs/training_summary.txt.
#
8
-
9
- # =========================================================
10
-
11
- from unsloth import FastLanguageModel, is_bfloat16_supported
12
- import torch, time, numpy as np
13
- from transformers import TrainingArguments
14
- from trl import SFTTrainer
15
- from datasets import load_dataset
16
-
17
# ===== Basic configuration =====
# Context window; also the bin size for the packing estimate below.
max_seq_length = 5000
# Micro-batch size per device.
per_device_bs = 10
# Target token budget per optimizer update (~50k tokens/update).
TARGET_TOKENS_PER_UPDATE = 50_000
21
-
22
# ===== Distributed world-size helper =====
def get_world_size():
    """Return the number of distributed processes, or 1 when not running distributed."""
    dist = torch.distributed
    if not (dist.is_available() and dist.is_initialized()):
        return 1
    return dist.get_world_size()
27
-
28
# Step 1: load the pretrained model (4-bit quantized).
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="unsloth/Meta-Llama-3.1-8B-Instruct-bnb-4bit",
    max_seq_length=max_seq_length,
    load_in_4bit=True,
    dtype=None,  # presumably auto-selects the compute dtype — confirm against Unsloth docs
)
35
-
36
# Step 2: attach LoRA adapters (light rank + small dropout; settings preserved).
_lora_targets = [
    "q_proj", "k_proj", "v_proj", "o_proj",
    "gate_proj", "up_proj", "down_proj",
]
model = FastLanguageModel.get_peft_model(
    model,
    r=16,
    lora_alpha=16,
    target_modules=_lora_targets,
    # NOTE: to get Unsloth's "full-speed patch", set this to 0.0 in a SEPARATE experiment.
    lora_dropout=0.05,
    bias="none",
    use_gradient_checkpointing="unsloth",  # lowers VRAM usage
    random_state=3407,
    use_rslora=False,
    loftq_config=None,
)
52
-
53
# Step 3: data and formatting (currently AST -> description).
# [If switching to code input: change "AST sequence" to "Code" and "### AST:" to "### Code:" in the PROMPT.]
alpaca_prompt = r"""

Given the following AST sequence, generate a natural language description for it.

### AST:
{}

### Description:
{}"""
64
-
65
EOS_TOKEN = tokenizer.eos_token

def formatting_prompts_func(examples):
    """Render (sequence, docstring) pairs into prompt texts, each ending with EOS."""
    # [If switching input form: replace 'sequence' with the relevant field, e.g. 'code' or 'sbt'.]
    pairs = zip(examples["sequence"], examples["docstring"])
    rendered = [alpaca_prompt.format(seq, doc) + EOS_TOKEN for seq, doc in pairs]
    return {"text": rendered}
75
-
76
# [If switching input form: swap the dataset name/split, e.g. ShijiaD/CodeXGLUE-Code-Docstring.]
dataset = load_dataset(
    "ShijiaD/workshop-ast-compress-docstring", split="train"
).map(formatting_prompts_func, batched=True)
79
-
80
# ====== Packing-aware GA estimation (estimate f_pack via greedy bin packing) ======
WORLD_SIZE = get_world_size()

# Estimate on a fixed-seed subsample of at most 5000 examples.
SAMPLE_SIZE = min(5000, len(dataset))
if SAMPLE_SIZE == len(dataset):
    sample_ds = dataset
else:
    rng = np.random.RandomState(3407)
    sample_idx = rng.choice(len(dataset), size=SAMPLE_SIZE, replace=False)
    sample_ds = dataset.select(sample_idx)
89
-
90
# 1) Count token length per sample (no truncation; used only for the estimate).
def _len_map(batch):
    """Map a batch of {"text": ...} to {"_len": per-sample token counts}.

    Handles both batched input (list of strings -> list of id lists) and a
    single string (flat id list). Fix: an empty batch now returns an empty
    list instead of raising IndexError on ``ids[0]``.
    """
    enc = tokenizer(batch["text"], add_special_tokens=True, truncation=False)
    ids = enc["input_ids"]
    if not ids:
        # Empty batch: nothing to measure (original code crashed here).
        return {"_len": []}
    if isinstance(ids[0], list):
        return {"_len": [len(x) for x in ids]}
    return {"_len": [len(ids)]}
99
-
100
# Tokenize the sample and keep only positive integer lengths.
_len_col = sample_ds.map(
    _len_map, batched=True, desc="Tokenizing for packing estimate"
)["_len"]
lens = [int(v) for v in _len_col if isinstance(v, (int, np.integer)) and v > 0]
if not lens:
    raise RuntimeError("长度估算失败:样本长度列表为空。")
104
-
105
# 2) Greedy bin packing that mimics TRL's packing (fill up to max_seq_length,
#    charging an estimated 1 separator token between samples within a bin).
def _estimate_pack_fill(sample_lens, window, sep_cost=1):
    """Estimate the effective fill ratio after sequence packing.

    Greedily packs ``sample_lens`` token counts into bins of size ``window``,
    charging ``sep_cost`` tokens between consecutive samples in one bin.
    Over-long samples are clamped to ``window`` (they are truncated during
    training anyway). Returns a ratio clamped to [1e-3, 1.0] for numeric
    stability; an empty input yields 1.0.
    """
    bins_used = []  # effective content tokens per bin
    current = 0
    for n in sample_lens:
        n = min(n, window)  # over-window samples count at the cap
        need = n + (sep_cost if current > 0 else 0)
        if current + need > window:
            bins_used.append(current)
            current = n  # start a new bin with just this sample
        else:
            current += need
    if current > 0:
        bins_used.append(current)
    capacity = len(bins_used) * window
    ratio = (sum(bins_used) / capacity) if capacity > 0 else 1.0
    return min(max(ratio, 1e-3), 1.0)

f_pack = _estimate_pack_fill(lens, max_seq_length)
126
-
127
# 3) Turn f_pack into per-microbatch token throughput and derive GA from it.
tokens_per_micro = per_device_bs * max_seq_length * f_pack * WORLD_SIZE
gradient_accumulation_steps = max(
    1, int(round(TARGET_TOKENS_PER_UPDATE / max(1.0, tokens_per_micro)))
)

approx_tokens_per_update = int(round(
    per_device_bs * max_seq_length * f_pack * gradient_accumulation_steps * WORLD_SIZE
))
print(
    f"[TokenBudget/packing-aware] fill≈{f_pack:.3f}, per_device_bs={per_device_bs}, "
    f"GA={gradient_accumulation_steps}, ≈tokens/update={approx_tokens_per_update:,} "
    f"(target≈{TARGET_TOKENS_PER_UPDATE:,})"
)
138
-
139
# Step 4: training hyperparameters.
training_args = TrainingArguments(
    output_dir="outputs",
    seed=3407,
    # Batching: GA computed above so each update sees ~TARGET_TOKENS_PER_UPDATE tokens.
    per_device_train_batch_size=per_device_bs,
    gradient_accumulation_steps=gradient_accumulation_steps,  # dynamic GA
    num_train_epochs=3,
    # Optimization.
    learning_rate=5e-5,
    warmup_ratio=0.05,
    weight_decay=0.01,
    lr_scheduler_type="linear",
    optim="adamw_8bit",
    # Precision / memory.
    # NOTE(review): bf16 is hardcoded True; is_bfloat16_supported is imported
    # at the top of the file but never consulted — confirm target GPU supports bf16.
    fp16=False,
    bf16=True,
    gradient_checkpointing=True,
    # Logging / checkpointing.
    logging_steps=20,
    save_strategy="epoch",
)
157
-
158
# Build the trainer with packing=True, matching the estimate above.
trainer = SFTTrainer(
    model=model,
    tokenizer=tokenizer,
    args=training_args,
    train_dataset=dataset,
    dataset_text_field="text",
    max_seq_length=max_seq_length,
    packing=True,  # keep consistent with the f_pack estimate
    dataset_num_proc=2,
)
169
-
170
# Step 5: report GPU info and the reserved memory baseline before training.
gpu_stats = torch.cuda.get_device_properties(0)
max_memory = round(gpu_stats.total_memory / 1024**3, 3)
start_reserved = round(torch.cuda.max_memory_reserved() / 1024**3, 3)
print(f"GPU = {gpu_stats.name}. Max memory = {max_memory} GB.")
print(f"Reserved before train: {start_reserved} GB.")
176
-
177
# Step 6: train and record wall-clock time.
t0 = time.time()
trainer_stats = trainer.train()
elapsed_hours = (time.time() - t0) / 3600.0
182
-
183
# Step 7: rough training statistics (same assumptions as the f_pack/GA estimate).
used_memory = round(torch.cuda.max_memory_reserved() / 1024**3, 3)
used_for_lora = round(used_memory - start_reserved, 3)
print(f"Peak reserved memory = {used_memory} GB.")
print(f"Peak reserved memory for training = {used_for_lora} GB.")
print(f"Peak reserved memory % of max memory = {used_memory/max_memory*100:.2f}%.")

# Approximate token throughput; NOTE(review): approx_tokens_per_update is
# recomputed here with slightly different rounding than the pre-training print.
total_steps = trainer.state.global_step
approx_tokens_per_micro = int(round(per_device_bs * max_seq_length * f_pack * WORLD_SIZE))
approx_tokens_per_update = approx_tokens_per_micro * gradient_accumulation_steps
total_train_tokens = int(total_steps * approx_tokens_per_update)
194
-
195
# Assemble the summary once, then both print it and persist it to disk.
summary_lines = [
    "\n================= Training Summary =================",
    f"Total train tokens (approx): {total_train_tokens:,}",
    f"Avg tokens / microbatch: {approx_tokens_per_micro:,}",
    f"Approx tokens / update: {approx_tokens_per_update:,}",
    f"Elapsed time: {elapsed_hours:.2f} hours",
    f"Peak reserved memory: {used_memory} GB",
    "====================================================\n",
]

print("\n".join(summary_lines))

import os
os.makedirs("outputs", exist_ok=True)
with open("outputs/training_summary.txt", "w", encoding="utf-8") as f:
    f.write("\n".join(summary_lines))
210
-
211
# Step 8: persist the LoRA adapter and tokenizer (currently to lora_model_ast).
# [If switching input form: rename the directory, e.g. lora_model_code / lora_model_sbt.]
_save_dir = "lora_model_ast"
model.save_pretrained(_save_dir)
tokenizer.save_pretrained(_save_dir)