sheep33333 committed on
Commit
a7dc2c4
·
verified ·
1 Parent(s): 93e682d

Upload train3.py with huggingface_hub

Files changed (1)
  1. train3.py +472 -0
train3.py ADDED
@@ -0,0 +1,472 @@
+ import json
+ import pdb
+ from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, OlmoeForCausalLM, OlmoeModel
+ from transformers.models.auto.modeling_auto import MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING_NAMES
+ import copy
+ from transformers.modeling_outputs import (
+     MoeCausalLMOutputWithPast,
+     MoeModelOutputWithPast,
+ )
+ from collections import defaultdict
+ import numpy as np
+ import math
+ from torch import nn
+ import pandas as pd
+ from transformers.cache_utils import Cache, DynamicCache, StaticCache
+ from dataclasses import dataclass
+ # from transformers.models.olmoe.configuration_olmoe import OlmoeConfig
+ # from transformers.models.olmoe.modeling_olmoe import OlmoeMLP, OlmoeAttention, OlmoeFlashAttention2, OlmoeSdpaAttention, OlmoeRMSNorm, OlmoeSparseMoeBlock, apply_rotary_pos_emb, repeat_kv, OlmoeRotaryEmbedding
+ import os
+ import sys
+ import torch.distributed as dist
+ from tqdm import tqdm
+ from torch.utils.data import DataLoader
+ from torch.utils.data.distributed import DistributedSampler
+ import transformers
+ import pickle
+
+ # from transformers.models.olmoe.configuration_olmoe import OlmoeConfig
+ from dataset import *
+ # from utils import flash_attn_forward, flash_attn_prepare_decoder_attention_mask, get_multiround_data
+ from peft import (get_peft_model, PeftModel)
+ import random
+ from config import *
+ from datasets import Dataset, DatasetDict, load_dataset
+ import wandb
+ import gc
+ import os
+ import argparse
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ import torch.optim as optim
+ import functools
+ from torch.optim.lr_scheduler import StepLR
+ import torch.nn.functional as F
+ import torch.distributed as dist
+ import torch.multiprocessing as mp
+ from torch.nn.parallel import DistributedDataParallel as DDP
+ from torch.utils.data.distributed import DistributedSampler
+
+ from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
+     checkpoint_wrapper, CheckpointImpl)
+
+ from torch.distributed.fsdp import (
+     FullyShardedDataParallel as FSDP,
+     MixedPrecision,
+     BackwardPrefetch,
+     ShardingStrategy,
+     FullStateDictConfig,
+     StateDictType,
+ )
+ from torch.distributed.fsdp.wrap import (
+     transformer_auto_wrap_policy,
+     enable_wrap,
+     wrap,
+ )
+ from functools import partial
+ from torch.utils.data import DataLoader
+ from pathlib import Path
+ from typing import Type, List, Optional, Tuple, Union
+ from modelforseminat_v4 import *
+
+ from torch.optim.lr_scheduler import _LRScheduler
+
+ class WarmupCosineScheduler(_LRScheduler):
+
+     def __init__(self,
+                  optimizer,
+                  warmup_steps,
+                  total_steps,
+                  min_lr=0.0,
+                  last_epoch=-1):
+         # self.warmup_steps = warmup_steps
+         self.total_steps = total_steps
+         self.min_lr = min_lr
+         if isinstance(warmup_steps, float) and 0 < warmup_steps < 1:
+             self.warmup_steps = int(warmup_steps * total_steps)
+         else:
+             self.warmup_steps = int(warmup_steps)
+         super().__init__(optimizer, last_epoch)
+
+     def get_lr(self):
+         step = self.last_epoch + 1
+         lrs = []
+
+         for base_lr in self.base_lrs:
+             if step < self.warmup_steps:
+                 # Linear warmup
+                 lr = base_lr * step / self.warmup_steps
+             else:
+                 # Cosine decay
+                 progress = (step - self.warmup_steps) / max(
+                     1, self.total_steps - self.warmup_steps)
+                 cosine_decay = 0.5 * (1 + math.cos(math.pi * progress))
+                 lr = self.min_lr + (base_lr - self.min_lr) * cosine_decay
+
+             lrs.append(lr)
+
+         return lrs
+
+
+
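+ # Usage note for WarmupCosineScheduler: a float warmup_steps in (0, 1) is treated as a
+ # fraction of total_steps. For example (hypothetical values), warmup_steps=0.1 with
+ # total_steps=10000 warms up linearly over the first 1000 scheduler steps, then decays
+ # the learning rate along a cosine curve from the base lr down to min_lr.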
+ ################################# FSDP Config #####################################
+ def setup():
+     # initialize the process group
+     local_rank = int(os.environ['LOCAL_RANK'])
+     torch.cuda.set_device(local_rank)
+     dist.init_process_group(
+         backend='nccl',
+         init_method='env://',
+     )
+
+
+ def cleanup():
+     gc.collect()
+     torch.cuda.empty_cache()
+     dist.destroy_process_group()
+
+
+ def get_fsdp_device():
+     # Called in each process after the distributed environment has been initialized
+     local_rank = int(os.environ.get("LOCAL_RANK", 0))  # set automatically by torchrun
+     device = torch.device(f"cuda:{local_rank}")
+     torch.cuda.set_device(device)
+     return device
+
+
+ def load_trained_model(model_name):
+     DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
+
+     olmo_path = "/AIRvePFS/ai4science/users/ai4science/users/zyk/seminat_backup/model/OLMo-2-0425-1B"
+     pt_path = "/AIRvePFS/ai4science/users/ai4science/users/zyk/seminat/ckp/sft-v4-1e3-len2-fc-chunklimit2-jueduipos/sft-v4-1e3-len2-fc-chunklimit2-jueduipos-steps_300.pt"
+     config_path = "/AIRvePFS/ai4science/users/ai4science/users/zyk/seminat_backup/model/OLMo-2-0425-1B/config.json"
+
+
+     config = AutoConfig.from_pretrained(olmo_path)
+     model = Olmo2ForCausalLMForSemiNAT.from_pretrained(olmo_path,
+                                                        config=config,
+                                                        torch_dtype=torch.bfloat16)
+     state_dict = torch.load(pt_path, map_location=DEVICE, weights_only=True)
+     missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
+     print(
+         f"Loaded with {len(missing_keys)} missing keys and {len(unexpected_keys)} unexpected keys."
+     )
+     if missing_keys:
+         print("Missing keys:", missing_keys)
+     if unexpected_keys:
+         print("Unexpected keys:", unexpected_keys)
+
+     model = model.to(DEVICE)
+
+     tokenizer = AutoTokenizer.from_pretrained(olmo_path)
+
+     return model, tokenizer
+
+
+ def setup_model(model_name):
+     model = Olmo2ForCausalLMForSemiNAT.from_pretrained(model_name)
+     tokenizer = AutoTokenizer.from_pretrained(model_name)
+     # config = AutoConfig.from_pretrained(model_name)
+     # model = Olmo2ForCausalLMForSemiNAT(config)  # note: from_pretrained is not used here
+     # tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+     return model, tokenizer
+
+
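+ # fsdp_main: end-to-end FSDP SFT loop. It builds the dataset/dataloader with a
+ # DistributedSampler, wraps the SemiNAT model with FSDP (bf16 mixed precision and a
+ # transformer auto-wrap policy), trains with AdamW plus the warmup-cosine schedule
+ # above, and periodically saves a rank-0 full state dict every --save_steps steps.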
+ def fsdp_main(args):
+     local_rank = int(os.environ['LOCAL_RANK'])
+     rank = int(os.environ['RANK'])
+     world_size = int(os.environ['WORLD_SIZE'])
+     if args.use_wandb and rank == 0:
+         wandb.init(entity="SemiNAT", project="SemiNAT-SFT", name=args.run_name)
+
+     model, tokenizer = setup_model(args.model_path)
+     # model, tokenizer = load_trained_model(args.model_path)
+
+     model.config.chunk_size_limit = args.chunk_size_limit
+
+     # pdb.set_trace()
+
+     if ".pkl" in args.data_path:
+         train_dataset = pickle.load(open(args.data_path, "rb"))
+     else:
+         datasets = pd.read_parquet(args.data_path)
+         train_dataset = eval(f"{args.data_type}")(
+             tokenizer,
+             datasets,  # your data preprocessing function
+             args.max_length,  # your max input length
+             args.data_processess_num)
+
+     train_sampler = DistributedSampler(train_dataset,
+                                        rank=rank,
+                                        num_replicas=world_size,
+                                        shuffle=True)
+     train_dataloader = DataLoader(dataset=train_dataset,
+                                   sampler=train_sampler,
+                                   batch_size=args.batch_size)
+
+     print(f"Size of train dataset: {len(train_dataset)}")
+
+     setup()
+
+     Olmo2DecoderLayerForSemiNAT_auto_wrap_policy = functools.partial(
+         transformer_auto_wrap_policy,
+         transformer_layer_cls={
+             Olmo2DecoderLayerForSemiNAT,
+             NATDecoderForSemiNAT,
+         })
+
+     sharding_strategy: ShardingStrategy = ShardingStrategy.FULL_SHARD  # SHARD_GRAD_OP for ZeRO-2, FULL_SHARD for ZeRO-3
+     torch.cuda.set_device(local_rank)
+
+     bfSixteen = MixedPrecision(
+         param_dtype=torch.bfloat16,
+         reduce_dtype=torch.bfloat16,
+         buffer_dtype=torch.bfloat16,
+     )
+
+     # if bf16_ready:
+     mp_policy = bfSixteen
+     # else:
+     #     mp_policy = None  # defaults to fp32
+
+     # if args.use_lora:
+     #     model = get_peft_model(model, lora_config)
+
+     # model is on CPU before input to FSDP
+     model = FSDP(model,
+                  auto_wrap_policy=Olmo2DecoderLayerForSemiNAT_auto_wrap_policy,
+                  mixed_precision=mp_policy,
+                  sharding_strategy=sharding_strategy,
+                  device_id=torch.cuda.current_device(),
+                  use_orig_params=True)
+
+     optimizer = optim.AdamW(
+         model.parameters(),
+         lr=args.lr,
+         betas=args.betas,
+         weight_decay=args.weight_decay,
+         eps=args.eps,
+     )
+
+     # scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
+     scheduler = WarmupCosineScheduler(
+         optimizer=optimizer,             # optimizer instance
+         warmup_steps=args.warmup_steps,  # warmup steps (count or fraction)
+         total_steps=args.total_steps,    # total training steps
+         min_lr=args.min_lr               # minimum learning rate
+     )
+
+     torch.autograd.set_detect_anomaly(True)
+
+     loss1_list = []
+     loss2_list = []
+     loss_list = []
+
+     global_step = 0
+     for epoch in range(1, args.epochs + 1):
+         # t0 = time.time()
+         model.train()
+         local_rank = int(os.environ['LOCAL_RANK'])
+         # fsdp_loss = torch.zeros(2).to(local_rank)
+
+         if train_sampler:
+             train_sampler.set_epoch(epoch)
+         if rank == 0:
+             inner_pbar = tqdm(range(len(train_dataloader)),
+                               colour="blue",
+                               desc="r0 Training Epoch")
+         for batch in train_dataloader:
+             optimizer.zero_grad()
+             # the SemiNAT model's .loss is a pair: (length-prediction loss, NAT loss),
+             # matching the wandb keys logged below
+             loss1, loss2 = model(input_ids=batch[0],
+                                  labels=batch[1],
+                                  attention_mask=batch[2],
+                                  slice_pos=batch[3],
+                                  use_cache=False).loss
+             loss = loss1 + loss2
+             # loss = loss2
+             loss1_list.append(loss1.item())
+             loss2_list.append(loss2.item())
+             loss_list.append(loss.item())
+             # pdb.set_trace()
+
+             # if torch.isnan(loss):
+             #     print(f"Step {global_step}: loss is NaN, entering pdb …")
+             #     pdb.set_trace()
+
+             # print(f"loss1:{loss1},loss2:{loss2}")
+             loss.backward()
+
+             # Per-parameter gradient norms: log the L2 norm of each module's own parameters
+             for name, module in model.named_modules():
+                 total_norm = 0.0
+                 param_count = 0
+                 for param in module.parameters(recurse=False):
+                     if param.grad is not None:
+                         total_norm += param.grad.data.norm(2).item()**2
+                         param_count += 1
+                 if param_count > 0:
+                     total_norm = total_norm**0.5
+                     if args.use_wandb and rank == 0:  # wandb is only initialized on rank 0
+                         wandb.log({f"grad_norm/{name}": total_norm},
+                                   step=global_step)
+
+             # Per-layer gradient norms
+             # layer_grads = defaultdict(list)
+             # for name, module in model.named_modules():
+             #     # only handle structures like encoder.layers.N (adjust the match condition as needed)
+             #     # pdb.set_trace()
+             #     if "model.layers." in name or "model.decoder" in name or "model.encoder" in name:
+             #         if "model.layers" in name:
+             #             # extract the layer index (e.g. encoder.layers.0 -> layer0)
+             #             parts = name.split(".")
+             #             try:
+             #                 layer_idx = int(parts[3])
+             #             except (IndexError, ValueError):
+             #                 pdb.set_trace()
+             #                 # continue
+
+             #             layer_key = f"layer{layer_idx}"
+             #         else:
+             #             layer_key = "decoder" if "model.decoder" in name else "encoder"
+
+             #         # collect the gradients of this module's own parameters (not recursive)
+             #         for param in module.parameters(recurse=False):
+             #             if param.grad is not None:
+             #                 layer_grads[layer_key].append(
+             #                     param.grad.detach().flatten())
+             #     if "lm_head" in name:
+             #         layer_key = "lm_head"
+             #         layer_grads[layer_key].append(
+             #             param.grad.detach().flatten())
+
+             # # merge the gradients of all submodules in each layer and log the overall L2 norm
+             # for layer_key, grads in layer_grads.items():
+             #     if grads:
+             #         total_grad = torch.cat(grads).norm(2).item()
+             #         wandb.log({f"grad_norm/{layer_key}": total_grad},
+             #                   step=global_step)
+
+             optimizer.step()
+
+             global_step += 1
+
+             if global_step % args.save_steps == 0:
+                 save_policy = FullStateDictConfig(offload_to_cpu=True,
+                                                   rank0_only=True)
+                 with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT,
+                                           save_policy):
+                     cpu_state = model.state_dict()
+
+                 if rank == 0:
+                     print(f"--> steps: {str(global_step)} saving model ...")
+                     if not os.path.exists(args.save_path):
+                         os.makedirs(args.save_path)
+                     save_name = f"{args.save_name}-steps_{str(global_step)}.pt"
+                     print(f"--> saving as model name {save_name}")
+                     save_path = os.path.join(args.save_path, save_name)
+                     torch.save(cpu_state, save_path)
+
+             if rank == 0:
+                 inner_pbar.update(1)
+                 if args.use_wandb:
+                     wandb.log({
+                         "length prediction loss":
+                         sum(loss1_list[-20:]) / len(loss1_list[-20:]),
+                         "nat loss":
+                         sum(loss2_list[-20:]) / len(loss2_list[-20:]),
+                         "loss":
+                         sum(loss_list[-20:]) / len(loss_list[-20:])
+                     })
+
+             dist.all_reduce(loss, op=dist.ReduceOp.SUM)
+
+         if rank == 0:
+             inner_pbar.close()
+
+         # the scheduler is advanced once per epoch; each call counts as one scheduler step
+         scheduler.step()
+
+         # if rank == 0:
+         #     print(f"--> entering save model state")
+
+         # save_policy = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
+         # with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT,
+         #                           save_policy):
+         #     cpu_state = model.state_dict()
+
+         # if rank == 0:
+         #     print(f"--> epoch: {str(epoch)} saving model ...")
+         #     if not os.path.exists(args.save_path):
+         #         os.makedirs(args.save_path)
+         #     save_name = f"{args.save_name}-epoch_{str(epoch)}.pt"
+         #     print(f"--> saving as model name {save_name}")
+         #     save_path = os.path.join(args.save_path, save_name)
+         #     torch.save(cpu_state, save_path)
+
+     dist.barrier()
+     cleanup()
+
+
+ ################################# FSDP Config #####################################
+
+ if __name__ == "__main__":
+     # Training settings
+     parser = argparse.ArgumentParser()
+     parser.add_argument('--batch-size',
+                         type=int,
+                         default=4,
+                         metavar='N',
+                         help='input batch size for training (default: 4)')
+     parser.add_argument('--model_path', type=str)
+     parser.add_argument('--save_path', type=str)
+     parser.add_argument('--save_name', type=str)
+     parser.add_argument('--data_path', type=str)
+     parser.add_argument('--data_type', type=str)
+     parser.add_argument('--run_name', type=str)
+     parser.add_argument('--max_length', type=int)
+     parser.add_argument('--chunk_size_limit', type=int)
+     parser.add_argument('--save_steps', type=int, default=5000)
+     parser.add_argument('--data_processess_num', type=int, default=8)
+     parser.add_argument('--epochs',
+                         type=int,
+                         default=2,
+                         metavar='N',
+                         help='number of epochs to train (default: 2)')
+     parser.add_argument('--lr',
+                         type=float,
+                         default=.002,
+                         metavar='LR',
+                         help='learning rate (default: .002)')
+     parser.add_argument('--gamma',
+                         type=float,
+                         default=0.7,
+                         metavar='M',
+                         help='Learning rate step gamma (default: 0.7)')
+     parser.add_argument('--weight_decay', type=float)
+     parser.add_argument('--eps', type=float)
+     parser.add_argument('--decay_norm_and_bias', type=bool)
+     parser.add_argument(
+         "--betas",
+         type=float,
+         nargs=2,  # expects two float values
+         default=[0.9, 0.999]  # optional default
+     )
+     parser.add_argument('--decay_embeddings', type=bool)
+     parser.add_argument('--warmup_steps', type=float)
+     parser.add_argument('--total_steps', type=int)
+     parser.add_argument('--min_lr', type=float)
+     parser.add_argument('--seed',
+                         type=int,
+                         default=1,
+                         metavar='S',
+                         help='random seed (default: 1)')
+     parser.add_argument('--use_lora', action='store_true', default=False)
+     parser.add_argument("--use_wandb",
+                         action="store_true",
+                         help="whether to use wandb")
+
+     args = parser.parse_args()
+
+     torch.manual_seed(args.seed)
+
+     fsdp_main(args)
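+
+ # Example launch (hypothetical values; paths and the dataset class name are placeholders),
+ # since LOCAL_RANK / RANK / WORLD_SIZE are read from the environment as set by torchrun:
+ #   torchrun --nproc_per_node=8 train3.py \
+ #       --model_path ./OLMo-2-0425-1B --data_path ./train.parquet --data_type SFTDataset \
+ #       --max_length 2048 --chunk_size_limit 2 --batch-size 4 \
+ #       --lr 1e-3 --betas 0.9 0.999 --weight_decay 0.1 --eps 1e-8 \
+ #       --warmup_steps 0.03 --total_steps 10000 --min_lr 0.0 \
+ #       --save_path ./ckp --save_name run1 --run_name run1 --use_wandb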