sheep33333 committed on
Commit
00cf27f
·
verified ·
1 Parent(s): 4c09c34

Upload train5.py with huggingface_hub

Files changed (1)
  1. train5.py +393 -0
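
The commit message above refers to the huggingface_hub upload API. For context, a minimal sketch of the kind of call that produces a commit like this one; the repo id below is a placeholder, not taken from this commit:

from huggingface_hub import HfApi

api = HfApi()  # picks up the token from `huggingface-cli login` / HF_TOKEN
api.upload_file(
    path_or_fileobj="train5.py",      # local file to push
    path_in_repo="train5.py",         # destination path inside the repo
    repo_id="sheep33333/your-repo",   # placeholder repo id
    commit_message="Upload train5.py with huggingface_hub",
)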
train5.py ADDED
@@ -0,0 +1,393 @@
+ import pdb
+ import os
+ import gc
+ import time  # used below for wall-clock timing
+ import argparse
+ import functools
+ import torch
+ import torch.optim as optim
+ import torch.distributed as dist
+ import wandb
+ from torch import nn
+ from tqdm import tqdm
+ from torch.utils.data import DataLoader
+ from torch.utils.data.distributed import DistributedSampler
+ from torch.distributed.fsdp import (
+     FullyShardedDataParallel as FSDP,
+     MixedPrecision,
+     ShardingStrategy,
+     FullStateDictConfig,
+     StateDictType,
+ )
+ from torch.distributed.fsdp.wrap import (
+     transformer_auto_wrap_policy,
+ )
+ from transformers import AutoTokenizer, get_cosine_schedule_with_warmup
+ from dataset import *
+ from modelforseminat_v5 import *
+
+
+ ################################# FSDP Config #####################################
+ def setup():
+     # initialize the process group
+     local_rank = int(os.environ['LOCAL_RANK'])
+     torch.cuda.set_device(local_rank)
+     dist.init_process_group(
+         backend='nccl',
+         init_method='env://',
+     )
+
+
+ def cleanup():
+     gc.collect()
+     torch.cuda.empty_cache()
+     dist.destroy_process_group()
+
+
+ def get_fsdp_device():
+     # Call after each process has initialized its distributed environment.
+     local_rank = int(os.environ.get("LOCAL_RANK", 0))  # set automatically by torchrun
+     device = torch.device(f"cuda:{local_rank}")
+     torch.cuda.set_device(device)
+     return device
+
+
+ def setup_model(
+     model_name: str,                        # HF repo id / local directory
+     dtype: str = "bf16",                    # "bf16" | "fp16" | "fp32"
+     chunk_size_limit: int = 5,
+     attn_impl: str = "flash_attention_2",
+     load_model_dir: str | None = None,      # optional .pt weights file
+     decoder_layers: int = 1,
+     encoder_layer: int = 1,
+     mlp: bool = False,
+     position_embedding_type: str = "absolute",
+     base: str | None = None,
+ ):
+     # --- 1. Build the config --------------------------------------------------
+     config = Olmo2ConfigForSemiNAT.from_pretrained(
+         model_name,
+         chunk_size_limit=chunk_size_limit,
+         attn_implementation=attn_impl,
+         decoder_layers=decoder_layers,
+         encoder_layer=encoder_layer,
+         mlp=mlp,
+         position_embedding_type=position_embedding_type,
+     )
+     # --- 2. Pick the requested dtype -------------------------------------------
+     dtype_map = {"bf16": torch.bfloat16, "fp16": torch.float16, "fp32": torch.float32}
+     torch_dtype = dtype_map.get(dtype, torch.float32)
+     # --- 3. Load the base model -------------------------------------------------
+     if base == "scratch":
+         model = Olmo2ForCausalLMForSemiNAT(config).to(torch_dtype)
+         # pdb.set_trace()
+     elif base == "pretrained":
+         model = Olmo2ForCausalLMForSemiNAT.from_pretrained(
+             model_name,
+             config=config,
+             torch_dtype=torch_dtype,
+         )
+         # pdb.set_trace()
+     else:
+         # guard against an undefined `model` when base is misspecified
+         raise ValueError(f"Unknown base: {base!r} (expected 'scratch' or 'pretrained')")
+     # --- 4. If a .pt file is given, load its parameters -------------------------
+     if load_model_dir and os.path.isfile(load_model_dir) and load_model_dir.endswith(".pt"):
+         ckpt = torch.load(load_model_dir, map_location="cpu")
+         missing_keys, unexpected_keys = model.load_state_dict(ckpt, strict=False)
+         print(f"[INFO] Loaded weights from {load_model_dir}")
+         print(
+             f"Loaded with {len(missing_keys)} missing keys and {len(unexpected_keys)} unexpected keys."
+         )
+         if missing_keys:
+             print("Missing keys:", missing_keys)
+         if unexpected_keys:
+             print("Unexpected keys:", unexpected_keys)
+     else:
+         print("[INFO] No extra .pt weights loaded")
+     # --- 5. Tokenizer -----------------------------------------------------------
+     tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+     # pdb.set_trace()
+
+     # config = AutoConfig.from_pretrained(model_name)
+     # model = Olmo2ForCausalLMForSemiNAT(config)  # note: no from_pretrained here
+     # tokenizer = AutoTokenizer.from_pretrained(model_name)
+     return model, tokenizer
+
+ def collate_fn(batch):
+     # Filter out None entries
+     batch = [x for x in batch if x is not None]
+     if len(batch) == 0:
+         return None  # the whole batch is invalid
+
+     input_ids, labels, attention_mask, slice_arr, slice_label = zip(*batch)
+
+     return (
+         torch.stack(input_ids),
+         torch.stack(labels),
+         torch.stack(attention_mask),
+         torch.stack(slice_arr),
+         torch.stack(slice_label),
+     )
+
+ def fsdp_main(args):
+     local_rank = int(os.environ['LOCAL_RANK'])
+     rank = int(os.environ['RANK'])
+     world_size = int(os.environ['WORLD_SIZE'])
+     if args.use_wandb and rank == 0:
+         wandb.init(entity="SemiNAT", project="SemiNAT-Debug", name=args.run_name)
+
+     DEVICE = f"cuda:{local_rank}"
+
+     model, tokenizer = setup_model(
+         args.model_path,
+         args.dtype,
+         args.chunk_size_limit,
+         args.attn_implementation,
+         args.ptm_model_path,
+         args.decoder_layers,
+         args.encoder_layers,
+         args.mlp,
+         args.position_embedding_type,
+         args.base,
+     )
+
+     optimizer = optim.AdamW(
+         model.parameters(),
+         lr=args.lr,
+         betas=args.betas,
+         weight_decay=args.weight_decay,
+         eps=args.eps,
+     )
+
+     # Resolve the dataset class named by --data_type (defined in dataset.py).
+     train_dataset = eval(f"{args.data_type}")(
+         tokenizer,
+         args.data_path,
+         args.max_length
+     )
+     train_sampler = DistributedSampler(train_dataset,
+                                        rank=rank,
+                                        num_replicas=world_size,
+                                        shuffle=True)
+
+     train_dataloader = DataLoader(dataset=train_dataset,
+                                   sampler=train_sampler,
+                                   batch_size=args.batch_size,
+                                   num_workers=args.data_processess_num,
+                                   collate_fn=collate_fn)
+
+     num_training_steps = args.epochs * len(train_dataloader)  # total number of training steps
+     num_warmup_steps = num_training_steps * args.warmup_ratio
+     scheduler = get_cosine_schedule_with_warmup(
+         optimizer,
+         num_warmup_steps=num_warmup_steps,
+         num_training_steps=num_training_steps
+     )
+
+     global_step = 0  # may be overwritten below when resuming
+
+     if args.resume_path:
+         checkpoint = torch.load(args.resume_path, map_location=DEVICE)
+         missing_keys, unexpected_keys = model.load_state_dict(checkpoint["model"], strict=False)
+         print(
+             f"Loaded with {len(missing_keys)} missing keys and {len(unexpected_keys)} unexpected keys."
+         )
+         if missing_keys:
+             print("Missing keys:", missing_keys)
+         if unexpected_keys:
+             print("Unexpected keys:", unexpected_keys)
+
+         optimizer.load_state_dict(checkpoint["optimizer"])
+         scheduler.load_state_dict(checkpoint["scheduler"])
+         global_step = checkpoint.get("global_step", 0)
+
+     print(f"Size of train dataset: {len(train_dataset)}")
+
+     setup()
+
+     Olmo2DecoderLayerForSemiNAT_auto_wrap_policy = functools.partial(
+         transformer_auto_wrap_policy,
+         transformer_layer_cls={
+             Olmo2DecoderLayer,
+             Olmo2DecoderLayerForSemiNAT
+         }
+     )
+
+     sharding_strategy: ShardingStrategy = ShardingStrategy.FULL_SHARD  # SHARD_GRAD_OP for ZeRO-2, FULL_SHARD for ZeRO-3
+     torch.cuda.set_device(local_rank)
+
+     mp_policy = MixedPrecision(
+         param_dtype=torch.bfloat16,
+         reduce_dtype=torch.bfloat16,
+         buffer_dtype=torch.bfloat16,
+     )
+
+     model = FSDP(model,
+                  auto_wrap_policy=Olmo2DecoderLayerForSemiNAT_auto_wrap_policy,
+                  mixed_precision=mp_policy,
+                  sharding_strategy=sharding_strategy,
+                  device_id=torch.cuda.current_device(),
+                  use_orig_params=True)
+
+     torch.autograd.set_detect_anomaly(True)
+
+     loss1_list = []
+     loss2_list = []
+     loss_list = []
+
+     start_time = time.time()
+
+     for epoch in range(1, args.epochs + 1):
+         # t0 = time.time()
+         model.train()
+         # fsdp_loss = torch.zeros(2).to(local_rank)
+
+         if train_sampler:
+             train_sampler.set_epoch(epoch)
+         if rank == 0:
+             inner_pbar = tqdm(range(len(train_dataloader)),
+                               colour="blue",
+                               desc="r0 Training Epoch")
+
+         memories = []
+
+         for batch in train_dataloader:
+             if batch is None:
+                 continue
+             optimizer.zero_grad()
+             loss1, loss2 = model(input_ids=batch[0],
+                                  labels=batch[1],
+                                  attention_mask=batch[2],
+                                  slice_pos=batch[3],
+                                  slice_label=batch[4],
+                                  use_cache=False).loss
+             loss = loss1 + loss2
+             loss1_list.append(loss1.item())
+             loss2_list.append(loss2.item())
+             loss_list.append(loss.item())
+
+             loss.backward()
+             optimizer.step()
+             scheduler.step()  # per-step cosine schedule; num_training_steps is counted in steps
+             mem = torch.cuda.memory_allocated() / (1024 ** 2)
+             memories.append(mem)
+             global_step += 1
+
+             if global_step % args.save_steps == 0:
+                 save_policy = FullStateDictConfig(offload_to_cpu=True,
+                                                   rank0_only=True)
+                 with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT,
+                                           save_policy):
+                     cpu_state = model.state_dict()
+
+                 if rank == 0:
+                     print(f"--> steps: {str(global_step)} saving model ...")
+                     if not os.path.exists(args.save_path):
+                         os.makedirs(args.save_path)
+                     save_name = f"{args.save_name}-steps_{str(global_step)}.pt"
+                     print(f"--> saving as model name {save_name}")
+                     save_path = os.path.join(args.save_path, save_name)
+                     torch.save({
+                         "model": cpu_state,
+                         "optimizer": optimizer.state_dict(),
+                         "scheduler": scheduler.state_dict() if scheduler else None,
+                         "global_step": global_step,
+                         "args": vars(args),
+                     }, save_path)
+
+             if rank == 0:
+                 inner_pbar.update(1)
+             if args.use_wandb and rank == 0:
+                 wandb.log({
+                     "length prediction loss":
+                     sum(loss1_list[-20:]) / len(loss1_list[-20:]),
+                     "nat loss":
+                     sum(loss2_list[-20:]) / len(loss2_list[-20:]),
+                     "loss":
+                     sum(loss_list[-20:]) / len(loss_list[-20:]),
+                     "lr": scheduler.get_last_lr()[0]
+                 })
+
+         avg_mem = sum(memories) / len(memories)
+         print(f"Average memory usage over {len(memories)} steps: {avg_mem:.2f} MB")
+
+         dist.all_reduce(loss, op=dist.ReduceOp.SUM)
+
+         if rank == 0:
+             inner_pbar.close()
+
+     end_time = time.time()
+     print(f"Training time: {end_time - start_time} seconds")
+
+     dist.barrier()
+     cleanup()
+
+
+ ################################# FSDP Config #####################################
+
+ if __name__ == "__main__":
+     # Training settings
+     parser = argparse.ArgumentParser()
+     parser.add_argument('--batch-size',
+                         type=int,
+                         default=4,
+                         metavar='N',
+                         help='input batch size for training (default: 4)')
+     parser.add_argument('--model_path', type=str)
+     parser.add_argument('--save_path', type=str)
+     parser.add_argument('--save_name', type=str)
+     parser.add_argument('--data_path', type=str)
+     parser.add_argument('--data_type', type=str)
+     parser.add_argument('--run_name', type=str)
+     parser.add_argument('--max_length', type=int)
+     parser.add_argument('--chunk_size_limit', type=int)
+     parser.add_argument('--save_steps', type=int, default=5000)
+     parser.add_argument('--data_processess_num', type=int, default=8)
+     parser.add_argument('--epochs',
+                         type=int,
+                         default=2,
+                         metavar='N',
+                         help='number of epochs to train (default: 2)')
+     parser.add_argument('--lr',
+                         type=float,
+                         default=.002,
+                         metavar='LR',
+                         help='learning rate (default: .002)')
+     parser.add_argument('--weight_decay', type=float)
+     parser.add_argument('--betas', type=float, nargs=2)
+     parser.add_argument('--eps', type=float)
+     parser.add_argument('--warmup_ratio', type=float)
+     parser.add_argument('--seed',
+                         type=int,
+                         default=1,
+                         metavar='S',
+                         help='random seed (default: 1)')
+     parser.add_argument('--use_lora', action='store_true', default=False)
+     parser.add_argument("--use_wandb",
+                         action="store_true",
+                         help="whether to use wandb")
+     parser.add_argument('--dtype', type=str)
+     parser.add_argument('--resume_path', type=str, default=None)
+     parser.add_argument('--attn_implementation', type=str)
+     parser.add_argument('--ptm_model_path', type=str, default=None)
+     parser.add_argument('--decoder_layers', type=int, default=1)
+     parser.add_argument('--encoder_layers', type=int, default=1)
+     # NOTE: argparse's type=bool treats any non-empty string as True, so --mlp is a flag here.
+     parser.add_argument('--mlp', action='store_true', default=False)
+     parser.add_argument('--position_embedding_type', type=str, default="absolute", choices=["absolute", "relative"])
+     parser.add_argument('--base', type=str, default="scratch", choices=["scratch", "pretrained"])
+     args = parser.parse_args()
+
+     torch.manual_seed(args.seed)
+
+     fsdp_main(args)
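
For reference, a small sketch of how a checkpoint written by the save block in train5.py could be inspected or reloaded; the path below is a placeholder, and the keys match the dict passed to torch.save above:

import torch

# Placeholder path; real checkpoints are named f"{save_name}-steps_{global_step}.pt" under --save_path.
ckpt = torch.load("checkpoints/run-steps_5000.pt", map_location="cpu")

print(ckpt["global_step"])  # step counter stored by the training loop
print(ckpt["args"])         # the training arguments, saved via vars(args)

# "model" is a full (unsharded) state dict gathered on rank 0 with FullStateDictConfig,
# so it can be loaded into a non-FSDP instance of the model:
# model.load_state_dict(ckpt["model"], strict=False)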