sheep33333 committed on
Commit
f5bd390
·
verified ·
1 Parent(s): 9310c94

Upload train5.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. train5.py +10 -8
train5.py CHANGED
@@ -2,6 +2,7 @@ import pdb
2
  from transformers import AutoTokenizer
3
  from torch import nn
4
  import os
 
5
  import torch.distributed as dist
6
  from tqdm import tqdm
7
  from torch.utils.data import DataLoader
@@ -144,7 +145,7 @@ def fsdp_main(args):
144
  rank = int(os.environ['RANK'])
145
  world_size = int(os.environ['WORLD_SIZE'])
146
  if args.use_wandb and rank == 0:
147
- wandb.init(entity="SemiNAT", project="SemiNAT-SFT", name=args.run_name)
148
 
149
  local_rank = int(os.environ['LOCAL_RANK'])
150
  DEVICE = f"cuda:{local_rank}"
@@ -250,7 +251,7 @@ def fsdp_main(args):
250
 
251
  if train_sampler:
252
  train_sampler.set_epoch(epoch)
253
-
254
  if rank == 0:
255
  inner_pbar = tqdm(range(len(train_dataloader)),
256
  colour="blue",
@@ -278,8 +279,9 @@ def fsdp_main(args):
278
 
279
  loss.backward()
280
  optimizer.step()
281
- mem = torch.cuda.memory_allocated() / (1024 ** 2)
282
- memories.append(mem)
 
283
  global_step += 1
284
 
285
  if global_step % args.save_steps == 0:
@@ -327,16 +329,16 @@ def fsdp_main(args):
327
  })
328
 
329
 
330
- avg_mem = sum(memories) / len(memories)
331
- print(f"Average memory usage over {len(memories)} steps: {avg_mem:.2f} MB")
332
 
333
 
334
- dist.all_reduce(loss, op=dist.ReduceOp.SUM)
335
 
336
  if rank == 0:
337
  inner_pbar.close()
338
 
339
- scheduler.step()
340
 
341
 
342
  end_time = time.time()
 
2
  from transformers import AutoTokenizer
3
  from torch import nn
4
  import os
5
+ import time
6
  import torch.distributed as dist
7
  from tqdm import tqdm
8
  from torch.utils.data import DataLoader
 
145
  rank = int(os.environ['RANK'])
146
  world_size = int(os.environ['WORLD_SIZE'])
147
  if args.use_wandb and rank == 0:
148
+ wandb.init(entity="SemiNAT", project="SemiNAT-Debug", name=args.run_name)
149
 
150
  local_rank = int(os.environ['LOCAL_RANK'])
151
  DEVICE = f"cuda:{local_rank}"
 
251
 
252
  if train_sampler:
253
  train_sampler.set_epoch(epoch)
254
+
255
  if rank == 0:
256
  inner_pbar = tqdm(range(len(train_dataloader)),
257
  colour="blue",
 
279
 
280
  loss.backward()
281
  optimizer.step()
282
+ scheduler.step()
283
+ # mem = torch.cuda.memory_allocated() / (1024 ** 2)
284
+ # memories.append(mem)
285
  global_step += 1
286
 
287
  if global_step % args.save_steps == 0:
 
329
  })
330
 
331
 
332
+ # avg_mem = sum(memories) / len(memories)
333
+ # print(f"Average memory usage over {len(memories)} steps: {avg_mem:.2f} MB")
334
 
335
 
336
+ # dist.all_reduce(loss, op=dist.ReduceOp.SUM)
337
 
338
  if rank == 0:
339
  inner_pbar.close()
340
 
341
+
342
 
343
 
344
  end_time = time.time()