Upload apex-master/tests/distributed/amp_master_params/amp_master_params.py with huggingface_hub
apex-master/tests/distributed/amp_master_params/amp_master_params.py
ADDED
@@ -0,0 +1,70 @@
import torch
import argparse
import os
from apex import amp
# FOR DISTRIBUTED: (can also use torch.nn.parallel.DistributedDataParallel instead)
from apex.parallel import DistributedDataParallel

parser = argparse.ArgumentParser()
# FOR DISTRIBUTED: Parse for the local_rank argument, which will be supplied
# automatically by torch.distributed.launch.
parser.add_argument("--local_rank", default=0, type=int)
args = parser.parse_args()

# FOR DISTRIBUTED: If we are running under torch.distributed.launch,
# the 'WORLD_SIZE' environment variable will also be set automatically.
args.distributed = False
if 'WORLD_SIZE' in os.environ:
    args.distributed = int(os.environ['WORLD_SIZE']) > 1

if args.distributed:
    # FOR DISTRIBUTED: Set the device according to local_rank.
    torch.cuda.set_device(args.local_rank)

    # FOR DISTRIBUTED: Initialize the backend. torch.distributed.launch will provide
    # environment variables, and requires that you use init_method=`env://`.
    torch.distributed.init_process_group(backend='nccl',
                                         init_method='env://')
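    # (torch.distributed.launch exports the RANK, WORLD_SIZE, MASTER_ADDR, and
    # MASTER_PORT environment variables that init_method='env://' reads.)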

torch.manual_seed(torch.distributed.get_rank())

torch.backends.cudnn.benchmark = True

N, D_in, D_out = 64, 1024, 16

# Each process receives its own batch of "fake input data" and "fake target data."
# The "training loop" in each process just uses this fake batch over and over.
# https://github.com/NVIDIA/apex/tree/master/examples/imagenet provides a more realistic
# example of distributed data sampling for both training and validation.
x = torch.randn(N, D_in, device='cuda')
y = torch.randn(N, D_out, device='cuda')

model = torch.nn.Linear(D_in, D_out).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)

model, optimizer = amp.initialize(model, optimizer, opt_level="O2")
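# With opt_level="O2", amp casts the model to FP16 and keeps an FP32 "master"
# copy of each parameter with the optimizer; amp.master_params(optimizer),
# used at the bottom of this file, iterates over those FP32 master copies.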

if args.distributed:
    # FOR DISTRIBUTED: After amp.initialize, wrap the model with
    # apex.parallel.DistributedDataParallel.
    model = DistributedDataParallel(model)
    # torch.nn.parallel.DistributedDataParallel is also fine, with some added args:
    # model = torch.nn.parallel.DistributedDataParallel(model,
    #                                                   device_ids=[args.local_rank],
    #                                                   output_device=args.local_rank)

loss_fn = torch.nn.MSELoss()

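# amp.scale_loss multiplies the loss by the current loss scale so that FP16
# gradients do not underflow; when the context exits, the gradients are
# unscaled again and deposited on the FP32 master params, ready for
# optimizer.step().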
for t in range(500):
    optimizer.zero_grad()
    y_pred = model(x)
    loss = loss_fn(y_pred, y)
    with amp.scale_loss(loss, optimizer) as scaled_loss:
        scaled_loss.backward()
    optimizer.step()

if args.local_rank == 0:
    print("final loss = ", loss)

torch.save(list(model.parameters()), "rank{}model.pth".format(torch.distributed.get_rank()))
torch.save(list(amp.master_params(optimizer)), "rank{}master.pth".format(torch.distributed.get_rank()))
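
This test is meant to be launched with one process per GPU via torch.distributed.launch, e.g. for two GPUs:

python -m torch.distributed.launch --nproc_per_node=2 amp_master_params.py

Each rank then writes its FP16 model params and FP32 master params to disk. Below is a minimal sketch of how those checkpoints could be cross-checked; the helper itself, the two-rank assumption, and the tolerances are illustrative and not part of this commit. Because gradients are allreduced and every rank applies the same update, the ranks should end up with matching params, and under O2 each FP16 model param should match its FP32 master param to within FP16 precision.

import torch

# Load all four checkpoints onto the CPU so the check itself needs no GPU.
model0 = torch.load("rank0model.pth", map_location="cpu")
model1 = torch.load("rank1model.pth", map_location="cpu")
master0 = torch.load("rank0master.pth", map_location="cpu")
master1 = torch.load("rank1master.pth", map_location="cpu")

for p0, p1, m0, m1 in zip(model0, model1, master0, master1):
    # FP16 model params should agree across ranks...
    assert torch.allclose(p0, p1), "model params diverged across ranks"
    # ...and so should the FP32 master params.
    assert torch.allclose(m0, m1), "master params diverged across ranks"
    # Under O2 each model param is the FP16 cast of its FP32 master param.
    assert torch.allclose(p0.float(), m0, rtol=1e-3, atol=1e-5), \
        "model param does not match its master param within FP16 precision"

print("OK: model and master params are consistent")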