HelinXu committed on
Commit
1ec3da2
·
1 Parent(s): 9622f59

Upload 5 files

Browse files
detection/configs/Base-RCNN-FPN.yaml ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Shared base config for Faster/Mask R-CNN with an FPN backbone (detectron2).
# Derived configs (e.g. faster_rcnn_R_50_FPN_1x.yaml) inherit from this file.
MODEL:
  SEM_SEG_HEAD:
    NUM_CLASSES: 15  # number of UI element classes in the dora dataset
  META_ARCHITECTURE: "GeneralizedRCNN"
  BACKBONE:
    NAME: "build_resnet_fpn_backbone"
  RESNETS:
    OUT_FEATURES: ["res2", "res3", "res4", "res5"]
  FPN:
    IN_FEATURES: ["res2", "res3", "res4", "res5"]
  ANCHOR_GENERATOR:
    SIZES: [[32], [64], [128], [256], [512]]  # One size for each in feature map
    ASPECT_RATIOS: [[0.5, 1.0, 2.0]]  # Three aspect ratios (same for all in feature maps)
  RPN:
    IN_FEATURES: ["p2", "p3", "p4", "p5", "p6"]
    PRE_NMS_TOPK_TRAIN: 2000  # Per FPN level
    PRE_NMS_TOPK_TEST: 1000  # Per FPN level
    # Detectron1 uses 2000 proposals per-batch,
    # (See "modeling/rpn/rpn_outputs.py" for details of this legacy issue)
    # which is approximately 1000 proposals per-image since the default batch size for FPN is 2.
    POST_NMS_TOPK_TRAIN: 1000
    POST_NMS_TOPK_TEST: 1000
  ROI_HEADS:
    NAME: "StandardROIHeads"
    IN_FEATURES: ["p2", "p3", "p4", "p5"]
  ROI_BOX_HEAD:
    NAME: "FastRCNNConvFCHead"
    NUM_FC: 2
    POOLER_RESOLUTION: 7
  ROI_MASK_HEAD:
    NAME: "MaskRCNNConvUpsampleHead"
    NUM_CONV: 4
    POOLER_RESOLUTION: 14
DATASETS:
  # Names must match the datasets registered in train.py's main().
  TRAIN: ("train_dora_ui",)
  TEST: ("valid_dora_ui",)
SOLVER:
  IMS_PER_BATCH: 16
  BASE_LR: 0.0005
  STEPS: (60000, 80000)  # LR decay milestones
  MAX_ITER: 90000
INPUT:
  # Multi-scale training: shorter side randomly chosen from these sizes.
  MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800)
VERSION: 2
detection/configs/faster_rcnn_R_50_FPN_1x.yaml ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
# Faster R-CNN, ResNet-50 FPN backbone; inherits shared settings from the base config.
_BASE_: "Base-RCNN-FPN.yaml"
MODEL:
  WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"  # ImageNet-pretrained backbone
  MASK_ON: False  # detection only — no mask head
  RESNETS:
    DEPTH: 50
SOLVER:
  CHECKPOINT_PERIOD: 5000  # save a checkpoint every 5k iterations
TEST:
  EVAL_PERIOD: 1000  # evaluate on DATASETS.TEST every 1k iterations
detection/data_util.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import numpy as np
3
+
4
+ from icecream import ic, install
5
+ install()
6
+ ic.configureOutput(includeContext=True, contextAbsPath=True)
7
+
8
def load_coco_json(json_path):
    """Read a COCO-format annotation file and return the parsed dict."""
    with open(json_path, 'r') as fp:
        return json.load(fp)
12
+
13
+ # ['info', 'licenses', 'images', 'annotations', 'categories']
14
+
15
def split_train_val(data, val_ratio=0.1):
    """Randomly split a COCO-format dataset dict into train/val subsets.

    Args:
        data: COCO dict with keys
            ['info', 'licenses', 'images', 'annotations', 'categories'].
        val_ratio: fraction of images assigned to the validation split.

    Returns:
        (train_data, val_data): COCO dicts sharing 'info'/'licenses'/
        'categories' with the input; images and their annotations are
        partitioned consistently (an annotation follows its image).

    Note: uses the global numpy RNG state; seed np.random for reproducibility.
    """
    img_ids = np.array([img['id'] for img in data['images']])
    np.random.shuffle(img_ids)
    val_num = int(len(img_ids) * val_ratio)
    # Fix: membership tests against a numpy array are O(n) each, making the
    # loops below O(n*m). A set gives O(1) lookups.
    train_ids = set(img_ids[val_num:].tolist())

    def _empty_split():
        # Fresh COCO skeleton sharing the immutable-by-convention sections.
        return {'info': data['info'], 'licenses': data['licenses'],
                'images': [], 'annotations': [], 'categories': data['categories']}

    train_data = _empty_split()
    val_data = _empty_split()
    for img in data['images']:
        (train_data if img['id'] in train_ids else val_data)['images'].append(img)
    for ann in data['annotations']:
        (train_data if ann['image_id'] in train_ids else val_data)['annotations'].append(ann)
    return train_data, val_data
35
+
36
# Script body: split the exported COCO train annotations into train/val JSON
# files. Runs at import time with hardcoded absolute paths.
# NOTE(review): paths assume the /root/autodl-tmp machine layout — confirm
# before running elsewhere.
data = load_coco_json('/root/autodl-tmp/dora_dataset/train/_annotations.coco.json')
train_data, val_data = split_train_val(data)
# save train_data and val_data
with open('/root/autodl-tmp/dora_dataset/train.json', 'w') as f:
    json.dump(train_data, f)
with open('/root/autodl-tmp/dora_dataset/val.json', 'w') as f:
    json.dump(val_data, f)
43
+
detection/requirements.txt ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ absl-py==1.0.0
2
+ antlr4-python3-runtime==4.9.3
3
+ anyio==3.4.0
4
+ appdirs==1.4.4
5
+ argon2-cffi==21.1.0
6
+ asttokens==2.2.1
7
+ attrs==21.2.0
8
+ Babel==2.9.1
9
+ backcall==0.2.0
10
+ black==21.4b2
11
+ bleach==4.1.0
12
+ brotlipy==0.7.0
13
+ cachetools==4.2.4
14
+ certifi==2021.5.30
15
+ cffi @ file:///tmp/build/80754af9/cffi_1625807838443/work
16
+ chardet @ file:///tmp/build/80754af9/chardet_1607706746162/work
17
+ click==8.1.3
18
+ cloudpickle==2.1.0
19
+ colorama==0.4.5
20
+ conda==4.10.3
21
+ conda-package-handling @ file:///tmp/build/80754af9/conda-package-handling_1618262148928/work
22
+ cryptography @ file:///tmp/build/80754af9/cryptography_1616769286105/work
23
+ cycler==0.11.0
24
+ debugpy==1.5.1
25
+ decorator==5.1.0
26
+ defusedxml==0.7.1
27
+ detectron2==0.6+cu113
28
+ entrypoints==0.3
29
+ executing==1.2.0
30
+ fonttools==4.28.2
31
+ future==0.18.2
32
+ fvcore==0.1.5.post20221122
33
+ google-auth==2.3.3
34
+ google-auth-oauthlib==0.4.6
35
+ grpcio==1.42.0
36
+ hydra-core==1.3.2
37
+ icecream==2.1.3
38
+ idna @ file:///home/linux1/recipes/ci/idna_1610986105248/work
39
+ importlib-metadata==4.8.2
40
+ importlib-resources==5.4.0
41
+ iopath==0.1.9
42
+ ipykernel==6.5.1
43
+ ipython==7.29.0
44
+ ipython-genutils==0.2.0
45
+ ipywidgets==7.6.5
46
+ jedi==0.18.1
47
+ Jinja2==3.0.3
48
+ json5==0.9.6
49
+ jsonschema==4.2.1
50
+ jupyter-client==7.1.0
51
+ jupyter-core==4.9.1
52
+ jupyter-server==1.12.0
53
+ jupyterlab==3.2.4
54
+ jupyterlab-language-pack-zh-CN==3.2.post2
55
+ jupyterlab-pygments==0.1.2
56
+ jupyterlab-server==2.8.2
57
+ jupyterlab-widgets==1.0.2
58
+ kiwisolver==1.3.2
59
+ Markdown==3.3.6
60
+ MarkupSafe==2.0.1
61
+ matplotlib==3.5.0
62
+ matplotlib-inline==0.1.3
63
+ mistune==0.8.4
64
+ mypy-extensions==1.0.0
65
+ nbclassic==0.3.4
66
+ nbclient==0.5.9
67
+ nbconvert==6.3.0
68
+ nbformat==5.1.3
69
+ nest-asyncio==1.5.1
70
+ notebook==6.4.6
71
+ numpy==1.21.4
72
+ oauthlib==3.1.1
73
+ omegaconf==2.3.0
74
+ opencv-python==4.7.0.72
75
+ packaging==21.3
76
+ pandocfilters==1.5.0
77
+ parso==0.8.2
78
+ pathspec==0.11.0
79
+ pexpect==4.8.0
80
+ pickleshare==0.7.5
81
+ Pillow==8.4.0
82
+ portalocker==2.5.1
83
+ prometheus-client==0.12.0
84
+ prompt-toolkit==3.0.22
85
+ protobuf==3.19.1
86
+ ptyprocess==0.7.0
87
+ pyasn1==0.4.8
88
+ pyasn1-modules==0.2.8
89
+ pycocotools==2.0.6
90
+ pycosat==0.6.3
91
+ pycparser @ file:///tmp/build/80754af9/pycparser_1594388511720/work
92
+ pydot==1.4.2
93
+ Pygments==2.10.0
94
+ pyOpenSSL @ file:///tmp/build/80754af9/pyopenssl_1608057966937/work
95
+ pyparsing==3.0.6
96
+ pyrsistent==0.18.0
97
+ PySocks @ file:///tmp/build/80754af9/pysocks_1605305779399/work
98
+ python-dateutil==2.8.2
99
+ pytz==2021.3
100
+ PyYAML==6.0
101
+ pyzmq==22.3.0
102
+ regex==2023.5.5
103
+ requests @ file:///tmp/build/80754af9/requests_1608241421344/work
104
+ requests-oauthlib==1.3.0
105
+ rsa==4.8
106
+ ruamel-yaml-conda @ file:///tmp/build/80754af9/ruamel_yaml_1616016699510/work
107
+ Send2Trash==1.8.0
108
+ setuptools-scm==6.3.2
109
+ six @ file:///tmp/build/80754af9/six_1623709665295/work
110
+ sniffio==1.2.0
111
+ supervisor==4.2.2
112
+ tabulate==0.8.10
113
+ tensorboard==2.7.0
114
+ tensorboard-data-server==0.6.1
115
+ tensorboard-plugin-wit==1.8.0
116
+ termcolor==2.1.1
117
+ terminado==0.12.1
118
+ testpath==0.5.0
119
+ toml==0.10.2
120
+ tomli==1.2.2
121
+ torch @ http://download.pytorch.org/whl/cu113/torch-1.10.0%2Bcu113-cp38-cp38-linux_x86_64.whl
122
+ torchvision @ http://download.pytorch.org/whl/cu113/torchvision-0.11.1%2Bcu113-cp38-cp38-linux_x86_64.whl
123
+ tornado==6.1
124
+ tqdm @ file:///tmp/build/80754af9/tqdm_1625563689033/work
125
+ traitlets==5.1.1
126
+ typing-extensions==4.0.0
127
+ urllib3 @ file:///tmp/build/80754af9/urllib3_1625084269274/work
128
+ wcwidth==0.2.5
129
+ webencodings==0.5.1
130
+ websocket-client==1.2.1
131
+ Werkzeug==2.0.2
132
+ widgetsnbextension==3.5.2
133
+ yacs==0.1.8
134
+ zipp==3.6.0
detection/train.py ADDED
@@ -0,0 +1,251 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ data_root = '/root/autodl-tmp/ui_dataset'
2
+
3
+ import logging
4
+ import os
5
+ from collections import OrderedDict
6
+ import torch
7
+ from torch.nn.parallel import DistributedDataParallel
8
+ import random
9
+ import cv2
10
+
11
+ import detectron2.utils.comm as comm
12
+ from detectron2.checkpoint import DetectionCheckpointer, PeriodicCheckpointer
13
+ from detectron2.config import get_cfg
14
+ from detectron2.utils.visualizer import Visualizer
15
+ from detectron2.data import (
16
+ datasets,
17
+ MetadataCatalog,
18
+ get_detection_dataset_dicts,
19
+ build_detection_test_loader,
20
+ build_detection_train_loader,
21
+ )
22
+ from detectron2.engine import default_argument_parser, default_setup, default_writers, launch
23
+ from detectron2.evaluation import (
24
+ CityscapesInstanceEvaluator,
25
+ CityscapesSemSegEvaluator,
26
+ COCOEvaluator,
27
+ COCOPanopticEvaluator,
28
+ DatasetEvaluators,
29
+ LVISEvaluator,
30
+ PascalVOCDetectionEvaluator,
31
+ SemSegEvaluator,
32
+ inference_on_dataset,
33
+ print_csv_format,
34
+ )
35
+ from detectron2.modeling import build_model
36
+ from detectron2.solver import build_lr_scheduler, build_optimizer
37
+ from detectron2.utils.events import EventStorage
38
+
39
+ from icecream import ic, install
40
+ install()
41
+ ic.configureOutput(includeContext=True, contextAbsPath=True)
42
+
43
+ logger = logging.getLogger("detectron2")
44
+
45
+
46
def visualize(dataset_name='valid_ui', num=4, iter=0):
    """Write `num` randomly chosen ground-truth visualizations of
    `dataset_name` to ./imgs/ for eyeballing during training.

    Args:
        dataset_name: a registered detectron2 dataset name.
        num: number of images to sample (capped at the dataset size).
        iter: training iteration, used to tag the output filenames.
    """
    # Fix: os.mkdir after an existence check races when multiple processes
    # call this concurrently; makedirs(exist_ok=True) is atomic enough here.
    os.makedirs('./imgs', exist_ok=True)
    metadata = MetadataCatalog.get(dataset_name)
    dataset = get_detection_dataset_dicts(dataset_name)

    # Fix: random.sample raises ValueError when num > len(dataset); cap it.
    for i, d in enumerate(random.sample(dataset, min(num, len(dataset)))):
        img = cv2.imread(d["file_name"])
        visualizer = Visualizer(img[:, :, ::-1], metadata=metadata, scale=0.5)
        vis = visualizer.draw_dataset_dict(d)
        cv2.imwrite(f'./imgs/{iter}_{dataset_name}_{i}.png', vis.get_image()[:, :, ::-1])
57
+
58
+
59
def get_evaluator(cfg, dataset_name, output_folder=None):
    """
    Create evaluator(s) for a given dataset, dispatching on the dataset's
    "evaluator_type" metadata (builtin datasets set this automatically).
    For your own dataset, create the evaluator directly in your script
    instead of relying on this dispatch logic.
    """
    if output_folder is None:
        output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
    evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type

    # Types handled by exactly one dedicated evaluator.
    if evaluator_type == "cityscapes_instance":
        return CityscapesInstanceEvaluator(dataset_name)
    if evaluator_type == "cityscapes_sem_seg":
        return CityscapesSemSegEvaluator(dataset_name)
    if evaluator_type == "pascal_voc":
        return PascalVOCDetectionEvaluator(dataset_name)
    if evaluator_type == "lvis":
        return LVISEvaluator(dataset_name, cfg, True, output_folder)

    # COCO-style types may need several evaluators combined.
    selected = []
    if evaluator_type in ("sem_seg", "coco_panoptic_seg"):
        selected.append(
            SemSegEvaluator(
                dataset_name,
                distributed=True,
                output_dir=output_folder,
            )
        )
    if evaluator_type in ("coco", "coco_panoptic_seg"):
        selected.append(COCOEvaluator(dataset_name, output_dir=output_folder))
    if evaluator_type == "coco_panoptic_seg":
        selected.append(COCOPanopticEvaluator(dataset_name, output_folder))

    if not selected:
        raise NotImplementedError(
            "no Evaluator for the dataset {} with the type {}".format(dataset_name, evaluator_type)
        )
    return selected[0] if len(selected) == 1 else DatasetEvaluators(selected)
97
+
98
+
99
def do_test(cfg, model, storage=None):
    """Run inference + evaluation on every dataset in cfg.DATASETS.TEST.

    Args:
        cfg: detectron2 config node.
        model: the model to evaluate.
        storage: optional EventStorage; when given, per-dataset AP metrics
            are also written to it (and thus to tensorboard).

    Returns:
        Per-dataset results dict; unwrapped to the single results dict when
        there is exactly one test dataset.
    """
    results = OrderedDict()
    for dataset_name in cfg.DATASETS.TEST:
        data_loader = build_detection_test_loader(cfg, dataset_name)
        evaluator = get_evaluator(
            cfg, dataset_name, os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name)
        )
        results_i = inference_on_dataset(model, data_loader, evaluator)
        results[dataset_name] = results_i
        if comm.is_main_process():
            logger.info("Evaluation results for {} in csv format:".format(dataset_name))
            print_csv_format(results_i)
            # dump to storage, save to tensorboard
            # Fix: compare with `is not None` (PEP 8), and log via the module
            # logger instead of the root `logging` module; dropped a duplicate
            # log line and commented-out debug code.
            if storage is not None:
                for key, value in results_i.items():  # key = bbox / segm; value = {'AP': xx, 'APm': xx, ...}
                    logger.info(f'key value: {key}, {value}')
                    # Prefix metric names with the dataset so multiple test
                    # sets don't collide in the event storage.
                    out_aps_dict = {f'{dataset_name}_{k}': v for k, v in value.items()}
                    storage.put_scalars(**out_aps_dict)
    if len(results) == 1:
        results = list(results.values())[0]
    return results
125
+
126
+
127
def do_train(cfg, model, resume=False):
    """Minimal training loop: optimize `model` for cfg.SOLVER.MAX_ITER
    iterations with periodic evaluation, visualization, metric writing and
    checkpointing.

    Args:
        cfg: frozen detectron2 config.
        model: model to train (possibly DDP-wrapped by the caller).
        resume: when True, restore optimizer/scheduler/iteration from the
            last checkpoint in cfg.OUTPUT_DIR; otherwise start from
            cfg.MODEL.WEIGHTS.
    """
    model.train()
    optimizer = build_optimizer(cfg, model)
    scheduler = build_lr_scheduler(cfg, optimizer)

    checkpointer = DetectionCheckpointer(
        model, cfg.OUTPUT_DIR, optimizer=optimizer, scheduler=scheduler
    )
    # resume_or_load returns the stored iteration (default -1), so +1 is the
    # first iteration to run.
    start_iter = (
        checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=resume).get("iteration", -1) + 1
    )
    max_iter = cfg.SOLVER.MAX_ITER

    periodic_checkpointer = PeriodicCheckpointer(
        checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD, max_iter=max_iter
    )

    # Metric writers (console / json / tensorboard) only on the main process.
    writers = default_writers(cfg.OUTPUT_DIR, max_iter) if comm.is_main_process() else []

    # compared to "train_net.py", we do not support accurate timing and
    # precise BN here, because they are not trivial to implement in a small training loop
    data_loader = build_detection_train_loader(cfg)
    logger.info("Starting training from iteration {}".format(start_iter))
    with EventStorage(start_iter) as storage:
        for data, iteration in zip(data_loader, range(start_iter, max_iter)):
            storage.iter = iteration

            loss_dict = model(data)
            losses = sum(loss_dict.values())
            # Abort immediately on NaN/inf losses rather than training onward.
            assert torch.isfinite(losses).all(), loss_dict

            # Reduce losses across workers for logging only; the backward pass
            # uses the local (unreduced) sum.
            loss_dict_reduced = {k: v.item() for k, v in comm.reduce_dict(loss_dict).items()}
            losses_reduced = sum(loss for loss in loss_dict_reduced.values())
            if comm.is_main_process():
                storage.put_scalars(total_loss=losses_reduced, **loss_dict_reduced)

            optimizer.zero_grad()
            losses.backward()
            optimizer.step()
            storage.put_scalar("lr", optimizer.param_groups[0]["lr"], smoothing_hint=False)
            scheduler.step()

            # Periodic eval + ground-truth visualization; skipped on the last
            # iteration, which the caller evaluates separately after training.
            if (
                cfg.TEST.EVAL_PERIOD > 0
                and (iteration + 1) % cfg.TEST.EVAL_PERIOD == 0
                and iteration != max_iter - 1
            ):
                visualize('valid_ui', 5, iteration)
                visualize('train_ui', 5, iteration)
                do_test(cfg, model, storage)
                # Compared to "train_net.py", the test results are not dumped to EventStorage
                comm.synchronize()

            # Flush writers every 20 iterations (after a short warmup) and at
            # the final iteration.
            if iteration - start_iter > 5 and (
                (iteration + 1) % 20 == 0 or iteration == max_iter - 1
            ):
                for writer in writers:
                    writer.write()
            periodic_checkpointer.step(iteration)
186
+
187
+
188
def setup(args):
    """
    Build a frozen config from the CLI args and run detectron2's default
    setup (logging, output dir, seed, environment dump).
    """
    config = get_cfg()
    config.merge_from_file(args.config_file)
    config.merge_from_list(args.opts)
    config.freeze()
    # Replace default_setup with custom code if the defaults don't fit.
    default_setup(config, args)
    return config
200
+
201
+
202
def main(args):
    """Per-process entry point: build config, register datasets, then
    evaluate only (--eval-only) or train followed by a final evaluation."""
    cfg = setup(args)

    dora_root = data_root.replace('ui_dataset', 'dora_dataset')
    # (name, annotation json, image root) for every dataset the configs use.
    # val.json was split out of the train annotations, so its images still
    # live under the dora train image directory.
    dataset_specs = [
        ("train_ui", f"{data_root}/train/_annotations.coco.json", f"{data_root}/train"),
        ("train_dora_ui", f"{dora_root}/train.json", f"{dora_root}/train"),
        ("test_ui", f"{data_root}/test/_annotations.coco.json", f"{data_root}/test"),
        ("valid_ui", f"{data_root}/valid/_annotations.coco.json", f"{data_root}/valid"),
        ("valid_dora_ui", f"{dora_root}/val.json", f"{dora_root}/train"),
    ]
    for name, json_file, image_root in dataset_specs:
        datasets.register_coco_instances(name, {}, json_file, image_root)
    print('done registering datasets')

    model = build_model(cfg)
    logger.info("Model:\n{}".format(model))

    if args.eval_only:
        DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume
        )
        return do_test(cfg, model)

    # Wrap in DDP when launched with more than one process.
    if comm.get_world_size() > 1:
        model = DistributedDataParallel(
            model, device_ids=[comm.get_local_rank()], broadcast_buffers=False
        )

    do_train(cfg, model, resume=args.resume)
    return do_test(cfg, model)
238
+ return do_test(cfg, model)
239
+
240
+
241
if __name__ == "__main__":
    # Parse detectron2's standard CLI flags and spawn one worker per GPU.
    parser = default_argument_parser()
    args = parser.parse_args()
    print("Command Line Args:", args)
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )