dist.get_rank()
torch.device("cuda")
_configure_with_arguments(self, args, mpu)
int(os.environ['LOCAL_RANK'])
hasattr(args, 'local_rank')
DeepSpeedConfig(config_file, mpu, param_dict=self.config_params)
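Taken together, the _configure_with_arguments fragments cover two jobs: resolving the process's local rank (from LOCAL_RANK or args.local_rank) and building the DeepSpeedConfig from the config file or an in-memory param dict. A minimal standalone sketch of the rank-resolution half; resolve_local_rank and its precedence are assumptions of this sketch, not DeepSpeed's API:

import os

def resolve_local_rank(args):
    # Prefer the LOCAL_RANK environment variable exported by the distributed
    # launcher; fall back to an explicit args.local_rank when present.
    env_rank = os.environ.get("LOCAL_RANK")
    if env_rank is not None:
        return int(env_rank)
    if hasattr(args, "local_rank") and args.local_rank is not None:
        return args.local_rank
    return 0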
_do_args_sanity_check(self, args)
hasattr(args, 'deepscale_config')
hasattr(args, 'deepspeed_config')
hasattr(args, 'local_rank')
isinstance(args.local_rank, int)
type(args.local_rank)
int(os.environ.get("LOCAL_RANK"))
hasattr(args, 'deepspeed_config')
os.path.isfile(args.deepspeed_config)
format(args.deepspeed_config)
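Read as a group, the _do_args_sanity_check fragments validate the parsed arguments: the deprecated deepscale_config alias, an integer local_rank, and an existing deepspeed_config file. A hedged standalone approximation (check_args is an illustrative helper, not DeepSpeed's):

import os

def check_args(args):
    # Accept the deprecated 'deepscale_config' alias, but not both names at once.
    if getattr(args, "deepscale_config", None) is not None:
        assert getattr(args, "deepspeed_config", None) is None, \
            "Specify only one of deepscale_config and deepspeed_config"
        args.deepspeed_config = args.deepscale_config

    # local_rank, when provided, must be an integer.
    if hasattr(args, "local_rank") and args.local_rank is not None:
        assert isinstance(args.local_rank, int), \
            "local_rank must be an int, got {}".format(type(args.local_rank))

    # The config path must point at an actual file.
    if getattr(args, "deepspeed_config", None) is not None:
        assert os.path.isfile(args.deepspeed_config), \
            "DeepSpeed config file not found: {}".format(args.deepspeed_config)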
_is_supported_optimizer(self, optimizer_name)
getattr(torch.optim, optimizer_name, None)
_do_sanity_check(self)
self.optimizer_name()
self._is_supported_optimizer(self.optimizer_name())
format(self.optimizer_name())
self.optimizer_name()
self.dynamic_loss_scale()
format(self.optimizer_name())
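_is_supported_optimizer appears to accept any optimizer class exposed by torch.optim plus DeepSpeed's own fused and one-bit optimizers, and _do_sanity_check rejects unrecognized names; the dynamic_loss_scale() fragment suggests an extra check tying a particular optimizer to dynamic loss scaling. A sketch under those assumptions (the optimizer list and the Lamb check are guesses, not confirmed by the fragments):

import torch

DEEPSPEED_OPTIMIZERS = {"Adam", "AdamW", "Lamb", "OneBitAdam"}  # illustrative, not exhaustive

def is_supported_optimizer(optimizer_name):
    # Supported if torch.optim exposes a class of that name, or the name is
    # one of the (assumed) DeepSpeed-provided optimizers.
    return (getattr(torch.optim, optimizer_name, None) is not None
            or optimizer_name in DEEPSPEED_OPTIMIZERS)

def sanity_check_optimizer(optimizer_name, dynamic_loss_scale):
    assert is_supported_optimizer(optimizer_name), \
        "{} is not a supported DeepSpeed optimizer".format(optimizer_name)
    # Assumption: the fused Lamb optimizer requires dynamic loss scaling.
    if optimizer_name == "Lamb":
        assert dynamic_loss_scale, \
            "{} requires dynamic loss scaling".format(optimizer_name)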
_broadcast_model(self)
is_replicated(p)
hasattr(p, 'ds_status')
self.module.parameters()
torch.is_tensor(p)
is_replicated(p)
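_broadcast_model iterates module.parameters() and broadcasts each replicated tensor so every data-parallel rank starts from identical weights; the hasattr(p, 'ds_status') test excludes ZeRO-partitioned parameters. A simplified sketch (the source rank and group arguments are assumptions):

import torch
import torch.distributed as dist

def broadcast_model(module, src_rank=0, data_parallel_group=None):
    def is_replicated(p):
        # Parameters carrying a 'ds_status' attribute are ZeRO-partitioned;
        # treat them as not replicated and skip the broadcast.
        return not hasattr(p, "ds_status")

    for p in module.parameters():
        if torch.is_tensor(p) and is_replicated(p):
            dist.broadcast(p.data, src=src_rank, group=data_parallel_group)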
_configure_distributed_model(self, model)
self.fp16_enabled()
self.module.half()
self.module.to(self.device)
_initialize_parameter_parallel_groups()
dist.get_world_size()
self.mpu.get_data_parallel_group()
self.mpu.get_data_parallel_world_size()
self.mpu.get_model_parallel_world_size()
self.mpu.get_data_parallel_group()
self.amp_enabled()
self._broadcast_model()
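_configure_distributed_model casts the module to half precision when fp16 is enabled, moves it to the device, derives the parallel groups (from the mpu if one is supplied, otherwise from dist.get_world_size()), and broadcasts the initial weights unless Apex amp is enabled, in which case the broadcast happens after amp.initialize() inside _configure_optimizer. A condensed sketch of that flow with illustrative arguments:

def configure_distributed_model(module, device, fp16_enabled, amp_enabled,
                                broadcast_fn, data_parallel_group=None):
    if fp16_enabled:
        module.half()              # cast parameters and buffers to fp16
    module.to(device)              # e.g. torch.device("cuda")
    if not amp_enabled:
        # With Apex amp, the initial broadcast is deferred until after
        # amp.initialize() has wrapped the module and optimizer.
        broadcast_fn(module, src_rank=0, data_parallel_group=data_parallel_group)
    return module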
_configure_optimizer(self, client_optimizer, model_parameters)
len(pg["params"])
logger.info('Using client Optimizer as basic optimizer')
self._configure_basic_optimizer(model_parameters)
self.optimizer_name()
self.zero_optimization()
self.amp_enabled()
"Amp and ZeRO are not currently compatible, please use (legacy) fusion with FP16 enabled"
is_zero_supported_optimizer(basic_optimizer)
self.zero_allow_untested_optimizer()
self._configure_zero_optimizer(basic_optimizer)
self.amp_enabled()
self.fp16_enabled()
"Cannot enable both amp with (legacy) fp16 mode"
self.amp_params()
logger.info(f"Initializing AMP with these params: {amp_params}")
logger.info("Initializing Apex amp from: {}".format(amp.__path__)
amp.initialize(self.module, basic_optimizer, **amp_params)
self._broadcast_model()
self.fp16_enabled()
self._configure_fp16_optimizer(basic_optimizer)
log_dist('DeepSpeed Final Optimizer = {}'.format(self.optimizer_name()))
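_configure_optimizer first settles on a basic optimizer (the client's, or one built from the config) and then wraps it according to the enabled features: ZeRO (incompatible with amp, and limited to supported optimizers unless zero_allow_untested_optimizer is set), Apex amp, or legacy fp16. A sketch of that decision tree; cfg and the helper callables are illustrative stand-ins rather than DeepSpeed objects:

def configure_optimizer(client_optimizer, model_parameters, cfg, helpers):
    # 1. Pick the basic optimizer.
    if client_optimizer is not None:
        basic_optimizer = client_optimizer   # "Using client Optimizer as basic optimizer"
    else:
        basic_optimizer = helpers.configure_basic_optimizer(model_parameters)

    # 2. Wrap it according to the enabled features.
    if cfg.zero_optimization:
        assert not cfg.amp_enabled, "amp and ZeRO cannot be combined"
        if not helpers.is_zero_supported_optimizer(basic_optimizer):
            assert cfg.zero_allow_untested_optimizer, \
                "an untested optimizer under ZeRO requires zero_allow_untested_optimizer"
        return helpers.configure_zero_optimizer(basic_optimizer)
    if cfg.amp_enabled:
        assert not cfg.fp16_enabled, "amp and (legacy) fp16 mode are mutually exclusive"
        return helpers.initialize_amp(basic_optimizer, cfg.amp_params)
    if cfg.fp16_enabled:
        return helpers.configure_fp16_optimizer(basic_optimizer)
    return basic_optimizer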
_configure_basic_optimizer(self, model_parameters)
self.optimizer_params()
print(optimizer_parameters.keys())
optimizer_parameters.keys()
self.optimizer_name()
optimizer_parameters.pop(TORCH_ADAM_PARAM, False)
optimizer_parameters.pop(ADAM_W_MODE, ADAM_W_MODE_DEFAULT)
self.zero_cpu_offload()
self.optimizer_name()
FusedLamb(model_parameters, **optimizer_parameters)
self.optimizer_name()
OnebitAdam(model_parameters, self, **optimizer_parameters)
self.fp16_enabled()
getattr(torch.optim, self.optimizer_name())
torch_optimizer(model_parameters, **optimizer_parameters)
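_configure_basic_optimizer reads the optimizer name and its parameter dict from the config, pops DeepSpeed-specific keys (TORCH_ADAM_PARAM, ADAM_W_MODE), and dispatches to fused, one-bit, or CPU-offload variants where configured; otherwise it falls back to the matching torch.optim class via getattr. The sketch below keeps only that generic fallback; the popped key names are assumptions:

import torch

def configure_basic_optimizer(optimizer_name, optimizer_parameters, model_parameters):
    # Strip keys that only steer DeepSpeed's dispatch, not the optimizer itself.
    optimizer_parameters = dict(optimizer_parameters)
    optimizer_parameters.pop("torch_adam", None)    # assumed value of TORCH_ADAM_PARAM
    optimizer_parameters.pop("adam_w_mode", None)   # assumed value of ADAM_W_MODE

    optimizer_cls = getattr(torch.optim, optimizer_name, None)
    if optimizer_cls is None:
        raise ValueError("Unsupported optimizer: {}".format(optimizer_name))
    return optimizer_cls(model_parameters, **optimizer_parameters)

For example, configure_basic_optimizer("AdamW", {"lr": 1e-3}, model.parameters()) would return a plain torch.optim.AdamW over the model's parameters.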
_configure_fp16_optimizer(self, optimizer)
self.initial_dynamic_scale()
self.dynamic_loss_scale_args()
self.gradient_clipping()
self.optimizer_name()
self.dynamic_loss_scale()
log_dist('Creating fp16 optimizer with dynamic loss scale', ranks=[0])
self.wall_clock_breakdown()
self.optimizer_legacy_fusion()
self.loss_scale()
self.loss_scale()
self.optimizer_legacy_fusion()
self.loss_scale()
self.dynamic_loss_scale()
self.optimizer_name()
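_configure_fp16_optimizer wraps the basic optimizer in an fp16 optimizer, choosing dynamic loss scaling (seeded by initial_dynamic_scale() and dynamic_loss_scale_args()) or a static loss_scale(), and forwarding gradient clipping, legacy-fusion, and wall-clock timing options. The toy wrapper below shows only the static loss-scale mechanics at the heart of that wrapper; it omits fp32 master weights, overflow detection, and clipping:

class StaticLossScaleOptimizer:
    # Toy fp16 wrapper: scale the loss up before backward, scale the gradients
    # back down before stepping the inner optimizer.
    def __init__(self, optimizer, loss_scale=2 ** 16):
        self.optimizer = optimizer
        self.loss_scale = loss_scale

    def backward(self, loss):
        (loss * self.loss_scale).backward()

    def step(self):
        for group in self.optimizer.param_groups:
            for p in group["params"]:
                if p.grad is not None:
                    p.grad.div_(self.loss_scale)   # undo the loss scaling
        self.optimizer.step()

    def zero_grad(self):
        self.optimizer.zero_grad()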
_configure_zero_optimizer(self, optimizer)
self.zero_optimization_stage()
log_dist('Creating fp16 ZeRO stage {} optimizer'.format(zero_stage))
self.allreduce_always_fp32()
self.wall_clock_breakdown()
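_configure_zero_optimizer reads zero_optimization_stage() and builds the matching ZeRO wrapper around the fp16 optimizer, forwarding flags such as allreduce_always_fp32() and the wall_clock_breakdown() timers. A sketch of the dispatch only; the wrapper classes are passed in as an illustrative mapping rather than reimplemented:

def configure_zero_optimizer(basic_optimizer, zero_stage, stage_wrappers,
                             allreduce_always_fp32=False, timers=None):
    # stage_wrappers maps a ZeRO stage number (1 = partition optimizer state,
    # 2 = also partition gradients) to a wrapper class; it stands in for
    # DeepSpeed's actual ZeRO optimizer implementations.
    if zero_stage not in stage_wrappers:
        raise NotImplementedError("ZeRO stage {} not handled in this sketch".format(zero_stage))
    return stage_wrappers[zero_stage](basic_optimizer,
                                      allreduce_always_fp32=allreduce_always_fp32,
                                      timers=timers)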