code |
|---|
gradient_accumulation_steps (int) |
checkpoint_tag_validation_enabled(self) |
checkpoint_tag_validation_fail(self) |
elasticity_enabled(self) |
pld_enabled(self) |
pld_params(self) |
pld_theta(self) |
self.pld_params() |
pld_gamma(self) |
self.pld_params() |
tensorboard_enabled(self) |
tensorboard_output_path(self) |
tensorboard_job_name(self) |
os.path.join(os.path.expanduser("~") |
self.tensorboard_output_path() |
self.tensorboard_output_path() |
self.tensorboard_job_name() |
os.path.join(base_dir, job_name) |
self.tensorboard_job_name() |
self.tensorboard_job_name() |
os.path.join(infra_job_id, "logs") |
os.path.join(base, summary_writer_dir_name, name) |
os.makedirs(log_dir, exist_ok=True) |
SummaryWriter(log_dir=log_dir) |
wall_clock_breakdown(self) |
flops_profiler_enabled(self) |
flops_profiler_profile_step(self) |
flops_profiler_module_depth(self) |
flops_profiler_top_modules(self) |
flops_profiler_detailed(self) |
memory_breakdown(self) |
sparse_gradients_enabled(self) |
train_batch_size(self) |
train_micro_batch_size_per_gpu(self) |
optimizer_name(self) |
optimizer_params(self) |
optimizer_legacy_fusion(self) |
scheduler_name(self) |
scheduler_params(self) |
zero_optimization(self) |
zero_allow_untested_optimizer(self) |
zero_reduce_scatter(self) |
zero_overlap_comm(self) |
zero_offload_optimizer(self) |
zero_offload_param(self) |
zero_cpu_offload(self) |
zero_sub_group_size(self) |
zero_optimization_stage(self) |
zero_reduce_bucket_size(self) |
zero_allgather_bucket_size(self) |
zero_optimization_partition_gradients(self) |
self.zero_optimization_stage() |
zero_optimization_partition_weights(self) |
self.zero_optimization_stage() |
zero_contiguous_gradients(self) |
zero_load_from_fp32_weights(self) |
zero_elastic_checkpoint(self) |
zero_max_live_parameters(self) |
zero_max_reuse_distance(self) |
zero_prefetch_bucket_size(self) |
zero_param_persistence_threshold(self) |
zero_gather_fp16_weights_on_model_save(self) |
fp16_enabled(self) |
amp_enabled(self) |
amp_params(self) |
loss_scale(self) |
gradient_accumulation_steps(self) |
allreduce_always_fp32(self) |
postscale_gradients(self) |
gradient_predivide_factor(self) |
steps_per_print(self) |
zero_allgather_partitions(self) |
dump_state(self) |
gradient_clipping(self) |
dynamic_loss_scale(self) |
initial_dynamic_scale(self) |
dynamic_loss_scale_args(self) |
swap_tensor_config(self) |
aio_config(self) |
_configure_lr_scheduler(self, client_lr_scheduler) |
self._scheduler_from_config(self.optimizer) |
self.scheduler_name() |
logger.info('DeepSpeed using client LR scheduler') |
log_dist(f'DeepSpeed LR Scheduler = {self.lr_scheduler}', ranks=[0]) |
_configure_checkpointing(self, dist_init_required) |
self.mpu.get_data_parallel_rank() |
self.zero_optimization_partition_weights() |
self.zero_optimization() |
_scheduler_from_config(self, optimizer) |
self.scheduler_name() |
hasattr(lr_schedules, scheduler_name) |
getattr(lr_schedules, scheduler_name) |
hasattr(torch.optim.lr_scheduler, scheduler_name) |
getattr(torch.optim.lr_scheduler, scheduler_name) |
self.scheduler_params() |
scheduler(optimizer, **scheduler_params) |
_set_distributed_vars(self) |
torch.cuda.set_device(self.local_rank) |
torch.device("cuda", self.local_rank) |
dist.get_world_size() |
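
The `tensorboard_*` and `SummaryWriter` rows above trace a log-directory assembly: an output path (defaulting under the user's home directory), a job name, and an optional infrastructure job id are joined into a path, the directory is created, and a `SummaryWriter` is constructed on it. Below is a minimal runnable sketch of that pattern; `make_summary_writer`, its parameter names, and its defaults are illustrative assumptions, not the API the rows come from.

```python
import os
from torch.utils.tensorboard import SummaryWriter


def make_summary_writer(output_path=None, job_name=None, infra_job_id=None):
    """Hypothetical sketch of the log-dir assembly suggested by the rows above."""
    # Fall back to a home-directory base when no output path is configured.
    base = output_path or os.path.join(os.path.expanduser("~"), "tensorboard")
    name = job_name or "DeepSpeedJob"
    # An infrastructure-assigned job id, when present, gets its own logs subtree.
    if infra_job_id is not None:
        base = os.path.join(base, infra_job_id, "logs")
    log_dir = os.path.join(base, name)
    os.makedirs(log_dir, exist_ok=True)
    return SummaryWriter(log_dir=log_dir)


writer = make_summary_writer(job_name="demo-run")
writer.add_scalar("loss", 0.42, global_step=0)
writer.close()
```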
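
Similarly, the `_scheduler_from_config` rows show a scheduler class being resolved by name (first against a first-party schedules module, then against `torch.optim.lr_scheduler`) and instantiated with the configured parameters. A minimal sketch of that name-based lookup, assuming a hypothetical `build_scheduler` helper and checking only torch's built-in schedulers so the example stays self-contained:

```python
import torch
import torch.optim.lr_scheduler as torch_schedules


def build_scheduler(optimizer, scheduler_name, scheduler_params):
    """Hypothetical sketch: resolve a scheduler class by name and build it.

    The rows above suggest a first-party schedules module is checked before
    torch.optim.lr_scheduler; this sketch keeps only the torch lookup.
    """
    if hasattr(torch_schedules, scheduler_name):
        scheduler_cls = getattr(torch_schedules, scheduler_name)
        return scheduler_cls(optimizer, **scheduler_params)
    raise ValueError(f"Unknown scheduler: {scheduler_name}")


# Usage example with a toy model and torch's built-in StepLR.
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = build_scheduler(optimizer, "StepLR", {"step_size": 10, "gamma": 0.5})
```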