code (string, 3–6.57k chars) |
|---|
self.postscale_gradients() |
self.gradient_predivide_factor() |
self.buffered_allreduce_fallback(elements_per_buffer=bucket_size) |
backward(self, loss, allreduce_gradients=True, release_loss=False) |
self.gradient_accumulation_steps() |
self._scale_loss(loss.float()) |
self.tensorboard_enabled() |
self.is_gradient_accumulation_boundary() |
loss.mean().item() |
self.gradient_accumulation_steps() |
self.summary_writer.add_scalar(event[0], event[1], event[2]) |
self.summary_writer.flush() |
self.wall_clock_breakdown() |
self.timers('backward_microstep').start() |
self.timers('backward').start() |
self.wall_clock_breakdown() |
self.timers('backward_inner_microstep').start() |
self.timers('backward_inner').start() |
self.zero_optimization() |
self.optimizer.backward(loss) |
self.amp_enabled() |
self.is_gradient_accumulation_boundary() |
scaled_loss.backward() |
self.fp16_enabled() |
self.optimizer.backward(loss) |
loss.backward() |
self.wall_clock_breakdown() |
self.timers('backward_inner').stop() |
self.timers('backward_inner_microstep').stop() |
self.wall_clock_breakdown() |
self.timers('backward_allreduce_microstep').start() |
self.timers('backward_allreduce').start() |
self.allreduce_gradients() |
self.wall_clock_breakdown() |
self.timers('backward_allreduce').stop() |
self.timers('backward_allreduce_microstep').stop() |
self.timers('backward').stop() |
self.timers('backward_microstep').stop() |
is_gradient_accumulation_boundary(self) |
return (self.micro_steps + 1) % self.gradient_accumulation_steps() == 0 |
zero_grad(self) |
self.module.named_parameters() |
clip_fp32_gradients(self) |
torch.nn.utils.clip_grad_norm_(parameters=self.module.parameters(), max_norm=self.gradient_clipping()) |
_take_model_step(self, lr_kwargs) |
self.gradient_clipping() |
self.fp16_enabled() |
self.amp_enabled() |
self.clip_fp32_gradients() |
self.amp_enabled() |
amp.master_params(self.optimizer) |
self.gradient_clipping() |
self.optimizer.step() |
self.zero_optimization() |
self.amp_enabled() |
self.zero_grad() |
self.optimizer.zero_grad() |
step() |
hasattr(self.optimizer, 'overflow') |
self.lr_scheduler.step(**(lr_kwargs or {})) |
and (self.global_steps + 1) % self.steps_per_print() == 0 |
self._report_progress(self.global_steps + 1) |
self.train_batch_size() |
step(self, lr_kwargs=None) |
self.wall_clock_breakdown() |
self.timers('step_microstep').start() |
self.timers('step').start() |
self.is_gradient_accumulation_boundary() |
self.progressive_layer_drop.update_state(self.global_steps) |
self._take_model_step(lr_kwargs) |
self.tput_timer.stop(report_progress) |
self.tensorboard_enabled() |
self.is_gradient_accumulation_boundary() |
self.get_lr() |
self.summary_writer.add_scalar(event[0], event[1], event[2]) |
self.fp16_enabled() |
hasattr(self.optimizer, 'cur_scale') |
self.summary_writer.add_scalar(event[0], event[1], event[2]) |
self.summary_writer.flush() |
self.wall_clock_breakdown() |
self.timers('step').stop() |
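
The fragments above are call sites from the `backward`/gradient-accumulation path of a DeepSpeed-style training engine. As a rough illustration of how they fit together, here is a minimal sketch, not DeepSpeed's actual implementation: the `MicroBatchTrainer` class and its constructor arguments are hypothetical, and only the accumulation-boundary test mirrors the fragment shown above.

```python
# Minimal sketch (hypothetical MicroBatchTrainer, not DeepSpeed's engine):
# scale the loss by the accumulation window, run backward() every micro-step,
# and signal a boundary when the accumulated gradients should be applied.
import torch


class MicroBatchTrainer:
    def __init__(self, module, optimizer, gradient_accumulation_steps=1):
        self.module = module
        self.optimizer = optimizer
        self._gas = gradient_accumulation_steps
        self.micro_steps = 0

    def gradient_accumulation_steps(self):
        return self._gas

    def is_gradient_accumulation_boundary(self):
        # Same test as the fragment above: every N-th micro-step is a boundary.
        return (self.micro_steps + 1) % self.gradient_accumulation_steps() == 0

    def _scale_loss(self, loss):
        # Dividing by the window size makes the summed per-micro-batch
        # gradients match one large-batch gradient.
        return loss / self.gradient_accumulation_steps()

    def backward(self, loss):
        self._scale_loss(loss.float()).backward()
        boundary = self.is_gradient_accumulation_boundary()
        self.micro_steps += 1
        return boundary  # caller reduces gradients / steps the optimizer when True
```

A caller would invoke `backward()` on every micro-batch and only all-reduce gradients and step the optimizer when it returns `True`.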
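
The step-path fragments (fp32 gradient clipping, `optimizer.step()`, the `overflow` check, and the scheduler update) might be assembled as below. This is again a hedged sketch for the same hypothetical trainer object: `apply_step`, its parameters, and the control flow are assumptions for illustration, not the library's exact logic.

```python
# Sketch continued: apply the accumulated gradients once per boundary.
# `trainer` is assumed to expose .module and .optimizer as in the sketch above.
import torch


def apply_step(trainer, lr_scheduler=None, lr_kwargs=None,
               max_grad_norm=1.0, steps_per_print=100):
    # Clip full-precision gradients, then apply and clear them.
    torch.nn.utils.clip_grad_norm_(parameters=trainer.module.parameters(),
                                   max_norm=max_grad_norm)
    trainer.optimizer.step()
    trainer.optimizer.zero_grad()

    # Loss-scaling optimizers may expose an `overflow` flag; when a step is
    # skipped because of overflow, the LR schedule should not advance either.
    overflow = getattr(trainer.optimizer, 'overflow', False)
    if not overflow and lr_scheduler is not None:
        lr_scheduler.step(**(lr_kwargs or {}))

    # Periodic progress reporting, mirroring the steps_per_print() fragment.
    trainer.global_steps = getattr(trainer, 'global_steps', 0) + 1
    if trainer.global_steps % steps_per_print == 0:
        print(f'completed {trainer.global_steps} optimizer steps')
```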