code stringlengths 17 6.64M |
|---|
def get_dcr(x, req_grad):
    """Detach-Clone-Requires_grad: return a new list where every tensor in
    ``x`` is detached, cloned, and has its ``requires_grad`` flag set from
    the matching entry of ``req_grad``.

    Non-tensor entries pass through unchanged; their ``req_grad`` entry must
    be ``False``. Inputs are not modified.
    """
    assert_same_size(x, req_grad)
    out = []
    for item, needs_grad in zip(x, req_grad):
        if not isinstance(item, Tensor):
            assert needs_grad is False
            out.append(item)
            continue
        assert isinstance(needs_grad, bool)
        out.append(item.detach().clone().requires_grad_(needs_grad))
    return out
|
def get_dr(x, req_grad):
    """Detach-Requires_grad: like :func:`get_dcr` but WITHOUT cloning, so the
    detached tensors share storage with the originals.

    Non-tensor entries pass through unchanged; their ``req_grad`` entry must
    be ``False``.
    """
    try:
        assert_same_size(x, req_grad)
    except Exception:
        # Debug aid: dump the mismatching structures before propagating.
        print(x)
        print(req_grad)
        raise  # bare raise preserves the original exception and traceback
    res = []
    for t, r in zip(x, req_grad):
        if isinstance(t, Tensor):
            assert isinstance(r, bool)
            res.append(t.detach().requires_grad_(r))
        else:
            assert r is False
            res.append(t)
    return res
|
def get_r(x, req_grad):
    """Set ``requires_grad`` IN PLACE on each tensor of ``x`` from the
    matching entry of ``req_grad`` and return the (same) elements as a list.

    Non-tensor entries pass through unchanged; their ``req_grad`` entry must
    be ``False``.
    """
    assert_same_size(x, req_grad)
    out = []
    for item, needs_grad in zip(x, req_grad):
        if not isinstance(item, Tensor):
            assert needs_grad is False
            out.append(item)
            continue
        assert isinstance(needs_grad, bool)
        out.append(item.requires_grad_(needs_grad))
    return out
|
class SinglePartitionManager():
    """Runs one pipeline stage (partition): schedules forward/backward work,
    exchanges activations and gradients through a communication handler, and
    drives the optional staleness-mitigation features (weight prediction,
    weight stashing, gap-aware updates).

    Most collaborators (trainer, data propagator, dataloader, predictor,
    stasher, true-weights storage, lr scheduler) are injected after
    construction via the ``set_*`` methods.
    """

    def __init__(self, stage: int, stage_depth: int, pipeline_depth: int, num_stages, partition: torch.nn.Module, comm_handler: CommunicationHandlerBase, work_scheduler: WorkScheduler, device, is_last_partition, is_first_partition, log_frequency=100, step_every=1, use_recomputation=True, gap_aware_just_loss=False, sync_buffers=False, weight_stashing_just_for_stats=False, disable_clone_inputs=False, req_grad=None, scale_down_lr_for_smaller_batches=False):
        # NOTE(review): `scale_down_lr_for_smaller_batches` is accepted but
        # never read in this class -- confirm whether it is dead.
        # Cloning already happens at the MPI layer, so a second clone here
        # would be redundant; force it off.
        if (not disable_clone_inputs):
            disable_clone_inputs = True
            warnings.warn('setting disable_clone_inputs=True to avoid double clone since we clone in MPI too.')
        if (gap_aware_just_loss and (not use_recomputation)):
            raise NotImplementedError('gap_aware_just_loss works only with recomputation on')
        self.work_scheduler = work_scheduler
        self.logger = logging.getLogger('msnag')
        self.comm_handler = comm_handler
        self.sync_buffers = sync_buffers
        self.device = device
        self.is_last_partition = is_last_partition
        # A stage at maximal depth also behaves like a first partition (it
        # feeds data in) even when it is not literally stage 0.
        self.is_first_partition = (is_first_partition or (stage_depth == (pipeline_depth - 1)))
        self.stage = stage
        self.pipeline_depth = pipeline_depth
        self.num_stages = num_stages
        self.step_every = step_every
        # `true_stage_depth` is the physical depth; `stage_depth` may be
        # remapped to a "virtual" depth by the work scheduler.
        self.true_stage_depth = stage_depth
        if hasattr(self.work_scheduler, 'get_virtual_stage_depth'):
            self.stage_depth = self.work_scheduler.get_virtual_stage_depth(stage_depth)
        else:
            self.stage_depth = stage_depth
        self._init_partition(partition, use_recomputation, disable_clone_inputs, req_grad)
        self.is_replicated = self._maybe_init_ddp(comm_handler, partition, stage, sync_buffers)
        self.comm_handler.init_buffers()
        self.futures_handler = self.comm_handler.create_futures_handler()
        # Optional collaborators; populated later through the set_* methods.
        self.weight_predictor = None
        self.gap_aware = None
        self.weight_stasher = None
        self.gap_aware_just_loss = gap_aware_just_loss
        self.weight_stashing_just_for_stats = weight_stashing_just_for_stats
        self.true_weights_storage = None
        # batch_idx -> expected staleness recorded at forward time.
        self.delay_at_batch = {}
        # batch_idx -> (output, *ctx) kept for a delayed last-partition backward.
        self.saved_for_backward = dict()
        self.dl_iter = None
        self.log_frequency = log_frequency
        self.batches = 0
        # NOTE(review): the four assignments below repeat earlier ones
        # verbatim -- harmless, but looks like a copy/paste leftover.
        self.true_weights_storage = None
        self.delay_at_batch = {}
        self.saved_for_backward = dict()
        self.dl_iter = None
        # Type-only declarations for attributes injected after construction.
        self.data_propagator: PipelineDataPropagator
        self.trainer: MultiPartitionTrainer
        self.weight_predictor: WeightPredictor
        self.gap_aware: GapAwareBase
        self.weight_stasher: WeightStasher
        self.true_weights_storage: TrueWeightsStorage
        self.partition: Partition

    def _maybe_init_ddp(self, comm_handler, partition, stage, sync_buffers):
        """Wrap the partition's layers in DDP when the comm handler supports
        stage replication. Returns True iff replication was initialized."""
        is_replicated = False
        if hasattr(comm_handler, 'init_ddp_context'):
            is_replicated = True
            ddp = comm_handler.init_ddp_context(self.partition.layers)
            self.partition.layers = ddp
            self.logger.info(f'Initialized DDP stage replication for for stage {stage}.')
            # Used to skip gradient all-reduce on non-step micro-batches.
            self.backward_nosync_context_manager = ddp.no_sync
            # NOTE(review): flattened source is ambiguous here; buffer-sync
            # setup nested under the replicated branch since it is only read
            # on the `is_replicated and sync_buffers` path in eval().
            if sync_buffers:
                self.buffers_to_sync = get_buffers_for_ddp_sync(partition.layers)
        return is_replicated

    def _init_partition(self, partition, use_recomputation, disable_clone_inputs, req_grad):
        """Pick the Partition wrapper class matching this stage's position and
        the recomputation policy, instantiate it, and move it to the device."""
        # A depth-0 stage never waits for others, so recomputation buys nothing.
        if (self.stage_depth == 0):
            use_recomputation = False
        TO_DEVICE = False
        is_last_partition = self.is_last_partition
        is_first_partition = self.is_first_partition
        device = self.device
        if use_recomputation:
            if is_last_partition:
                partition_cls = LastPartition
            elif is_first_partition:
                partition_cls = FirstPartition
            else:
                partition_cls = Partition
            self.partition = partition_cls(partition, device, to_device=TO_DEVICE, req_grad=req_grad)
        elif is_last_partition:
            partition_cls = LastPartition
            self.partition = partition_cls(partition, device, to_device=TO_DEVICE, req_grad=req_grad)
        elif is_first_partition:
            # The first partition consumes raw inputs: no grad w.r.t. them.
            partition_cls = PartitionWithoutRecomputation
            self.partition = partition_cls(partition, device, to_device=TO_DEVICE, _REQ_GRAD=False, req_grad=req_grad)
        else:
            partition_cls = PartitionWithoutRecomputation
            self.partition = partition_cls(partition, device, to_device=TO_DEVICE, _REQ_GRAD=True, req_grad=req_grad)
        if disable_clone_inputs:
            # NOTE: class-level flag -- affects every instance of that class.
            partition_cls._CLONE_INPUTS = False
        if (not TO_DEVICE):
            self.partition.to(device)

    def set_true_weights_storage(self, true_weights_storage):
        """Attach the true-weights storage and install an instance-level
        batch-index -> micro-batch mapping."""
        self.true_weights_storage = true_weights_storage
        se = self.step_every

        def _get_micro_batch(self, batch_index):
            # NOTE(review): `<= se` (not `< se`) -- presumably intentional for
            # the pipeline-fill phase; confirm against the work scheduler.
            if (batch_index <= se):
                return batch_index
            return ((batch_index + 1) % se)
        # Shadow the class-level get_micro_batch for this instance only.
        self.get_micro_batch = types.MethodType(_get_micro_batch, self)

    def get_micro_batch(self, batch_index):
        """Default micro-batch index within the accumulation window
        (overridden per-instance by set_true_weights_storage)."""
        return (batch_index % self.step_every)

    def scale_lr(self, factor):
        """Multiply every optimizer param-group lr by ``factor``.

        Returns the list of old lrs so the caller can restore them.
        """
        pgs = self.trainer.optimizer.param_groups
        old_lrs = []
        for g in pgs:
            old_lr = g['lr']
            new_lr = (old_lr * factor)
            g['lr'] = new_lr
            old_lrs.append(old_lr)
        return old_lrs

    def is_last_micro_batch(self, batch_idx) -> bool:
        """True iff ``batch_idx`` is the last micro-batch of its accumulation
        window, i.e. an optimizer step follows it."""
        se = self.step_every
        do_step = ((batch_idx % se) == (se - 1))
        return do_step

    def set_data_propagator(self, data_propagator: PipelineDataPropagator):
        self.data_propagator = data_propagator

    def set_trainer(self, trainer: PipelineSupportedTrainerType):
        self.trainer = trainer

    def set_dataloader(self, dataloader, debug_run_limit=(- 1), fake_draw=DEBUG_FAKE_DRAW, dl_iter=None):
        """Install the data iterator (or adopt a pre-built ``dl_iter``).

        Debug aid: with ``fake_draw`` and a positive ``debug_run_limit``,
        pre-draws batches so only ``debug_run_limit`` of them remain.
        """
        if (dl_iter is not None):
            self.dl_iter = dl_iter
        else:
            self.dl_iter = iter(dataloader)
            # NOTE(review): flattened source is ambiguous; fake-draw nested
            # under the freshly-created-iterator branch.
            if (fake_draw and (debug_run_limit > 0) and (debug_run_limit < len(dataloader))):
                fake_draw = (len(dataloader) - debug_run_limit)
                for _ in range(fake_draw):
                    next(self.dl_iter)

    def set_weight_predictor(self, weight_predictor: WeightPredictor):
        self.weight_predictor = weight_predictor

    def set_lr_scheduler(self, lr_scheduler):
        self.lr_scheduler = lr_scheduler

    def set_gap_aware(self, gap_aware):
        self.gap_aware = gap_aware
        if (self.step_every > 1):
            raise NotImplementedError('deprecated, work in progress')

    def set_weight_stasher(self, weight_stasher: WeightStasher):
        # The last stage computes its backward immediately, so it never
        # needs stale-weight stashing.
        assert (weight_stasher is not None)
        if self.is_last_partition:
            raise NotImplementedError('Assuming last stage does not need stashing')
        self.weight_stasher = weight_stasher

    def train(self):
        """Set training mode (handles the eval -> train transition)."""
        self.partition.train()
        self.comm_handler.train()

    def eval(self):
        """Set evaluation mode (train -> eval transition); syncs buffers when
        this stage is DDP-replicated and buffer sync is enabled."""
        self.comm_handler.eval()
        self.partition.eval()
        if (self.is_replicated and self.sync_buffers):
            self.comm_handler.sync_buffers(self.buffers_to_sync)

    def maybe_log_lr(self):
        """Count a processed batch and periodically log the current lr."""
        self.batches += 1
        if ((self.batches % self.log_frequency) == 0):
            batch_log_str = ''
            if hasattr(self.trainer, 'scheduler'):
                lr = self.trainer.scheduler.get_last_lr()[0]
                batch_log_str += '| lr {:02.9f}'.format(lr)
            self.logger.info(batch_log_str)

    def forward_pass_and_send(self, batch_idx, num_batches, preload_input_partition):
        """Run one forward pass and ship the activations downstream.

        Returns ``(request_objects, x, ctx)`` where ``request_objects`` are
        the async send handles (or None when nothing was sent).
        """
        if self.is_first_partition:
            x = preload_input_partition
        else:
            last_due_end = ((batch_idx + 1) == num_batches)
            x = self.comm_handler.get_data_forward(batch_idx, num_batches, last_due_end)
            x = (*preload_input_partition, *x)
        (x, *ctx) = self.data_propagator.unpack_data_for_partition(x)
        x = self.partition(x, batch_idx)
        request_objects = None
        # NOTE(review): a last partition at depth > 0 still sends -- confirm
        # this matches the intended depth-0-last-stage protocol.
        if ((not self.is_last_partition) or (self.true_stage_depth > 0)):
            send_ctx = self.data_propagator.pack_send_context(x, *ctx)
            request_objects = self.comm_handler.send_activations(send_ctx, batch_idx)
        return (request_objects, x, ctx)

    def run_batch_forward(self, batch_idx, num_batches, done_bwds=None):
        """Handle one forward pass; for a depth-0 last partition also run the
        backward immediately.

        Training path: record expected staleness, optionally run the weight
        predictor (restoring lrs scaled for the shorter final window), do the
        forward+send, optionally stash weights, then restore true weights --
        unless this is a depth-0 stage with zero staleness, in which case fall
        through to the immediate last-partition backward.

        Eval path: forward only; the last partition records test statistics.
        """
        (preload_input_partition, preload_input_to_outside_loss) = self.data_propagator.preload_from_dataloader(self.dl_iter)
        partition = self.partition
        is_training = partition.training
        if is_training:
            expected_staleness = self.expected_staleness(batch_idx, done_bwds)
            self.delay_at_batch[batch_idx] = expected_staleness
            weight_predictor = self.weight_predictor
            weight_stasher = self.weight_stasher
            if (weight_predictor is not None):
                old_lrs = None
                if (batch_idx >= self.first_effected_batch):
                    # Final (shorter) accumulation window: predict with the
                    # scaled-down lr, then restore.
                    old_lrs = self.scale_lr(self.reminder_scaler_lr_factor)
                weight_predictor.setup(expected_staleness)
                weight_predictor.forward()
                if old_lrs:
                    pgs = self.trainer.optimizer.param_groups
                    for (pg, old_lr) in zip(pgs, old_lrs):
                        pg['lr'] = old_lr
                (request_objects, x, ctx) = self.forward_pass_and_send(batch_idx, num_batches, preload_input_partition)
                if (weight_stasher is not None):
                    # With NAG-style prediction a nominally fresh batch still
                    # ran on predicted weights -- stash as staleness 1.
                    if ((expected_staleness == 0) and weight_predictor.nag_with_predictor):
                        expected_staleness = 1
                    weight_stasher.stash_current(batch_idx, expected_staleness)
            else:
                (request_objects, x, ctx) = self.forward_pass_and_send(batch_idx, num_batches, preload_input_partition)
                if (weight_stasher is not None):
                    weight_stasher.stash_current(batch_idx, expected_staleness)
            if ((expected_staleness > 0) or (self.true_stage_depth > 0)):
                self.true_weights_storage.restore_if_needed()
                return request_objects
        else:
            (request_objects, x, ctx) = self.forward_pass_and_send(batch_idx, num_batches, preload_input_partition)
            if self.is_last_partition:
                ctx = (*preload_input_to_outside_loss, *ctx)
                self.trainer.calc_test_stats(x, *ctx)
                return []
            else:
                return request_objects
        # Only reachable in training on a physical depth-0 stage with zero
        # staleness: run the backward for this batch right away.
        assert is_training
        assert (self.true_stage_depth == 0), self.true_stage_depth
        ctx = (*preload_input_to_outside_loss, *ctx)
        self.saved_for_backward[batch_idx] = (x, *ctx)
        request_objects = self.last_partition_batch_backward(batch_idx, num_batches)
        return request_objects

    def last_partition_batch_backward(self, batch_idx: int, num_batches: int):
        """Backward + (possibly) optimizer step for the last partition,
        consuming the state saved by run_batch_forward."""
        if (not self.is_last_partition):
            raise NotImplementedError('currently only last partition should be at depth 0.')
        (x, *ctx) = self.saved_for_backward.pop(batch_idx)
        trainer = self.trainer
        old_lrs = None
        do_step = self.is_last_micro_batch(batch_idx)
        last_due_end = ((batch_idx + 1) == num_batches)
        if ((not do_step) and last_due_end):
            # Final window is shorter than step_every: force a step with a
            # proportionally scaled lr.
            do_step = True
            old_lrs = self.scale_lr(self.reminder_scaler_lr_factor)
        if ((not do_step) and self.is_replicated):
            # Skip DDP grad all-reduce on non-step micro-batches.
            with self.backward_nosync_context_manager():
                step_and_stats_ctx = trainer.backprop_last_partition(x, *ctx)
        else:
            step_and_stats_ctx = trainer.backprop_last_partition(x, *ctx)
        request_objects = self.comm_handler.send_gradients(self.partition.get_grad(batch_idx), batch_idx)
        self.true_weights_storage.restore_if_needed()
        trainer.last_partition_step_and_statistics(x, *ctx, step_and_stats_ctx, step=do_step, old_lrs=old_lrs)
        if do_step:
            self.true_weights_storage.reset_on_step()
        self.maybe_log_lr()
        return request_objects

    def run_batch_backward(self, batch_idx, num_batches, next_backward_batch_idx=None):
        """Backward pass + (possibly) step for every partition except the last.

        Recomputes the forward, receives upstream gradients (pre-posting the
        receive for ``next_backward_batch_idx`` to overlap communication),
        backprops, sends gradients downstream, and on step boundaries applies
        gap-aware correction / stash bookkeeping before stepping.
        """
        last_due_end = ((batch_idx + 1) == num_batches)
        self.comm_handler.pre_recv_gradients(batch_idx, num_batches, last_due_end)
        weight_stasher = self.weight_stasher
        if (weight_stasher and (not self.gap_aware_just_loss)):
            # Recompute with the exact (stashed) weights used at forward time.
            weight_stasher.pop_and_load_stashed_params(batch_idx)
        self.partition.recompute(batch_idx)
        g = self.comm_handler.wait_recv_gradients(batch_idx, last_due_end)
        if (next_backward_batch_idx is not None):
            next_backward_batch_idx_last_due_end = ((next_backward_batch_idx + 1) == num_batches)
            self.comm_handler.pre_recv_gradients(next_backward_batch_idx, num_batches, next_backward_batch_idx_last_due_end)
        old_lrs = None
        do_step = self.is_last_micro_batch(batch_idx)
        if ((not do_step) and last_due_end):
            # Final shorter window: force a step with scaled lr.
            do_step = True
            old_lrs = self.scale_lr(self.reminder_scaler_lr_factor)
        if ((not do_step) and self.is_replicated):
            with self.backward_nosync_context_manager():
                self.partition.backward_from_recomputed(g, batch_idx)
        else:
            self.partition.backward_from_recomputed(g, batch_idx)
        request_objects = None
        if (not self.is_first_partition):
            request_objects = self.comm_handler.send_gradients(self.partition.get_grad(batch_idx), batch_idx)
        if do_step:
            trainer = self.trainer
            weight_stasher = self.weight_stasher
            if weight_stasher:
                if (self.gap_aware_just_loss or self.weight_stashing_just_for_stats):
                    stashed_theta = weight_stasher.pop_stashed_buff(batch_idx)
                    real_theta = None
                    not_loaded_theta = stashed_theta
                else:
                    real_theta = self.true_weights_storage.get_true_weights()
                    stashed_theta = None
                    not_loaded_theta = real_theta
                try_record_real_gap_from_current(trainer.statistics, trainer.optimizer, not_loaded_theta, pre_computed_gap=None)
            else:
                real_theta = None
                stashed_theta = None
            if self.gap_aware:
                trainer: Union[(GapAwareTrainerMixin, PipelineSupportedTrainerType)]
                if (self.step_every > 1):
                    raise NotImplementedError()
                delay = self.delay_at_batch.pop(batch_idx)
                trainer.apply_gap_aware(real_theta=real_theta, delay=delay, stashed_theta=stashed_theta)
            if weight_stasher:
                weight_stasher.mark_stashed_as_dirty()
            self.true_weights_storage.restore_if_needed()
            self.true_weights_storage.reset_on_step()
            trainer.non_last_partition_step(old_lrs)
        else:
            self.true_weights_storage.restore_if_needed()
            if (self.gap_aware_just_loss and self.weight_stasher):
                weight_stasher.pop_stashed_buff(batch_idx)
        return request_objects

    def expected_staleness(self, done_fwds, done_bwds):
        """Number of optimizer steps expected before this batch's backward:
        count step boundaries inside the in-flight [done_bwds, done_fwds) window."""
        return sum([self.is_last_micro_batch(x) for x in range(done_bwds, done_fwds)])

    def run_forward_until_flush(self, num_batches):
        """Run ``num_batches`` forward-only batches (pipelined evaluation).

        Requires: set_dataloader() was called (when this stage loads data)
        and eval() was called.
        """
        run_batch_forward = self.run_batch_forward
        futures_handler = self.futures_handler
        if self.is_last_partition:
            b_tqdm = tqdm(range(num_batches), desc='Eval')
        else:
            b_tqdm = range(num_batches)
        for done_fwds in b_tqdm:
            ro = run_batch_forward(done_fwds, num_batches)
            futures_handler.after_forward(ro, done_fwds, False)
        futures_handler.clean_eval()

    def run_until_flush(self, num_batches, flush_rate=(- 1)):
        """Run one training flush of ``num_batches`` batches, interleaving
        forwards and backwards as dictated by the work scheduler.

        Requires: set_dataloader() was called (when this stage loads data)
        and train() was called.
        """
        done_bwds = 0
        done_fwds = 0
        # The final accumulation window may be shorter; its step uses a
        # proportionally smaller lr.
        reminder = (num_batches % self.step_every)
        self.first_effected_batch = (num_batches - reminder)
        self.reminder_scaler_lr_factor = (reminder / self.step_every)
        stage_depth = self.stage_depth
        true_stage_depth = self.true_stage_depth
        pipeline_depth = self.pipeline_depth
        work_scheduler = self.work_scheduler
        is_last_partition = self.is_last_partition
        run_batch_backward = self.run_batch_backward
        run_batch_forward = self.run_batch_forward
        futures_handler = self.futures_handler
        self.work_scheduler.reset()
        if is_last_partition:
            b_tqdm = tqdm(range(num_batches), desc='Train')
            b_tqdm_it = iter(b_tqdm)
        while (done_bwds < num_batches):
            action_is_fwd = work_scheduler(stage_depth, pipeline_depth, num_batches, done_fwds, done_bwds)
            if action_is_fwd:
                ro = run_batch_forward(done_fwds, num_batches, done_bwds=done_bwds)
                if (true_stage_depth == 0):
                    # Depth-0 forward already included its backward.
                    futures_handler.after_backward(ro, done_bwds)
                elif (stage_depth == 0):
                    # Virtually depth-0: run the matching backward right away.
                    futures_handler.after_forward(ro, done_fwds, True)
                    ro = run_batch_backward(done_bwds, num_batches)
                    futures_handler.after_backward(ro, done_bwds)
                else:
                    futures_handler.after_forward(ro, done_fwds, True)
            else:
                if ((done_bwds + 1) < num_batches):
                    next_backward_batch_idx_to_run = (done_bwds + 1)
                else:
                    next_backward_batch_idx_to_run = None
                ro = run_batch_backward(done_bwds, num_batches, next_backward_batch_idx=next_backward_batch_idx_to_run)
                futures_handler.after_backward(ro, done_bwds)
            if (stage_depth == 0):
                # Depth-0 advances both counters per iteration.
                done_bwds += 1
                done_fwds += 1
                if is_last_partition:
                    next(b_tqdm_it)
            elif action_is_fwd:
                done_fwds += 1
            else:
                done_bwds += 1
        if ((not self.trainer.PER_STEP_SCHEDULER) and (flush_rate < 0)):
            self.lr_scheduler.step()
        futures_handler.clean_train()
|
class GPipePartitionManager(SinglePartitionManager):
    """Partition manager implementing synchronous GPipe scheduling: all
    micro-batch forwards of a window first, then the backwards in reverse
    micro-batch order."""

    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        # batch_idx -> state saved at forward time for the delayed backward.
        self.saved_for_backward = dict()

    def _init_partition(self, partition, use_recomputation, disable_input_clone, req_grad):
        """Instantiate the GPipe partition wrapper matching this stage's
        position; recomputation is currently mandatory."""
        TO_DEVICE = False
        is_last_partition = self.is_last_partition
        is_first_partition = self.is_first_partition
        device = self.device
        if use_recomputation:
            if is_last_partition:
                partition_cls = GPipeLastPartition
            elif is_first_partition:
                partition_cls = GPipeFirstPartition
            else:
                partition_cls = GPipePartition
            if disable_input_clone:
                # NOTE: class-level flag -- affects all instances of that class.
                partition_cls._CLONE_INPUTS = False
            self.partition = partition_cls(partition, device, to_device=TO_DEVICE, req_grad=req_grad)
        else:
            raise NotImplementedError('GPIPE stages without recomputation not yet supported')
        if (not TO_DEVICE):
            self.partition.to(device)

    def run_batch_forward(self, batch_idx, num_batches, done_bwds=None):
        """Forward one micro-batch; on the last partition, save what the
        delayed backward will need.

        For the last micro-batch of an accumulation window the autograd graph
        is kept, so the full ``(x, *ctx)`` is saved; for the others only
        ``ctx`` is saved and activations are recomputed at backward time.
        """
        partition = self.partition
        is_training = partition.training
        last_due_end = ((batch_idx + 1) == num_batches)
        last_due_step_every = (((batch_idx + 1) % self.step_every) == 0)
        is_last_micro_batch = (last_due_step_every or last_due_end)
        partition.is_last_micro_batch = is_last_micro_batch
        (preload_input_partition, preload_input_to_outside_loss) = self.data_propagator.preload_from_dataloader(self.dl_iter)
        (request_objects, x, ctx) = self.forward_pass_and_send(batch_idx, num_batches, preload_input_partition)
        if (not self.is_last_partition):
            return request_objects
        else:
            ctx = (*preload_input_to_outside_loss, *ctx)
            if (not is_training):
                self.trainer.calc_test_stats(x, *ctx)
                return []
            elif is_last_micro_batch:
                # Graph kept alive: save the head for a direct backward.
                self.saved_for_backward[batch_idx] = (x, *ctx)
            else:
                # Graph discarded: save only the context; activations will be
                # recomputed in last_partition_batch_backward.
                self.saved_for_backward[batch_idx] = ctx

    def last_partition_batch_backward(self, batch_idx: int, num_batches: int, next_backward_batch_idx=None):
        """Backward for one micro-batch on the last partition.

        Backwards run in reverse micro-batch order, so the optimizer step is
        taken on the *first* micro-batch of the window -- the last one to be
        backpropagated.
        """
        last_due_step_every = (((batch_idx + 1) % self.step_every) == 0)
        last_due_end = ((batch_idx + 1) == num_batches)
        is_last_micro_batch = (last_due_step_every or last_due_end)
        partition = self.partition
        trainer = self.trainer
        partition.is_last_micro_batch = is_last_micro_batch
        is_first_micro_batch = ((batch_idx % self.step_every) == 0)
        do_step = is_first_micro_batch
        # The final (possibly shorter) window steps with a scaled-down lr.
        is_final_shorter_batch = ((batch_idx + self.step_every) > num_batches)
        change_lr = (do_step and is_final_shorter_batch)
        if (not is_last_micro_batch):
            # Rebuild the autograd graph for this micro-batch.
            self.partition.recompute(batch_idx)
            x = self.partition.pop_saved_graph_head(batch_idx)
            if (not isinstance(x, Tensor)):
                assert (len(x) == 1)
                x = x[0]
            ctx = self.saved_for_backward.pop(batch_idx)
        else:
            (x, *ctx) = self.saved_for_backward.pop(batch_idx)
        if ((not do_step) and self.is_replicated):
            # Skip DDP grad all-reduce on non-step micro-batches.
            with self.backward_nosync_context_manager():
                step_and_stats_ctx = trainer.backprop_last_partition(x, *ctx)
        else:
            step_and_stats_ctx = trainer.backprop_last_partition(x, *ctx)
        request_objects = self.comm_handler.send_gradients(partition.get_grad(batch_idx), batch_idx)
        if change_lr:
            old_lrs = self.scale_lr(self.reminder_scaler_lr_factor)
        else:
            old_lrs = None
        trainer.last_partition_step_and_statistics(x, *ctx, step_and_stats_ctx, step=do_step, old_lrs=old_lrs)
        self.maybe_log_lr()
        return request_objects

    def run_batch_backward(self, batch_idx: int, num_batches: int, next_backward_batch_idx=None):
        """Backward + (possibly) step for all partitions except the last."""
        the_bwd_of_last_fwd_due_step_every = (((batch_idx + 1) % self.step_every) == 0)
        the_bwd_of_last_fwd_due_end = ((batch_idx + 1) == num_batches)
        self.comm_handler.pre_recv_gradients(batch_idx, num_batches, the_bwd_of_last_fwd_due_end)
        is_last_micro_batch = (the_bwd_of_last_fwd_due_step_every or the_bwd_of_last_fwd_due_end)
        partition = self.partition
        partition.is_last_micro_batch = is_last_micro_batch
        # Reverse-order backwards: step on the window's first micro-batch.
        is_first_micro_batch = ((batch_idx % self.step_every) == 0)
        do_step = is_first_micro_batch
        if (not is_last_micro_batch):
            self.partition.recompute(batch_idx)
        g = self.comm_handler.wait_recv_gradients(batch_idx, the_bwd_of_last_fwd_due_end)
        if (next_backward_batch_idx is not None):
            # Overlap: start receiving the next micro-batch's gradients now.
            next_backward_batch_idx_last_due_end = False
            self.comm_handler.pre_recv_gradients(next_backward_batch_idx, num_batches, next_backward_batch_idx_last_due_end)
        if ((not do_step) and self.is_replicated):
            with self.backward_nosync_context_manager():
                partition.backward_from_recomputed(g, batch_idx)
        else:
            partition.backward_from_recomputed(g, batch_idx)
        request_objects = None
        if (not self.is_first_partition):
            g = partition.get_grad(batch_idx)
            request_objects = self.comm_handler.send_gradients(g, batch_idx)
            del g
        if do_step:
            trainer = self.trainer
            is_final_shorter_batch = ((batch_idx + self.step_every) > num_batches)
            if is_final_shorter_batch:
                old_lrs = self.scale_lr(self.reminder_scaler_lr_factor)
            else:
                old_lrs = None
            trainer.non_last_partition_step(old_lrs)
        return request_objects

    def run_until_flush(self, num_batches, flush_rate=(- 1)):
        """Run one synchronous GPipe flush of ``num_batches`` micro-batches.

        NOTE: unlike the asynchronous pipeline, backwards inside a window are
        issued in reverse order starting from ``mark_bwd_start``.
        Requires: set_dataloader() was called (when this stage loads data)
        and train() was called.
        """
        done_bwds = 0
        done_fwds = 0
        reminder = (num_batches % self.step_every)
        self.first_effected_batch = (num_batches - reminder)
        self.reminder_scaler_lr_factor = (reminder / self.step_every)
        stage_depth = self.stage_depth
        pipeline_depth = self.pipeline_depth
        work_scheduler = self.work_scheduler
        is_last_partition = self.is_last_partition
        run_batch_backward = (self.run_batch_backward if (not is_last_partition) else self.last_partition_batch_backward)
        run_batch_forward = self.run_batch_forward
        futures_handler = self.futures_handler
        mark_bwd_start = 0
        if is_last_partition:
            b_tqdm = tqdm(range(num_batches), desc='Train')
            b_tqdm_it = iter(b_tqdm)
        while (done_bwds < num_batches):
            action_is_fwd = work_scheduler(stage_depth, pipeline_depth, num_batches, done_fwds, done_bwds)
            if action_is_fwd:
                ro = run_batch_forward(done_fwds, num_batches, done_bwds=done_bwds)
                futures_handler.after_forward(ro, done_fwds, True)
                done_fwds += 1
            else:
                # A new window starts once all its forwards are done (or at
                # the final, shorter window).
                if ((done_fwds == (done_bwds + self.step_every)) or (done_bwds == self.first_effected_batch)):
                    mark_bwd_start = done_bwds
                # Pick micro-batches in reverse order inside the window.
                micro_batch_to_run = ((done_fwds - 1) - done_bwds)
                batch_idx_to_run = (mark_bwd_start + micro_batch_to_run)
                if ((not is_last_partition) and ((done_bwds + 1) < done_fwds)):
                    next_backward_micro_batch_idx = ((done_fwds - 1) - (done_bwds + 1))
                    next_backward_batch_idx_to_run = (mark_bwd_start + next_backward_micro_batch_idx)
                else:
                    next_backward_batch_idx_to_run = None
                ro = run_batch_backward(batch_idx_to_run, num_batches, next_backward_batch_idx=next_backward_batch_idx_to_run)
                futures_handler.after_backward(ro, done_bwds)
                done_bwds += 1
                if is_last_partition:
                    next(b_tqdm_it)
        if ((not self.trainer.PER_STEP_SCHEDULER) and (flush_rate < 0)):
            self.lr_scheduler.step()
        futures_handler.clean_train()
|
def set_inplace_false_(m):
    """Turn off ``m.inplace`` when it exists and is truthy.

    Returns True iff the flag was replaced.
    """
    if getattr(m, 'inplace', False):
        m.inplace = False
        return True
    return False
|
def replace_inplace_for_a_given_layer_(model, layer_name='l_0'):
    """Disable ``inplace`` on the sub-module attribute ``layer_name`` of
    ``model``, when present.

    Returns True iff a flag was replaced.
    """
    _missing = object()
    layer = getattr(model, layer_name, _missing)
    if layer is _missing:
        return False
    return set_inplace_false_(layer)
|
def replace_inplace_for_first_innermost_layer_(model):
    """Disable ``inplace`` on the first innermost layer of ``model``.

    model: torch.nn.Module.
    Returns True iff a flag was replaced.
    """
    innermost, found_name = get_innnermost_first_layer_and_name(model, '')
    # An empty name means we never descended: the model itself is a leaf.
    if not found_name:
        assert innermost is model
    return set_inplace_false_(innermost)
|
def get_innnermost_first_layer_and_name(partition, name=''):
    """Follow first children down to a leaf module.

    Args:
        partition: a torch.nn.Module
        name: the name for ``partition`` in the calling context

    Returns:
        (leaf_module, leaf_name) -- the innermost first layer and the name it
        has inside its immediate parent (or ``name`` if ``partition`` itself
        is a leaf).

    Example:
        >>> import torch
        >>> m = torch.nn.TransformerDecoderLayer(d_model=10, nhead=2)
        >>> layer, name = get_innnermost_first_layer_and_name(m, 'm')
        >>> print(layer, name)

        Linear(in_features=10, out_features=10, bias=True) out_proj
    """
    current, current_name = partition, name
    while True:
        children = list(current.named_children())
        if not children:
            return (current, current_name)
        (current_name, current) = children[0]
|
def get_outermost_last_layer_and_name(partition, name=''):
    """Follow last children down to a leaf module ("outermost": defined last).

    Args:
        partition: a torch.nn.Module
        name: the name for ``partition`` in the calling context

    Returns:
        (leaf_module, leaf_name) -- the last-defined leaf and the name it has
        inside its immediate parent (or ``name`` if ``partition`` is a leaf).

    Example:
        >>> import torch
        >>> m = torch.nn.TransformerDecoderLayer(d_model=10, nhead=2)
        >>> layer, name = get_outermost_last_layer_and_name(m, 'm')
        >>> print(layer, name)

        Dropout(p=0.1, inplace=False) dropout3
        # NOTE: this is not accurate, the last thing is actually norm()
    """
    current, current_name = partition, name
    while True:
        children = list(current.named_children())
        if not children:
            return (current, current_name)
        (current_name, current) = children[-1]
|
class PartitionRngStasher():
    """Stash and restore RNG state, used during recomputation so dropout etc.
    replay identically.

    Restoring pops the entry, so each stashed state can be restored once.

    # NOTE:
    # (1) problematic when two recomputing stages use the same device
    #     (e.g. tied GPipe).
    # (2) only pytorch's seeds are handled -- not numpy's or python's. (TODO)
    """

    def __init__(self, device=torch.device('cpu')):
        self.device = device
        # micro_batch_index -> (cpu_rng_state, gpu_rng_state_or_None)
        self.state = {}
        self.devices = [self.device] if self.device.type == 'cuda' else []

    def stash_rng_state(self, micro_batch_index):
        """Record the current CPU (and, on CUDA, GPU) RNG state."""
        cpu_state = torch.get_rng_state()
        gpu_state = None
        if self.device.type == 'cuda':
            with torch.cuda.device(self.device):
                gpu_state = torch.cuda.get_rng_state()
        self.state[micro_batch_index] = (cpu_state, gpu_state)

    def restore_rng_state(self, micro_batch_index):
        """Pop and re-install the RNG state stashed for this micro-batch."""
        cpu_state, gpu_state = self.state.pop(micro_batch_index)
        torch.set_rng_state(cpu_state)
        if gpu_state is not None:
            torch.cuda.set_rng_state(gpu_state, self.device)

    def clear_state(self):
        """Drop all stashed states."""
        self.state.clear()
|
def register_trainer(name, trainer_cls: Type[PipelineSupportedTrainerType]):
    """Register ``trainer_cls`` under ``name`` plus its mixin variants."""
    AVAILABLE_TRAINERS[name] = trainer_cls
    variants = (
        ('_local_grad_norm', local_grad_norm_mixin_trainer_factory),
        ('_global_grad_norm', global_grad_norm_mixin_trainer_factory),
        ('_local_grad_norm_prop', local_grad_norm_prop_mixin_trainer_factory),
        ('_gap_aware', gap_aware_trainer_factory),
    )
    for suffix, factory in variants:
        AVAILABLE_TRAINERS[name + suffix] = factory(trainer_cls=trainer_cls)
|
def get_trainer_cls(args) -> Type[PipelineSupportedTrainerType]:
    """Look up the trainer class registered under ``args.trainer['type']``."""
    requested = args.trainer['type']
    trainer_cls = AVAILABLE_TRAINERS.get(requested)
    assert trainer_cls is not None
    return trainer_cls
|
def SQUAD_loss(logits, start_positions, end_positions):
    """Mean of start- and end-position cross-entropy for SQuAD QA.

    ``logits`` has a trailing dim of 2 (start/end scores per token).
    Positions outside the sequence are clamped IN PLACE to ``ignored_index``
    (= sequence length), which cross_entropy then skips.
    """
    start_logits, end_logits = logits.split(1, dim=-1)
    start_logits = start_logits.squeeze(-1)
    end_logits = end_logits.squeeze(-1)
    ignored_index = start_logits.size(1)
    # NOTE: mutates the caller's position tensors.
    start_positions.clamp_(0, ignored_index)
    end_positions.clamp_(0, ignored_index)
    start_loss = F.cross_entropy(start_logits, start_positions, ignore_index=ignored_index)
    end_loss = F.cross_entropy(end_logits, end_positions, ignore_index=ignored_index)
    return (start_loss + end_loss) / 2
|
def to_list(tensor):
    """Convert ``tensor`` to a plain (possibly nested) Python list/scalar."""
    detached = tensor.detach()
    return detached.cpu().tolist()
|
class SquadTrainer(ScheduledOptimizationStepMultiPartitionTrainer):
    """SQuAD question-answering trainer: SQUAD_loss for training, plus
    per-example SquadResult collection for official evaluation."""
    PER_STEP_SCHEDULER = True

    def __init__(self, model: Module, optimizer: Optimizer, scheduler, statistics: SquadStats, step_every=1):
        super().__init__(model, optimizer, scheduler, statistics)
        self.step_every = step_every
        # Set externally before evaluation: indexable by example index.
        self.features = None

    def advanced_test_stats(self, x, example_indices):
        """Append a SquadResult per example for the official eval script."""
        start_logits, end_logits = x.split(1, dim=(- 1))
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)
        for i, example_index in enumerate(example_indices):
            feature = self.features[example_index.item()]
            result = SquadResult(int(feature.unique_id),
                                 to_list(start_logits[i]),
                                 to_list(end_logits[i]))
            self.statistics.all_results.append(result)

    def calc_test_stats(self, x, start_positions, end_positions, example_indices=None, batch_size=None):
        """Record evaluation loss; optionally collect per-example results."""
        loss = SQUAD_loss(x, start_positions, end_positions)
        self.statistics.update_on_batch('loss', loss.item(), batch_size)
        if example_indices is not None:
            self.advanced_test_stats(x, example_indices)

    def backprop_last_partition(self, x, start_positions, end_positions, batch_size):
        """Compute the (accumulation-scaled) loss and backpropagate it."""
        loss = SQUAD_loss(x, start_positions, end_positions)
        if self.step_every > 1:
            loss /= self.step_every
        loss.backward()
        return loss

    def last_partition_step_and_statistics(self, x, start_positions, end_positions, batch_size, loss, step=True, old_lrs=None):
        """Optionally step on already-computed grads, then record the loss.

        ``x`` is the model output; ``step`` supports grad accumulation.
        """
        if step:
            self.step_on_computed_grads(old_lrs)
        self.statistics.update_on_batch('loss', loss.item(), batch_size)
|
class CEPTrainer(ScheduledOptimizationStepMultiPartitionTrainer):
    """Binary-classification trainer using BCE-with-logits loss.

    Tracks 'loss' and 'acc' statistics per batch.
    """
    PER_STEP_SCHEDULER = False

    def __init__(self, model: Module, optimizer: Optimizer, scheduler, statistics: Stats, step_every=1):
        super().__init__(model, optimizer, scheduler, statistics)
        self.loss_fn = torch.nn.BCEWithLogitsLoss()
        # FIX: was hard-coded `self.step_every = 1`, silently ignoring the
        # `step_every` argument -- inconsistent with SquadTrainer/CVTrainer
        # and with backprop_last_partition's `step_every > 1` scaling branch.
        self.step_every = step_every

    def calc_test_stats(self, x, y, batch_size):
        """Record loss and accuracy for an evaluation batch."""
        loss = self.loss_fn(x, y)
        assert (batch_size == len(y))
        # NOTE(review): x holds raw logits (BCEWithLogitsLoss), so the 0.5
        # threshold is in logit space (~= prob 0.62), not probability space.
        # Preserved as-is; confirm intent.
        y_pred = torch.ge(x, 0.5)
        num_correct = torch.sum((y == y_pred)).item()
        self.statistics.update_on_batch('loss', loss.item(), batch_size)
        self.statistics.update_on_batch('acc', num_correct, batch_size)

    def last_partition_step_and_statistics(self, x, y, batch_size, loss, step=True, old_lrs=None):
        """Optionally step on already-computed grads, then record loss/acc.

        ``step`` supports grad accumulation.
        """
        assert (batch_size == len(y))
        y_pred = torch.ge(x, 0.5)
        num_correct = torch.sum((y == y_pred)).item()
        if step:
            self.step_on_computed_grads(old_lrs)
        self.statistics.update_on_batch('loss', loss.item(), batch_size)
        self.statistics.update_on_batch('acc', num_correct, batch_size)

    def backprop_last_partition(self, x, y, *args, **kw):
        """Compute the (accumulation-scaled) loss and backpropagate it."""
        loss = self.loss_fn(x, y)
        if (self.step_every > 1):
            loss /= self.step_every
        loss.backward()
        return loss
|
class CVTrainer(ScheduledOptimizationStepMultiPartitionTrainer):
    """Image-classification trainer: cross-entropy loss plus accuracy stats."""
    PER_STEP_SCHEDULER = False

    def __init__(self, model: Module, optimizer: Optimizer, scheduler, statistics: Stats, step_every=1):
        super().__init__(model, optimizer, scheduler, statistics)
        self.step_every = step_every
        self.loss_fn = torch.nn.CrossEntropyLoss()

    def _num_correct(self, x, y):
        # Count of argmax predictions matching the labels.
        return torch.sum(torch.argmax(x, 1) == y).item()

    def backprop_last_partition(self, x, y, *args, **kw):
        """Compute the (accumulation-scaled) loss and backpropagate it."""
        loss = self.loss_fn(x, y)
        if self.step_every > 1:
            loss /= self.step_every
        loss.backward()
        return loss

    def calc_test_stats(self, x, y, batch_size):
        """Record loss and accuracy for an evaluation batch."""
        loss = self.loss_fn(x, y)
        assert batch_size == len(y)
        correct = self._num_correct(x, y)
        self.statistics.update_on_batch('loss', loss.item(), batch_size)
        self.statistics.update_on_batch('acc', correct, batch_size)

    def last_partition_step_and_statistics(self, x, y, batch_size, loss, step=True, old_lrs=None):
        """Optionally step on already-computed grads, then record loss/acc.

        ``step`` supports grad accumulation.
        """
        assert batch_size == len(y)
        correct = self._num_correct(x, y)
        if step:
            self.step_on_computed_grads(old_lrs)
        self.statistics.update_on_batch('loss', loss.item(), batch_size)
        self.statistics.update_on_batch('acc', correct, batch_size)
|
class CVTrainerPerStep(CVTrainer):
    """CVTrainer variant whose scheduler advances on every optimizer step."""
    PER_STEP_SCHEDULER = True
|
class GapAwareTrainerMixin():
    """Mixin adding gap-aware optimization support to a trainer."""
    HAS_GAP_AWARE = True
    def __init__(self, gap_aware: GapAwareBase, scheduler=None):
        self.gap_aware = gap_aware
        # Let the gap-aware object observe LR changes made by the scheduler.
        if (scheduler is not None):
            gap_aware.patch_scheduler(scheduler)
    def apply_gap_aware(self, real_theta=None, delay=None, stashed_theta=None):
        """Apply the gap-aware correction.

        NOTE: we assume that if `real_theta` is given, a stashed weight is
        loaded into the model. Otherwise, if `stashed_theta` is given, we assume
        that the true weights are already loaded into the model, and we compute
        the gap from the stashed weights (used in the "Gap aware just for loss"
        algorithm).
        """
        ga = self.gap_aware
        ga.update_running_stats()
        if delay:
            if real_theta:
                ga.apply_on_theta(real_theta)
            elif stashed_theta:
                ga.apply_on_stashed(stashed_theta)
        else:
            # NOTE(review): this branch runs only when `delay` is falsy (0/None),
            # so `assert delay == 1` can never hold and always fires — the
            # condition looks inverted or unreachable; confirm intended semantics.
            assert (delay == 1)
            ga.apply_from_grad()
|
def gap_aware_trainer_factory(trainer_cls: Type[PipelineSupportedTrainerWithoutGapAware]):
    """Create a trainer class combining `trainer_cls` with GapAwareTrainerMixin."""
    class GapAwareCreatedTrainer(trainer_cls, GapAwareTrainerMixin):
        def __init__(self, gap_aware, scheduler=None, **kw):
            # Initialize both bases explicitly: the wrapped trainer first, then
            # the mixin (which records `gap_aware` and patches the scheduler).
            trainer_cls.__init__(self, scheduler=scheduler, **kw)
            GapAwareTrainerMixin.__init__(self, gap_aware, scheduler=scheduler)
    return GapAwareCreatedTrainer
|
class GlueTrainer(ScheduledOptimizationStepMultiPartitionTrainer):
    """Last-partition trainer for GLUE tasks.

    `loss_fn`, `features` and `num_labels` are filled in later by the
    task-specific setup code.
    """
    PER_STEP_SCHEDULER = True

    def __init__(self, model: Module, optimizer: Optimizer, scheduler, statistics: Stats, step_every=1):
        super().__init__(model, optimizer, scheduler, statistics)
        # Populated by task setup after construction.
        self.features = None
        self.num_labels = None
        self.loss_fn = None
        self.step_every = step_every

    def calc_test_stats(self, x, labels, batch_size=None):
        """Record eval loss and stash predictions/labels for metric computation."""
        loss = self.loss_fn(x, labels)
        self.statistics: GlueStats
        self.statistics.update_on_batch('loss', loss.item(), batch_size)
        self.statistics.predictions.append(x.detach())
        self.statistics.label_ids.append(labels.detach())

    def backprop_last_partition(self, x, labels, batch_size):
        """Compute the (accumulation-scaled) loss and backpropagate."""
        loss = self.loss_fn(x, labels)
        if self.step_every > 1:
            loss /= self.step_every
        loss.backward()
        return loss

    def last_partition_step_and_statistics(self, x, labels, batch_size, loss, step=True, old_lrs=None):
        """Optionally apply the optimizer step, then record the loss.

        `x` is the model output; `step=False` supports gradient accumulation.
        """
        if step:
            self.step_on_computed_grads(old_lrs)
        self.statistics.update_on_batch('loss', loss.item(), batch_size)
|
def global_grad_norm_mixin_trainer_factory(trainer_cls: "Type[ScheduledOptimizationStepMultiPartitionTrainer]"):
    """Wrap `trainer_cls` with global (cross-stage) gradient-norm recording/clipping.

    The returned class sums the local squared gradient norms, all-reduces the
    sum across ranks, and (when `max_grad_norm` is set) scales local gradients
    so the *global* norm does not exceed it.
    """
    class GradNormMixedTrainer(trainer_cls):
        def __init__(self, *args, max_grad_norm=None, always_calc_grad_norm=False, **kw):
            super().__init__(*args, **kw)
            self.always_calc_grad_norm = always_calc_grad_norm
            self.max_grad_norm = max_grad_norm

        def step_on_computed_grads(self, old_lrs=None):
            # Clip/record the gradient norm right before the optimizer step.
            self._grad_norm()
            return super().step_on_computed_grads(old_lrs=old_lrs)

        def _grad_norm(self):
            if not (self.max_grad_norm or self.always_calc_grad_norm):
                return
            with torch.no_grad():
                my_total_norm = calc_local_total_norm_wo_sqrt(self.model.parameters(), norm_type=2)
                if not isinstance(my_total_norm, torch.Tensor):
                    # No parameters with gradients on this stage.
                    my_total_norm = torch.tensor(0, dtype=torch.float32)
                my_total_norm: torch.Tensor
                # NOTE(review): this is the *squared* local norm (no sqrt) —
                # confirm 'local_grad_norm' is meant to record it as such.
                my_total_local_norm = my_total_norm.item()
                # BUGFIX: Tensor.to() is out-of-place; the result was previously
                # discarded so the float32 cast never happened. Assign it back.
                my_total_norm = my_total_norm.to(torch.float32)
                dist.all_reduce(my_total_norm, op=dist.ReduceOp.SUM)
                total_norm = torch.sqrt(my_total_norm)
                if total_norm and self.statistics.has_statistic('grad_norm') and (dist.get_rank() == 0):
                    self.statistics.update_on_batch('grad_norm', total_norm.item(), 1)
                if my_total_local_norm and self.statistics.has_statistic('local_grad_norm'):
                    self.statistics.update_on_batch('local_grad_norm', my_total_local_norm, 1)
                if self.max_grad_norm:
                    # Same global coefficient on every rank keeps gradients consistent.
                    clip_coef = self.max_grad_norm / (total_norm + 1e-06)
                    if clip_coef < 1:
                        for p in self.model.parameters():
                            p.grad.detach().mul_(clip_coef.to(p.grad.device))
    return GradNormMixedTrainer
|
def local_grad_norm_mixin_trainer_factory(trainer_cls: "Type[ScheduledOptimizationStepMultiPartitionTrainer]"):
    """Wrap `trainer_cls` so each stage clips/records its *local* gradient norm."""
    class GradNormMixedTrainer(trainer_cls):
        def __init__(self, *args, max_grad_norm=None, always_calc_grad_norm=False, **kw):
            super().__init__(*args, **kw)
            self.max_grad_norm = max_grad_norm
            self.always_calc_grad_norm = always_calc_grad_norm

        def step_on_computed_grads(self, old_lrs=None):
            # Handle the gradient norm just before stepping.
            self._grad_norm()
            return super().step_on_computed_grads(old_lrs=old_lrs)

        def _grad_norm(self):
            norm_value = None
            with torch.no_grad():
                if self.max_grad_norm:
                    # Clip and get the pre-clip norm in one call.
                    norm_value = clip_grad_norm_(self.model.parameters(), self.max_grad_norm, norm_type=2)
                elif self.always_calc_grad_norm:
                    norm_value = calc_local_total_norm(self.model.parameters(), norm_type=2)
            if norm_value and self.statistics.has_statistic('local_grad_norm'):
                self.statistics.update_on_batch('local_grad_norm', norm_value.item(), 1)
    return GradNormMixedTrainer
|
def local_grad_norm_prop_mixin_trainer_factory(trainer_cls: "Type[ScheduledOptimizationStepMultiPartitionTrainer]"):
    """Like the local grad-norm mixin, but `max_grad_norm` is scaled by this
    stage's share of the model's trainable parameters (summed across ranks)."""
    class GradNormMixedTrainer(trainer_cls):
        def __init__(self, *args, max_grad_norm=None, always_calc_grad_norm=False, **kw):
            super().__init__(*args, **kw)
            self.always_calc_grad_norm = always_calc_grad_norm
            self.max_grad_norm = max_grad_norm
            # BUGFIX: the proportional scaling previously ran unconditionally and
            # crashed with TypeError (None * float) when max_grad_norm was None
            # (the default). Only scale — and only run the collective — when
            # clipping is enabled; the config is identical on all ranks, so the
            # branch is taken symmetrically.
            if self.max_grad_norm is not None:
                my_grad_params = sum(p.numel() for p in self.model.parameters() if p.requires_grad)
                total_params = torch.tensor(my_grad_params, dtype=torch.int64)
                dist.all_reduce(total_params, op=dist.ReduceOp.SUM)
                self.max_grad_norm *= (my_grad_params / total_params.item())

        def step_on_computed_grads(self, old_lrs=None):
            # Clip/record the gradient norm right before the optimizer step.
            self._grad_norm()
            return super().step_on_computed_grads(old_lrs=old_lrs)

        def _grad_norm(self):
            total_norm = None
            if self.max_grad_norm:
                with torch.no_grad():
                    total_norm = clip_grad_norm_(self.model.parameters(), self.max_grad_norm, norm_type=2)
            elif self.always_calc_grad_norm:
                with torch.no_grad():
                    total_norm = calc_local_total_norm(self.model.parameters(), norm_type=2)
            if total_norm and self.statistics.has_statistic('local_grad_norm'):
                self.statistics.update_on_batch('local_grad_norm', total_norm.item(), 1)
    return GradNormMixedTrainer
|
class LastPartitionTrainer(abc.ABC):
    """Interface for trainers that run on the pipeline's last partition."""

    @abc.abstractmethod
    def backprop_last_partition(self, *args, **kw):
        """Compute the loss and run backward for the last partition."""

    @abc.abstractmethod
    def last_partition_step_and_statistics(self, *args, **kw):
        """Optionally step the optimizer and record training statistics."""

    @abc.abstractmethod
    def step_on_computed_grads(self, **kw):
        """Apply an optimizer step using already-computed gradients."""

    @abc.abstractmethod
    def calc_test_stats(self, *args, **kw):
        """Record evaluation statistics for a batch."""
|
class DataAndLabelsLastPartitionTrainer(LastPartitionTrainer):
    'Adding x,y to represents (data,labels).'
    @abc.abstractmethod
    def backprop_last_partition(self, x, y, *args, **kw):
        """Compute loss from model output `x` and labels `y`, then backpropagate."""
        pass
    @abc.abstractmethod
    def last_partition_step_and_statistics(self, x, y, *args, **kw):
        """Usually used for the last partition (or any other partition where x, y
        are needed) to calculate loss, gradients and do training steps.

        We currently assume it's the last partition for simplicity.
        """
        pass
|
class MultiPartitionTrainer(LastPartitionTrainer):
    """Trainer interface for multi-partition pipelines (adds non-last-partition steps)."""
    def __init__(self, optimizer: Optimizer, statistics: Stats):
        self.optimizer = optimizer
        self.statistics = statistics
    @abc.abstractmethod
    def non_last_partition_step(self, *args, **kw):
        """Optimizer step for a partition that does not compute the loss."""
        pass
|
class ScheduledOptimizationStepMultiPartitionTrainer(MultiPartitionTrainer):
    """Multi-partition trainer owning a model and an LR scheduler.

    Subclasses set PER_STEP_SCHEDULER = True when the scheduler should advance
    on every optimizer step instead of once per epoch.
    """
    PER_STEP_SCHEDULER = False

    def __init__(self, model, optimizer, scheduler, statistics: Stats):
        super().__init__(optimizer, statistics)
        self.model = model
        self.scheduler = scheduler

    def non_last_partition_step(self, old_lrs=None):
        # Non-last partitions only apply gradients; loss handling lives elsewhere.
        self.step_on_computed_grads(old_lrs=old_lrs)

    def step_on_computed_grads(self, old_lrs=None):
        """Apply the optimizer step, free gradients, optionally restore LRs,
        and advance the scheduler when configured per-step."""
        self.optimizer.step()
        # Drop gradient tensors entirely (cheaper than zeroing them).
        for group in self.optimizer.param_groups:
            for param in group['params']:
                param.grad = None
        if old_lrs:
            # Restore learning rates temporarily overridden by the caller.
            for group, previous_lr in zip(self.optimizer.param_groups, old_lrs):
                group['lr'] = previous_lr
        if self.PER_STEP_SCHEDULER:
            self.scheduler.step()
|
class LMTrainer(ScheduledOptimizationStepMultiPartitionTrainer):
    """Last-partition trainer for language modeling (loss + perplexity statistics)."""
    PER_STEP_SCHEDULER = True

    def __init__(self, model: Module, optimizer: Optimizer, scheduler, statistics: Stats, step_every=1):
        super().__init__(model, optimizer, scheduler, statistics)
        self.step_every = step_every

    def calc_test_stats(self, loss, batch_size):
        """Record evaluation loss; the 'ppl' meter derives perplexity from it."""
        loss_value = loss.item()
        self.statistics.update_on_batch('loss', loss_value, batch_size)
        self.statistics.update_on_batch('ppl', loss_value, batch_size)

    def last_partition_step_and_statistics(self, x, batch_size, loss, step=True, old_lrs=None):
        """Optionally apply the optimizer step, then record loss/ppl.

        `x` is the model output; `step=False` supports gradient accumulation.
        """
        if step:
            self.step_on_computed_grads(old_lrs)
        loss_value = loss.item()
        self.statistics.update_on_batch('loss', loss_value, batch_size)
        self.statistics.update_on_batch('ppl', loss_value, batch_size)

    def backprop_last_partition(self, loss, *args, **kw):
        """Scale the loss for gradient accumulation if needed and backpropagate."""
        if self.step_every > 1:
            loss /= self.step_every
        loss.backward()
        return loss
|
def register_statistics(name: str, stats_cls: Type[Stats]):
    """Register `stats_cls` under `name` and under its '_loss_per_batch' variant."""
    for key in (name, name + '_loss_per_batch'):
        AVAILBALE_STATS[key] = stats_cls
|
def get_statistics(name: str, *args, **kw) -> Stats:
    """Instantiate the statistics class registered under `name`.

    Names containing 'loss_per_batch' enable per-batch loss recording.

    Raises:
        KeyError: if `name` was never registered. (Previously an unknown name
        surfaced as a confusing ``TypeError: 'NoneType' object is not callable``
        because ``dict.get`` returned None.)
    """
    record_loss_per_batch = ('loss_per_batch' in name)
    try:
        st_cls = AVAILBALE_STATS[name]
    except KeyError:
        raise KeyError(f'unknown statistics name: {name!r}; known: {sorted(AVAILBALE_STATS)}') from None
    return st_cls(*args, record_loss_per_batch=record_loss_per_batch, **kw)
|
class CVStats(Stats):
    """Class to handle statistics collection for CV."""
    def __init__(self, record_loss_per_batch=False, is_last_partition=True):
        super().__init__(is_last_partition=is_last_partition)
        self.record_loss_per_batch = record_loss_per_batch
        self.add_statistic(name='loss', meter=AverageMeter(),
                           per_batch=record_loss_per_batch,
                           per_epoch=not record_loss_per_batch,
                           train=True, test=True)
        self.add_statistic(name='acc', meter=AccuracyMeter(),
                           per_batch=False, per_epoch=True,
                           train=True, test=True)
    def get_epoch_info_str(self, is_train):
        """One-line summary of the latest epoch's loss and accuracy."""
        if is_train:
            name, loss, acc = 'train', self.fit_res.train_loss[-1], self.fit_res.train_acc[-1]
        else:
            name, loss, acc = 'valid', self.fit_res.test_loss[-1], self.fit_res.test_acc[-1]
        return ' | {} loss {:5.2f} | {} acc {:4.2f}'.format(name, loss, name, acc)
|
class NormCVstats(CVStats):
    """CVStats plus per-batch global and (per-stage) local gradient-norm statistics."""
    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        for stat_name in ('grad_norm', 'local_grad_norm'):
            self.add_statistic(name=stat_name, meter=AverageMeter(),
                               per_batch=True, per_epoch=False,
                               train=True, test=False)
        self.register_pipeline_per_stage_statistic('local_grad_norm')
|
class CVDistanceNorm(NormCVstats):
    """NormCVstats plus a per-epoch, per-stage weight-gap ('gap') statistic."""
    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        self.add_statistic(name='gap', meter=AverageMeter(),
                           per_epoch=True, per_batch=False,
                           train=True, test=False)
        self.register_pipeline_per_stage_statistic('gap')
|
class CVDistance(CVStats):
    """CVStats plus a per-stage weight-gap ('gap') statistic.

    Recorded per-batch or per-epoch, following `record_loss_per_batch`.
    """
    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        self.add_statistic(name='gap', meter=AverageMeter(),
                           per_batch=self.record_loss_per_batch,
                           per_epoch=not self.record_loss_per_batch,
                           train=True, test=False)
        self.register_pipeline_per_stage_statistic('gap')
|
def try_record_real_gap_from_current(statistics: "Stats", optimizer: "Optimizer", real_theta, pre_computed_gap=None, gap_name='gap'):
    """Record the L2 gap between the optimizer's current parameters and `real_theta`.

    real_theta: given set of parameters (same grouping as optimizer.param_groups).
    pre_computed_gap: when given, recorded as-is instead of computing the distance.
    Returns the recorded gap, or None when `gap_name` is not being tracked.
    (BUGFIX: previously `gap` was left unbound in that case and the trailing
    `return gap` raised NameError.)
    """
    gap = None
    if statistics.has_statistic(gap_name):
        if pre_computed_gap is None:
            if real_theta is None:
                gap = 0
            else:
                # Sum of per-parameter L2 distances between current and reference weights.
                with torch.no_grad():
                    current = chain.from_iterable([[p for p in pg['params']] for pg in optimizer.param_groups])
                    reference = chain.from_iterable(real_theta)
                    gap = sum([torch.dist(a, b, p=2).item() for (a, b) in zip(current, reference)])
        else:
            gap = pre_computed_gap
        statistics.update_on_batch(gap_name, gap, 1)
    return gap
|
def glue_compute_metrics_name(task_name):
    """Return the headline metric key for a GLUE task.

    Raises KeyError(task_name) for unknown tasks.
    """
    metric_by_task = {
        'cola': 'mcc',
        'sst-2': 'acc',
        'mrpc': 'acc_and_f1',
        'sts-b': 'corr',
        'qqp': 'acc_and_f1',
        'mnli': 'mnli/acc',
        'mnli-mm': 'mnli-mm/acc',
        'qnli': 'acc',
        'rte': 'acc',
        'wnli': 'acc',
        'hans': 'acc',
    }
    # Dict lookup raises KeyError(task_name), matching the explicit raise it replaces.
    return metric_by_task[task_name]
|
class GlueStats(Stats):
    """Class to handle statistics collection for GLUE tasks."""
    def __init__(self, record_loss_per_batch=False, is_last_partition=True):
        super().__init__(is_last_partition=is_last_partition)
        self.record_loss_per_batch = record_loss_per_batch
        self.add_statistic(name='loss', meter=AverageMeter(),
                           per_batch=record_loss_per_batch,
                           per_epoch=not record_loss_per_batch,
                           train=True, test=True)
        # Raw model outputs / gold labels gathered during eval for metric computation.
        self.predictions = []
        self.label_ids = []
    def set_glue_task(self, task_name):
        """Bind the GLUE task and resolve its headline metric name."""
        self.task = task_name
        self.metric_name = glue_compute_metrics_name(task_name)
    def get_metric_for_early_stop(self):
        # Early-stop on the task's headline metric from the latest epoch.
        epoch = self.fit_res.num_epochs
        return self.fit_res.glue_results[epoch][self.metric_name]
    def last_partition_on_epoch_end(self):
        super().last_partition_on_epoch_end()
        if not self.training:
            # `evaluate_glue` is attached externally by the task/dataset setup.
            self.evaluate_glue()
            self.predictions.clear()
            self.label_ids.clear()
    def get_epoch_info_str(self, is_train):
        """One-line summary of the latest epoch's loss."""
        if is_train:
            name, loss = 'train', self.fit_res.train_loss[-1]
        else:
            name, loss = 'valid', self.fit_res.test_loss[-1]
        return ' | {} loss {:5.2f}'.format(name, loss, name)
|
class NormGluestats(GlueStats):
    """GlueStats plus a per-stage gradient-norm statistic."""
    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        self.add_statistic(name='grad_norm', meter=AverageMeter(),
                           per_batch=self.record_loss_per_batch,
                           per_epoch=not self.record_loss_per_batch,
                           train=True, test=False)
        self.register_pipeline_per_stage_statistic('grad_norm')
|
class GlueDistanceNorm(NormGluestats):
    """NormGluestats plus a per-stage weight-gap ('gap') statistic."""
    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        self.add_statistic(name='gap', meter=AverageMeter(),
                           per_batch=self.record_loss_per_batch,
                           per_epoch=not self.record_loss_per_batch,
                           train=True, test=False)
        self.register_pipeline_per_stage_statistic('gap')
|
class GlueDistance(GlueStats):
    """GlueStats plus a per-stage weight-gap ('gap') statistic."""
    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        self.add_statistic(name='gap', meter=AverageMeter(),
                           per_batch=self.record_loss_per_batch,
                           per_epoch=not self.record_loss_per_batch,
                           train=True, test=False)
        self.register_pipeline_per_stage_statistic('gap')
|
class Stats(abc.ABC):
    """Class to handle statistics collection.

    Statistics are registered via `add_statistic` and tracked either per-batch
    (each value appended directly to `fit_res`) or per-epoch (accumulated in a
    meter and flushed to `fit_res` at epoch end), separately for train/test.
    """
    # Container used for accumulated results; subclasses may override.
    FIT_RESULTS_CLASS = SimpleNamespace
    def __init__(self, is_last_partition=True):
        self.training = True
        self.fit_res = self.FIT_RESULTS_CLASS(num_epochs=0)
        assert (not (self.fit_res is None))
        # Per-statistic configuration: name -> {train, test, per_batch, per_epoch, fit_res}.
        self.stats_config = dict()
        self.is_last_partition = is_last_partition
        # Statistic names that get a per-stage prefix in get_stats().
        self.pipeline_per_stage_statistics = []
    def train(self):
        """Switch to training mode (affects which fit_res lists get appended)."""
        self.training = True
    def eval(self):
        """Switch to evaluation mode."""
        self.training = False
    def get_epoch_info_str(self, is_train):
        """Human-readable epoch summary; subclasses override."""
        return ''
    def update_on_batch(self, name, value, n):
        """Record `value` (weighted by `n`) for statistic `name` on one batch."""
        self.update_statistic_after_batch_single(name, value, n)
        self.update_fit_res_after_batch_single(name, value)
    def update_statistic_after_batch_single(self, name, value, n):
        ' NOTE: Attempt to replace the old function '
        cfg = self.stats_config[name]
        if cfg['per_epoch']:
            # Per-epoch statistics accumulate into their meter until epoch end.
            meter = getattr(self, f'epoch_{name}_meter')
            if (not (value is None)):
                meter.update(value, n=n)
            else:
                print(f'-W- NONE VALUE for {name}, val: {value}')
    def update_fit_res_after_batch_single(self, name, value):
        # Per-batch statistics bypass the meter and go straight to fit_res.
        cfg = self.stats_config[name]
        if cfg['per_batch']:
            self._append_value_to_fit_res_by_name(name, value, is_batch=True)
    def update_fit_res_after_epoch_all(self):
        """Flush every per-epoch meter into fit_res (called at epoch end)."""
        list_names = [cfg for (cfg, v) in self.stats_config.items() if v['per_epoch']]
        for name in list_names:
            self.update_fit_res_after_epoch_single(name)
    def update_fit_res_after_epoch_single(self, name):
        # Append the meter's average for this epoch, then reset for the next one.
        cfg = self.stats_config[name]
        if cfg['per_epoch']:
            meter = getattr(self, f'epoch_{name}_meter')
            value = meter.get_avg()
            self._append_value_to_fit_res_by_name(name, value, is_batch=False)
            meter.reset()
    def _append_value_to_fit_res_by_name(self, name, value, is_batch):
        # Skip statistics not tracked in the current (train/test) mode.
        cfg = self.stats_config[name]
        if ((self.training and (not cfg['train'])) or ((not self.training) and (not cfg['test']))):
            return
        # Statistics tracked in both modes split into train_/test_ result lists.
        if (cfg['train'] and cfg['test']):
            fit_name = (f'train_{name}' if self.training else f'test_{name}')
        else:
            fit_name = name
        fit_stat = getattr(self.fit_res, fit_name)
        fit_stat.append(value)
    def has_statistic(self, name):
        """True if a statistic called `name` was registered (its meter exists)."""
        return hasattr(self, f'epoch_{name}_meter')
    def add_statistic(self, name, meter, per_batch=False, per_epoch=True, train=True, test=True):
        """Register a statistic with its meter and tracking configuration.

        Raises NotImplementedError if both per_batch and per_epoch are requested,
        and ValueError if neither train nor test tracking is requested.
        """
        if (per_batch and per_epoch):
            raise NotImplementedError('Statistics are supported for either batch or epoch.')
        setattr(self, f'epoch_{name}_meter', meter)
        fit_res_dict = []
        if (train and test):
            fit_res_dict.append(f'train_{name}')
            fit_res_dict.append(f'test_{name}')
        elif ((train and (not test)) or (test and (not train))):
            fit_res_dict.append(f'{name}')
        else:
            raise ValueError()
        self.stats_config[name] = {'train': train, 'test': test, 'per_batch': per_batch, 'per_epoch': per_epoch, 'fit_res': fit_res_dict}
        # Pre-create the (empty) result lists on fit_res.
        for i in fit_res_dict:
            setattr(self.fit_res, i, [])
    def non_last_partition_on_epoch_end(self):
        pass
    def last_partition_on_epoch_end(self):
        # Epochs are counted on training passes only; eval passes still flush meters.
        if self.training:
            self.fit_res.num_epochs += 1
        self.update_fit_res_after_epoch_all()
    def register_pipeline_per_stage_statistic(self, name):
        """Mark `name` for per-stage renaming in get_stats()."""
        self.pipeline_per_stage_statistics.append(name)
    def get_stats(self, stage_id) -> Dict:
        """Return fit_res as a dict, renaming per-stage statistics to p{stage}_{name}."""
        fit_res = _fit_res_to_dict(self.fit_res)
        for name in reversed(self.pipeline_per_stage_statistics):
            new_name = f'p{stage_id}_{name}'
            old_name = name
            fit_res[new_name] = fit_res.pop(old_name)
        return fit_res
    def get_metric_for_early_stop(self):
        """Metric used for early stopping; None means early stopping is unsupported."""
        return None
|
def _fit_res_to_dict(fit_res) -> Dict:
if isinstance(fit_res, NamedTuple):
fit_res = fit_res._asdict()
elif isinstance(fit_res, SimpleNamespace):
fit_res = fit_res.__dict__
return fit_res
|
class PPLMeter(AverageMeter):
    """Update like loss; get_avg() returns the perplexity (exp of the mean loss)."""
    def get_avg(self):
        mean_loss = self.sum / self.count
        return math.exp(mean_loss)
|
class LMStats(Stats):
    """Class to handle statistics collection for LM."""
    def __init__(self, record_loss_per_batch=False, is_last_partition=True):
        super().__init__(is_last_partition=is_last_partition)
        self.record_loss_per_batch = record_loss_per_batch
        self.add_statistic(name='loss', meter=AverageMeter(),
                           per_batch=record_loss_per_batch,
                           per_epoch=not record_loss_per_batch,
                           train=True, test=True)
        # Perplexity is derived from the same loss updates via PPLMeter.
        self.add_statistic(name='ppl', meter=PPLMeter(),
                           per_batch=False, per_epoch=True,
                           train=True, test=True)
    def get_epoch_info_str(self, is_train):
        """One-line summary of the latest epoch's loss and perplexity."""
        if is_train:
            name, loss, ppl = 'train', self.fit_res.train_loss[-1], self.fit_res.train_ppl[-1]
        else:
            name, loss, ppl = 'valid', self.fit_res.test_loss[-1], self.fit_res.test_ppl[-1]
        return ' | {} loss {:5.2f} | {} ppl {:4.3f}'.format(name, loss, name, ppl)
|
class NormLMstats(LMStats):
    """LMStats plus a per-stage gradient-norm statistic."""
    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        self.add_statistic(name='grad_norm', meter=AverageMeter(),
                           per_batch=self.record_loss_per_batch,
                           per_epoch=not self.record_loss_per_batch,
                           train=True, test=False)
        self.register_pipeline_per_stage_statistic('grad_norm')
|
class LMDistanceNorm(NormLMstats):
    """NormLMstats plus a per-stage weight-gap ('gap') statistic."""
    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        self.add_statistic(name='gap', meter=AverageMeter(),
                           per_batch=self.record_loss_per_batch,
                           per_epoch=not self.record_loss_per_batch,
                           train=True, test=False)
        self.register_pipeline_per_stage_statistic('gap')
|
class LMDistance(LMStats):
    """LMStats plus a per-stage weight-gap ('gap') statistic."""
    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        self.add_statistic(name='gap', meter=AverageMeter(),
                           per_batch=self.record_loss_per_batch,
                           per_epoch=not self.record_loss_per_batch,
                           train=True, test=False)
        self.register_pipeline_per_stage_statistic('gap')
|
class SquadStats(Stats):
    """Class to handle statistics collection for Squad."""
    def __init__(self, record_loss_per_batch=False, is_last_partition=True):
        super().__init__(is_last_partition=is_last_partition)
        self.record_loss_per_batch = record_loss_per_batch
        self.add_statistic(name='loss', meter=AverageMeter(),
                           per_batch=record_loss_per_batch,
                           per_epoch=not record_loss_per_batch,
                           train=True, test=True)
        # Per-example results collected during eval for SQuAD metric computation.
        self.all_results = []
    def non_last_partition_on_epoch_end(self):
        pass
    def last_partition_on_epoch_end(self):
        super().last_partition_on_epoch_end()
        if not self.training:
            # `evaluate_squad` is attached externally by the dataset script.
            if hasattr(self, 'evaluate_squad'):
                self.evaluate_squad()
            else:
                print(f'-W- {type(self)} does not have `evaluate_squad()` method, which should e set by dataset script. Will calculate loss.')
            self.all_results.clear()
    def get_epoch_info_str(self, is_train):
        """One-line summary of the latest epoch's loss."""
        if is_train:
            name, loss = 'train', self.fit_res.train_loss[-1]
        else:
            name, loss = 'valid', self.fit_res.test_loss[-1]
        return ' | {} loss {:7.5f}'.format(name, loss, name)
|
class NormSquadstats(SquadStats):
    """SquadStats plus a per-stage gradient-norm statistic."""
    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        self.add_statistic(name='grad_norm', meter=AverageMeter(),
                           per_batch=self.record_loss_per_batch,
                           per_epoch=not self.record_loss_per_batch,
                           train=True, test=False)
        self.register_pipeline_per_stage_statistic('grad_norm')
|
class SquadDistanceNorm(NormSquadstats):
    """NormSquadstats plus a per-stage weight-gap ('gap') statistic."""
    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        self.add_statistic(name='gap', meter=AverageMeter(),
                           per_batch=self.record_loss_per_batch,
                           per_epoch=not self.record_loss_per_batch,
                           train=True, test=False)
        self.register_pipeline_per_stage_statistic('gap')
|
class SquadDistance(SquadStats):
    """SquadStats plus a per-stage weight-gap ('gap') statistic."""
    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        self.add_statistic(name='gap', meter=AverageMeter(),
                           per_batch=self.record_loss_per_batch,
                           per_epoch=not self.record_loss_per_batch,
                           train=True, test=False)
        self.register_pipeline_per_stage_statistic('gap')
|
class AverageMeter(object):
    """Computes and stores the average and current value."""
    def __init__(self):
        self.reset()
    def reset(self):
        """Clear all accumulated state."""
        self.avg, self.sum, self.count = 0, 0, 0
    def update(self, val, n=1):
        """Accumulate `val` with weight `n` (e.g. a batch size)."""
        self.count = self.count + n
        self.sum = self.sum + val * n
    def get_avg(self):
        """Weighted mean of all updates so far."""
        return self.sum / self.count
|
class AccuracyMeter(AverageMeter):
    """AverageMeter variant updated with raw correct-prediction counts.

    get_avg() therefore returns accuracy as a percentage.
    """
    def __init__(self):
        super().__init__()
    def update(self, val, n=1):
        """Just to support adding num correct instead of accuracy."""
        # `val` is a count of correct samples, not a per-batch average.
        self.count = self.count + n
        self.sum = self.sum + val
    def get_avg(self):
        return 100 * (self.sum / self.count)
|
class T5Trainer(ScheduledOptimizationStepMultiPartitionTrainer):
    """Last-partition trainer for T5; the model output `x` is itself the loss."""
    PER_STEP_SCHEDULER = True
    def __init__(self, model: Module, optimizer: Optimizer, scheduler, statistics: Stats, step_every=1, loss_multiplier=1):
        super().__init__(model, optimizer, scheduler, statistics)
        self.step_every = step_every
        # Extra scaling factor applied to the loss before backward.
        self.loss_multiplier = loss_multiplier
    def calc_test_stats(self, x, batch_size=None):
        """Record evaluation loss (x is already the loss tensor)."""
        loss = x
        self.statistics.update_on_batch('loss', loss.item(), batch_size)
    def backprop_last_partition(self, x, batch_size):
        """Scale the loss (multiplier and/or accumulation) and backpropagate."""
        loss = x
        loss_multiplier = self.loss_multiplier
        # NOTE(review): `*=`/`/=` mutate the received tensor in place (loss is x);
        # confirm no other consumer relies on x keeping its unscaled value.
        if (loss_multiplier != 1):
            loss *= loss_multiplier
        if (self.step_every > 1):
            loss /= self.step_every
        loss.backward()
        return loss
    def last_partition_step_and_statistics(self, x, batch_size, loss, step=True, old_lrs=None):
        """Optionally apply the optimizer step, then record the loss.

        `x` is the model output; `step=False` supports gradient accumulation.
        """
        if step:
            self.step_on_computed_grads(old_lrs)
        loss = loss.item()
        self.statistics.update_on_batch('loss', loss, batch_size)
|
def calc_local_total_norm(parameters, norm_type=2):
    """Total gradient norm of `parameters` — exactly like clip_grad_norm_, but
    without the clip.
    # See https://github.com/pytorch/pytorch/blob/master/torch/nn/utils/clip_grad.py
    """
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    grads = [p.grad for p in parameters if p.grad is not None]
    norm_type = float(norm_type)
    if norm_type == inf:
        # Infinity norm: largest absolute gradient entry across all parameters.
        return max(g.detach().abs().max() for g in grads)
    # General p-norm: the norm of the per-parameter norms.
    return torch.norm(torch.stack([torch.norm(g.detach(), norm_type) for g in grads]), norm_type)
|
def calc_local_total_norm_wo_sqrt(parameters, norm_type=2):
    """Sum of squared gradient norms (i.e. the 2-norm without the final sqrt),
    like clip_grad_norm_ without the clip.
    # See https://github.com/pytorch/pytorch/blob/master/torch/nn/utils/clip_grad.py
    """
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    flat_grads = [p.grad.detach().view(-1) for p in parameters if p.grad is not None]
    norm_type = float(norm_type)
    if norm_type == inf:
        raise NotImplementedError()
    # <v, v> per parameter, summed: ||g||_2^2 without the sqrt.
    return torch.stack([torch.vdot(v, v) for v in flat_grads]).sum()
|
class TrueWeightsStorage():
    """Stores the "true" weights while the model temporarily holds other
    (e.g. predicted or stashed) weights.

    NOTE: in case of multiple restores, we take a "copy on write" approach.
    First restore: no clone -> copy pointers.
    Second restore: clone the true weights ("pop" the previous ones - to the model).
    This is handled by `self.restored_true_weights_to_the_model`.
    """
    def __init__(self, optimizer):
        # Cloned buffer of true weights (same nesting as optimizer.param_groups).
        self.true_weights = None
        self.true_weights_exist = False
        self.optimizer = optimizer
        # Record of how the model's weights were modified since the last step.
        self.change_mode = None
        self.restored_true_weights_to_the_model = False
    def record_change_mode(self, mode):
        """Record that the model's weights were changed (modes are chained with '->')."""
        if (self.change_mode is None):
            self.change_mode = mode
        elif mode:
            self.change_mode += f' -> {mode}'
    def get_true_weights(self):
        """Return the stored true weights, or the current model weights if none stored."""
        true_weights = self.true_weights
        if (true_weights is None):
            true_weights = self._return_current_weights()
        return true_weights
    def create_cloned_if_needed(self):
        """Clone current weights as the true weights (copy-on-write after a restore)."""
        if ((not self.true_weights_exist) or self.restored_true_weights_to_the_model):
            self.true_weights = self._create_current_cloned_buff()
            self.true_weights_exist = True
        if self.restored_true_weights_to_the_model:
            self.restored_true_weights_to_the_model = False
    def restore_if_needed(self):
        """Load the stored true weights back into the model if a change was recorded."""
        if (self.true_weights_exist and self.change_mode):
            self._restore_from_buff(self.true_weights)
            self.change_mode = None
            self.restored_true_weights_to_the_model = True
    def check_restore_if_needed(self, check=True):
        ' Function to check restore_if_needed calls '
        if check:
            # Warn about redundant restore calls (nothing was changed since last step).
            if (self.true_weights_exist and (not self.change_mode)):
                print('-W- will not restore true weights. no change is recorded. Consider removing for efficiency')
        self.restore_if_needed()
    def reset_on_step(self):
        """Drop all stored state; called after an optimizer step invalidates it."""
        self.true_weights = None
        self.true_weights_exist = False
        self.change_mode = None
        self.restored_true_weights_to_the_model = False
    def _restore_from_buff(self, buff):
        'load tensor.data from saved buff. Gradients stays the same.'
        with torch.no_grad():
            for (pg, cloned) in zip(self.optimizer.param_groups, buff):
                for (p, bp) in zip(pg['params'], cloned):
                    p.data = bp.detach()
    def _create_current_cloned_buff(self):
        # Deep-copy all parameter tensors, grouped like optimizer.param_groups.
        buff = [[p.detach().clone() for p in pg['params']] for pg in self.optimizer.param_groups]
        return buff
    def _return_current_weights(self):
        # Pointers only — no clone.
        return [[p for p in pg['params']] for pg in self.optimizer.param_groups]
|
def get_world_size(backend) -> int:
    """Returns world size (from env), or 1 if not set."""
    # OpenMPI exposes its own environment variable.
    env_var = 'OMPI_COMM_WORLD_SIZE' if backend == 'mpi' else 'WORLD_SIZE'
    return int(os.environ.get(env_var, 1))
|
def get_global_rank(backend) -> int:
    """Returns global rank (from env), or 0 if not set."""
    # OpenMPI exposes its own environment variable.
    env_var = 'OMPI_COMM_WORLD_RANK' if backend == 'mpi' else 'RANK'
    return int(os.environ.get(env_var, 0))
|
class CommPolicy(Enum):
    """Communication strategy between pipeline stages.

    P2P is selected for MPI or CPU tensors, BCAST otherwise (see `to_policy`).
    """
    P2P = auto()
    BCAST = auto()
|
def to_policy(backend, cpu):
    """Pick the communication policy for the given backend / device placement."""
    assert backend in {'nccl', 'gloo', 'mpi'}
    # MPI and CPU tensors use point-to-point; otherwise broadcast.
    use_p2p = (backend == 'mpi') or cpu
    return CommPolicy.P2P if use_p2p else CommPolicy.BCAST
|
def nested_map(func, ts, full=False):
    """Apply `func` to every leaf of a nested structure of lists/tuples/sets/dicts.

    torch.Size is treated as a leaf (checked first, since it subclasses tuple).
    With full=True, slice objects are mapped component-wise as well.
    """
    if isinstance(ts, torch.Size):
        # Sizes are leaves, not containers.
        return func(ts)
    if isinstance(ts, (list, tuple, set)):
        mapped = (nested_map(func, item, full=full) for item in ts)
        return type(ts)(mapped)
    if isinstance(ts, dict):
        return {key: nested_map(func, value, full=full) for (key, value) in ts.items()}
    if full and isinstance(ts, slice):
        return slice(nested_map(func, ts.start, full=full),
                     nested_map(func, ts.stop, full=full),
                     nested_map(func, ts.step, full=full))
    return func(ts)
|
def flatten(x: Any) -> List[Any]:
    """Returns a flattened list of objects from a nested structure."""
    # torch.Size subclasses tuple, so it must be treated as a leaf explicitly.
    if isinstance(x, torch.Size):
        return [x]
    if isinstance(x, dict):
        out: List[Any] = []
        for item in x.values():
            out.extend(flatten(item))
        return out
    if isinstance(x, (list, set, tuple)):
        out = []
        for item in x:
            out.extend(flatten(item))
        return out
    # Any other object is a leaf.
    return [x]
|
def unflatten(xs, structure):
    """Rebuild `structure` with leaves taken in order from `xs`.

    Sanity-checks that the rebuilt structure holds exactly as many leaves as
    the flat input provides.
    """
    rebuilt = _unflatten(xs, structure)[0]
    assert len(list(flatten(xs))) == len(list(flatten(rebuilt)))
    return rebuilt
|
def _unflatten(xs, structure):
if isinstance(structure, torch.Size):
return (xs[0], 1)
elif isinstance(structure, (list, tuple, set)):
offset = 0
elements = []
for s in structure:
(e, n) = _unflatten(xs[offset:], s)
elements.append(e)
offset += n
return (type(structure)(elements), offset)
elif isinstance(structure, dict):
offset = 0
elements = dict()
for (k, v) in sorted(structure.items(), key=(lambda t: t[0])):
(e, n) = _unflatten(xs[offset:], v)
elements[k] = e
offset += n
return (elements, offset)
else:
return (xs[0], 1)
|
def detach_tensors(ts):
    """Return `ts` with every tensor detached (requires_grad flags preserved)."""
    def _detach(obj):
        if isinstance(obj, Tensor):
            return obj.detach().requires_grad_(obj.requires_grad)
        return obj
    return nested_map(_detach, ts)
|
def move_tensors(ts, device):
    """Return `ts` with every tensor/module moved to `device`."""
    def _to_device(obj):
        return obj.to(device) if isinstance(obj, (nn.Module, Tensor)) else obj
    return nested_map(_to_device, ts)
|
def print_tensors(stage, x, in_or_out='out'):
    """Debug helper: print the shapes of `x`'s elements for a pipeline stage.

    A bare tensor input is ignored (nothing is printed).
    """
    if isinstance(x, torch.Tensor):
        pass
    else:
        shapes = []
        for (i, v) in enumerate(x):
            if isinstance(v, torch.Tensor):
                shapes.append(v.shape)
            else:
                shapes.append('non-tensor' + str(v))
        print(f'stage {stage}: {in_or_out}: {shapes}')
|
def get_weight_predictor(optimizer_type, pred_mem, pred_type, optimizer, scheduler, nag_with_predictor, true_weights_storage, sched_predictor):
    """Dispatch to the optimizer-specific weight-predictor factory.

    Raises NotImplementedError for unsupported optimizer types. Note that any
    type containing 'sgd' is routed to the SGD factory first.
    """
    common = dict(scheduler=sched_predictor, nag_with_predictor=nag_with_predictor,
                  true_weights_storage=true_weights_storage)
    if 'sgd' in optimizer_type:
        return get_sgd_weight_predictor(optimizer_type, pred_mem, pred_type, optimizer, **common)
    if optimizer_type == 'adam':
        return get_adam_weight_predictor(pred_mem, pred_type, optimizer, **common)
    if optimizer_type == 'adamw':
        return get_adamw_weight_predictor(pred_mem, pred_type, optimizer, **common)
    if optimizer_type == 'adafactor':
        return get_adafactor_weight_predictor(pred_mem, pred_type, optimizer, **common)
    raise NotImplementedError()
|
def get_adafactor_weight_predictor(pred_mem: str, pred_type: str, optimizer, scheduler=None, nag_with_predictor=False, true_weights_storage=None) -> WeightPredictor:
    """Build a weight predictor for Adafactor; only 'aggmsnag' prediction is supported."""
    # NOTE(review): `has_weight_decay` is computed but unused (the `if` body is
    # `pass`) — looks like a leftover/placeholder; confirm whether weight decay
    # needs special handling here. `pred_mem` is also unused.
    has_weight_decay = any([(pg['weight_decay'] != 0) for pg in optimizer.param_groups])
    if has_weight_decay:
        pass
    if (pred_type == 'msnag'):
        raise NotImplementedError()
    elif (pred_type == 'aggmsnag'):
        pred_cls = AdaFactorWClonedWeightPredictionForAggregation
    else:
        raise NotImplementedError()
    return pred_cls(optimizer, fix_fn=None, scheduler=scheduler, nag_with_predictor=nag_with_predictor, true_weights_storage=true_weights_storage)
|
def adafactor_init(optimizer):
    """Eagerly create Adafactor per-parameter state (step counter, moment buffers).

    Mirrors the lazy initialization Adafactor performs on its first step, using
    each parameter itself as a same-shaped stand-in for its gradient. Parameters
    whose state is already populated are left untouched.
    """
    for group in optimizer.param_groups:
        for param in group['params']:
            state = optimizer.state[param]
            grad = param  # stand-in: grads share the parameter's shape/device/dtype
            grad_shape = grad.shape
            (factored, use_first_moment) = optimizer._get_options(group, grad_shape)
            if len(state) != 0:
                continue  # already initialized
            state['step'] = 0
            if use_first_moment:
                state['exp_avg'] = torch.zeros_like(grad)
            if factored:
                # Factored second moment: row/column running averages.
                state['exp_avg_sq_row'] = torch.zeros(grad_shape[:-1]).to(grad)
                state['exp_avg_sq_col'] = torch.zeros(grad_shape[:-2] + grad_shape[-1:]).to(grad)
            else:
                state['exp_avg_sq'] = torch.zeros_like(grad)
            state['RMS'] = 0
|
class AdaFactorWClonedWeightPredictionForAggregation(WeightPredictor):
    """Adafactor weight prediction for gradient aggregation.

    ``forward()`` clones the true weights (via ``true_weights_storage``) and
    simulates ``n_steps`` future Adafactor updates in-place on ``p.data``,
    re-using the currently aggregated gradient for every simulated step.
    ``revert()`` restores the saved true weights.
    """
    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        from optimizers.adafactor import Adafactor
        self.optimizer: Adafactor
        # Make sure per-parameter Adafactor state exists before the first real step.
        adafactor_init(self.optimizer)
    def forward(self):
        if (not self.n_steps):
            return
        self.true_weights_storage.create_cloned_if_needed()
        self.true_weights_storage.record_change_mode('pred')
        pgs = self.optimizer.param_groups
        if (self.scheduler is not None):
            # Ask the scheduler-predictor for the next n_steps lrs, transposed
            # into a per-param-group list of per-step lrs.
            step_lrs = self.scheduler.get_next(self.n_steps)
            pg_step_lrs = [[slr[i] for slr in step_lrs] for i in range(len(pgs))]
        else:
            pg_step_lrs = [([pg['lr']] * self.n_steps) for pg in pgs]
        with torch.no_grad():
            for (group, step_lrs) in zip(pgs, pg_step_lrs):
                # Copy-on-write wrapper: simulated 'lr' updates below must not
                # leak into the real param group.
                group = CowDict(group)
                for p in group['params']:
                    if (p.grad is None):
                        grad = None
                    else:
                        grad = p.grad.data
                        if (grad.dtype in {torch.float16, torch.bfloat16}):
                            grad = grad.float()
                    state = self.optimizer.state[p]
                    # Copy-on-write: the simulated state['step'] / state['RMS']
                    # bumps stay local to this prediction.
                    state = CowDict(state)
                    grad_shape = (grad.shape if (grad is not None) else p.shape)
                    (factored, use_first_moment) = self.optimizer._get_options(group, grad_shape)
                    assert (len(state) > 0)  # guaranteed by adafactor_init in __init__
                    if factored:
                        exp_avg_sq_row = state['exp_avg_sq_row']
                        exp_avg_sq_col = state['exp_avg_sq_col']
                    else:
                        exp_avg_sq = state['exp_avg_sq']
                    if use_first_moment:
                        exp_avg = state['exp_avg']
                    for (staleness, lr) in zip(range(1, (self.n_steps + 1)), step_lrs):
                        p_data_fp32 = p.data
                        if (p.data.dtype in {torch.float16, torch.bfloat16}):
                            p_data_fp32 = p_data_fp32.float()
                        state['step'] += 1  # bumps only the CowDict copy
                        state['RMS'] = self.optimizer._rms(p_data_fp32)
                        group['lr'] = self.optimizer._get_lr(group, state)
                        beta2t = (1.0 - math.pow(state['step'], group['decay_rate']))
                        if (grad is None):
                            # NOTE(review): with grad=None, update.mul_(grad) /
                            # .mul_(grad) below raises TypeError in both branches,
                            # so this path looks broken or unreachable — confirm
                            # against callers before relying on it.
                            update = torch.full_like(p.data, fill_value=group['eps'][0], memory_format=torch.preserve_format)
                        else:
                            update = ((grad ** 2) + group['eps'][0])
                        if factored:
                            # Out-of-place mul() keeps the real optimizer state
                            # tensors intact; only local copies evolve.
                            exp_avg_sq_row = exp_avg_sq_row.mul(beta2t).add_(update.mean(dim=(- 1)), alpha=(1.0 - beta2t))
                            exp_avg_sq_col = exp_avg_sq_col.mul(beta2t).add_(update.mean(dim=(- 2)), alpha=(1.0 - beta2t))
                            update = self.optimizer._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col)
                            update.mul_(grad)
                        else:
                            exp_avg_sq = exp_avg_sq.mul(beta2t).add_(update, alpha=(1.0 - beta2t))
                            update = exp_avg_sq.rsqrt().mul_(grad)
                        # Update clipping, then scaling by the (simulated) lr.
                        update.div_((self.optimizer._rms(update) / group['clip_threshold']).clamp_(min=1.0))
                        update.mul_(group['lr'])
                        if use_first_moment:
                            exp_avg = exp_avg.mul(group['beta1']).add_(update, alpha=(1 - group['beta1']))
                            update = exp_avg
                        if (group['weight_decay'] != 0):
                            p_data_fp32.add_(p_data_fp32, alpha=((- group['weight_decay']) * group['lr']))
                        p_data_fp32.add_((- update))
                        # Write back to the low-precision master copy if needed.
                        if (p.data.dtype in {torch.float16, torch.bfloat16}):
                            p.data.copy_(p_data_fp32)
    def revert(self):
        if (not self.n_steps):
            return
        # Restore the cloned true weights saved in forward().
        self.true_weights_storage.restore_if_needed()
|
def get_adam_weight_predictor(pred_mem: str, pred_type: str, optimizer, scheduler=None, nag_with_predictor=False, true_weights_storage=None) -> WeightPredictor:
    """Select and build the Adam weight predictor matching ``pred_type``.

    'msnag' picks the plain/WD variant depending on whether any param group
    uses weight decay. 'aggmsnag' always maps to the aggregation+WD variant
    in both branches (it degrades gracefully when weight_decay == 0).
    """
    uses_weight_decay = any(pg['weight_decay'] != 0 for pg in optimizer.param_groups)
    if pred_type == 'msnag':
        pred_cls = AdamClonedWeightPredictionWithWD if uses_weight_decay else AdamClonedWeightPrediction
    elif pred_type == 'aggmsnag':
        pred_cls = AdamClonedWeightPredictionForAggregationWithWD
    else:
        raise NotImplementedError()
    return pred_cls(optimizer, fix_fn=None, scheduler=scheduler, nag_with_predictor=nag_with_predictor, true_weights_storage=true_weights_storage)
|
def adam_init(optimizer):
    """Create zeroed Adam state (exp_avg, exp_avg_sq, step) for params lacking it.

    Lets weight prediction read optimizer state before the first real step;
    already-initialized parameters are left untouched.
    """
    for group in optimizer.param_groups:
        for param in group['params']:
            state = optimizer.state[param]
            if state:
                continue  # already initialized by a real optimizer step
            state['exp_avg'] = torch.zeros_like(param.data, memory_format=torch.preserve_format)
            state['exp_avg_sq'] = torch.zeros_like(param.data, memory_format=torch.preserve_format)
            state['step'] = 0
|
class AdamClonedWeightPrediction(WeightPredictor):
    """Adam weight prediction ('msnag', no weight decay).

    ``forward()`` clones the true weights and applies ``n_steps`` predicted
    Adam updates: with no new gradients the momentum decays by beta1 each
    step, so step k uses exp_avg * beta1**k. ``revert()`` restores the clone.
    """
    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        # Ensure Adam state exists so prediction can run before the first real step.
        adam_init(self.optimizer)
    def forward(self):
        if (not self.n_steps):
            return
        self.true_weights_storage.create_cloned_if_needed()
        self.true_weights_storage.record_change_mode('pred')
        pgs = self.optimizer.param_groups
        if (self.scheduler is not None):
            # Next n_steps lrs, transposed to per-param-group lists.
            step_lrs = self.scheduler.get_next(self.n_steps)
            pg_step_lrs = [[slr[i] for slr in step_lrs] for i in range(len(pgs))]
        else:
            pg_step_lrs = [([pg['lr']] * self.n_steps) for pg in pgs]
        with torch.no_grad():
            for (pg, step_lrs) in zip(pgs, pg_step_lrs):
                (beta1, beta2) = pg['betas']
                eps = pg['eps']
                for p in pg['params']:
                    state = self.optimizer.state[p]
                    exp_avg = state['exp_avg']
                    exp_avg_sq = state['exp_avg_sq']
                    step = state['step']  # real step count; not mutated here
                    for (staleness, lr) in zip(range(1, (self.n_steps + 1)), step_lrs):
                        if (lr == 0):
                            continue  # e.g. warmup step with zero lr: no movement
                        bias_correction1 = (1 - (beta1 ** (step + staleness)))
                        bias_correction2 = (1 - (beta2 ** (step + staleness)))
                        # sqrt() is out-of-place, so add_(eps) mutates only a temporary.
                        denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps)
                        step_size = (lr / bias_correction1)
                        # Momentum decayed by beta1**staleness: no new grads assumed.
                        p.data.addcdiv_((exp_avg * (beta1 ** staleness)), denom, value=(- step_size))
    def revert(self):
        if (not self.n_steps):
            return
        self.true_weights_storage.restore_if_needed()
|
class AdamClonedWeightPredictionWithWD(WeightPredictor):
    """Adam weight prediction ('msnag') with (coupled, L2) weight decay.

    Like AdamClonedWeightPrediction, but each predicted step folds the decay
    term weight_decay * p into the momentum estimate ``exp_avg_hat``, which
    evolves out-of-place so the real ``exp_avg`` state is never mutated.
    """
    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        # Ensure Adam state exists so prediction can run before the first real step.
        adam_init(self.optimizer)
    def forward(self):
        if (not self.n_steps):
            return
        self.true_weights_storage.create_cloned_if_needed()
        self.true_weights_storage.record_change_mode('pred')
        pgs = self.optimizer.param_groups
        if (self.scheduler is not None):
            # Next n_steps lrs, transposed to per-param-group lists.
            step_lrs = self.scheduler.get_next(self.n_steps)
            pg_step_lrs = [[slr[i] for slr in step_lrs] for i in range(len(pgs))]
        else:
            pg_step_lrs = [([pg['lr']] * self.n_steps) for pg in pgs]
        with torch.no_grad():
            for (pg, step_lrs) in zip(pgs, pg_step_lrs):
                (beta1, beta2) = pg['betas']
                eps = pg['eps']
                weight_decay = pg['weight_decay']
                for p in pg['params']:
                    state = self.optimizer.state[p]
                    exp_avg = state['exp_avg']
                    exp_avg_sq = state['exp_avg_sq']
                    step = state['step']  # real step count; not mutated here
                    # Local momentum estimate; real exp_avg is left untouched.
                    exp_avg_hat = exp_avg
                    # NOTE(review): unlike the sibling predictors there is no
                    # `if lr == 0: continue` here — skipping would also freeze
                    # exp_avg_hat, so this may be deliberate; confirm.
                    for (staleness, lr) in zip(range(1, (self.n_steps + 1)), step_lrs):
                        d_p = 0
                        if (weight_decay != 0):
                            d_p = (weight_decay * p.data)  # only the decay term; no new grads
                        exp_avg_hat = ((exp_avg_hat * beta1) + ((1 - beta1) * d_p))
                        bias_correction1 = (1 - (beta1 ** (step + staleness)))
                        bias_correction2 = (1 - (beta2 ** (step + staleness)))
                        # sqrt() is out-of-place, so add_(eps) mutates only a temporary.
                        denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps)
                        step_size = (lr / bias_correction1)
                        p.data.addcdiv_(exp_avg_hat, denom, value=(- step_size))
    def revert(self):
        if (not self.n_steps):
            return
        self.true_weights_storage.restore_if_needed()
|
class AdamClonedWeightPredictionForAggregationWithWD(WeightPredictor):
    """Adam weight prediction ('aggmsnag') with weight decay, for gradient
    aggregation: the freshly aggregated gradient contributes only to the
    first predicted step (staleness == 1); later steps see only weight decay.
    """
    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        # Ensure Adam state exists so prediction can run before the first real step.
        adam_init(self.optimizer)
    def forward(self):
        if (not self.n_steps):
            return
        self.true_weights_storage.create_cloned_if_needed()
        self.true_weights_storage.record_change_mode('pred')
        pgs = self.optimizer.param_groups
        if (self.scheduler is not None):
            # Next n_steps lrs, transposed to per-param-group lists.
            step_lrs = self.scheduler.get_next(self.n_steps)
            pg_step_lrs = [[slr[i] for slr in step_lrs] for i in range(len(pgs))]
        else:
            pg_step_lrs = [([pg['lr']] * self.n_steps) for pg in pgs]
        with torch.no_grad():
            for (pg, step_lrs) in zip(pgs, pg_step_lrs):
                (beta1, beta2) = pg['betas']
                eps = pg['eps']
                for p in pg['params']:
                    state = self.optimizer.state[p]
                    exp_avg = state['exp_avg']
                    exp_avg_sq = state['exp_avg_sq']
                    step = state['step']  # real step count; not mutated here
                    weight_decay = pg['weight_decay']
                    # Local momentum estimate; real exp_avg is left untouched.
                    exp_avg_hat = exp_avg
                    for (staleness, lr) in zip(range(1, (self.n_steps + 1)), step_lrs):
                        if (lr == 0):
                            continue  # zero-lr step: nothing moves, momentum frozen
                        # Aggregated gradient is used only for the first step.
                        d_p = (0 if ((p.grad is None) or (staleness > 1)) else p.grad)
                        if (weight_decay != 0):
                            d_p += (weight_decay * p.data)
                        exp_avg_hat = ((exp_avg_hat * beta1) + ((1 - beta1) * d_p))
                        bias_correction1 = (1 - (beta1 ** (step + staleness)))
                        bias_correction2 = (1 - (beta2 ** (step + staleness)))
                        # sqrt() is out-of-place, so add_(eps) mutates only a temporary.
                        denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps)
                        step_size = (lr / bias_correction1)
                        p.data.addcdiv_(exp_avg_hat, denom, value=(- step_size))
    def revert(self):
        if (not self.n_steps):
            return
        self.true_weights_storage.restore_if_needed()
|
def get_adamw_weight_predictor(pred_mem: str, pred_type: str, optimizer, scheduler=None, nag_with_predictor=False, true_weights_storage=None) -> WeightPredictor:
    """Create an AdamW weight predictor for the given optimizer.

    Falls back to the plain Adam predictor when no param group uses weight
    decay, since AdamW and Adam coincide at weight_decay == 0.

    Args:
        pred_mem: prediction-memory strategy (forwarded to the Adam fallback).
        pred_type: 'msnag' or 'aggmsnag'.
        optimizer: the AdamW optimizer to predict for.
        scheduler: optional scheduler-predictor for future lrs.
        nag_with_predictor: force at least one predicted step (NAG).
        true_weights_storage: storage used to clone/restore true weights.
    """
    has_weight_decay = any([(pg['weight_decay'] != 0) for pg in optimizer.param_groups])
    if has_weight_decay:
        if (pred_type == 'msnag'):
            pred_cls = AdamWClonedWeightPrediction
        elif (pred_type == 'aggmsnag'):
            pred_cls = AdamWClonedWeightPredictionForAggregation
        else:
            raise NotImplementedError()
    else:
        # Typo fixes in the user-facing warning text ("prediciton instad ... becuse").
        warnings.warn('using Adam weight prediction instead of AdamW because weight decay is 0')
        return get_adam_weight_predictor(pred_mem, pred_type, optimizer, scheduler=scheduler, nag_with_predictor=nag_with_predictor, true_weights_storage=true_weights_storage)
    return pred_cls(optimizer, fix_fn=None, scheduler=scheduler, nag_with_predictor=nag_with_predictor, true_weights_storage=true_weights_storage)
|
class AdamWClonedWeightPrediction(WeightPredictor):
    """AdamW weight prediction ('msnag'): decoupled weight decay.

    Each predicted step first shrinks the weights by (1 - lr * weight_decay)
    and then applies the beta1-decayed momentum, matching the AdamW update
    with no new gradients.
    """
    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        # Ensure Adam state exists so prediction can run before the first real step.
        adam_init(self.optimizer)
    def forward(self):
        if (not self.n_steps):
            return
        self.true_weights_storage.create_cloned_if_needed()
        self.true_weights_storage.record_change_mode('pred')
        pgs = self.optimizer.param_groups
        if (self.scheduler is not None):
            # Next n_steps lrs, transposed to per-param-group lists.
            step_lrs = self.scheduler.get_next(self.n_steps)
            pg_step_lrs = [[slr[i] for slr in step_lrs] for i in range(len(pgs))]
        else:
            pg_step_lrs = [([pg['lr']] * self.n_steps) for pg in pgs]
        with torch.no_grad():
            for (pg, step_lrs) in zip(pgs, pg_step_lrs):
                (beta1, beta2) = pg['betas']
                eps = pg['eps']
                weight_decay = pg['weight_decay']
                for p in pg['params']:
                    state = self.optimizer.state[p]
                    exp_avg = state['exp_avg']
                    exp_avg_sq = state['exp_avg_sq']
                    step = state['step']  # real step count; not mutated here
                    for (staleness, lr) in zip(range(1, (self.n_steps + 1)), step_lrs):
                        if (lr == 0):
                            continue  # zero-lr step: neither decay nor update applies
                        # Decoupled weight decay (AdamW), applied to the clone in-place.
                        p.data.mul_((1 - (lr * weight_decay)))
                        bias_correction1 = (1 - (beta1 ** (step + staleness)))
                        bias_correction2 = (1 - (beta2 ** (step + staleness)))
                        # sqrt() is out-of-place, so add_(eps) mutates only a temporary.
                        denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps)
                        step_size = (lr / bias_correction1)
                        # Momentum decayed by beta1**staleness: no new grads assumed.
                        p.data.addcdiv_((exp_avg * (beta1 ** staleness)), denom, value=(- step_size))
    def revert(self):
        if (not self.n_steps):
            return
        self.true_weights_storage.restore_if_needed()
|
class AdamWClonedWeightPredictionForAggregation(WeightPredictor):
    """AdamW weight prediction ('aggmsnag') for gradient aggregation.

    Decoupled weight decay is applied each predicted step; the freshly
    aggregated gradient feeds the momentum estimate only at staleness 1.
    """
    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        # Ensure Adam state exists so prediction can run before the first real step.
        adam_init(self.optimizer)
    def forward(self):
        if (not self.n_steps):
            return
        self.true_weights_storage.create_cloned_if_needed()
        self.true_weights_storage.record_change_mode('pred')
        pgs = self.optimizer.param_groups
        if (self.scheduler is not None):
            # Next n_steps lrs, transposed to per-param-group lists.
            step_lrs = self.scheduler.get_next(self.n_steps)
            pg_step_lrs = [[slr[i] for slr in step_lrs] for i in range(len(pgs))]
        else:
            pg_step_lrs = [([pg['lr']] * self.n_steps) for pg in pgs]
        with torch.no_grad():
            for (pg, step_lrs) in zip(pgs, pg_step_lrs):
                (beta1, beta2) = pg['betas']
                eps = pg['eps']
                weight_decay = pg['weight_decay']
                for p in pg['params']:
                    state = self.optimizer.state[p]
                    exp_avg = state['exp_avg']
                    exp_avg_sq = state['exp_avg_sq']
                    step = state['step']  # real step count; not mutated here
                    # Local momentum estimate; real exp_avg is left untouched.
                    exp_avg_hat = exp_avg
                    for (staleness, lr) in zip(range(1, (self.n_steps + 1)), step_lrs):
                        if (lr == 0):
                            continue  # zero-lr step: nothing moves, momentum frozen
                        # Aggregated gradient is used only for the first step.
                        d_p = (0 if ((p.grad is None) or (staleness > 1)) else p.grad)
                        # Decoupled weight decay (AdamW), applied to the clone in-place.
                        p.data.mul_((1 - (lr * weight_decay)))
                        exp_avg_hat = ((exp_avg_hat * beta1) + ((1 - beta1) * d_p))
                        bias_correction1 = (1 - (beta1 ** (step + staleness)))
                        bias_correction2 = (1 - (beta2 ** (step + staleness)))
                        # sqrt() is out-of-place, so add_(eps) mutates only a temporary.
                        denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps)
                        step_size = (lr / bias_correction1)
                        p.data.addcdiv_(exp_avg_hat, denom, value=(- step_size))
    def revert(self):
        if (not self.n_steps):
            return
        self.true_weights_storage.restore_if_needed()
|
class CowDict(MutableMapping):
    """Copy-on-write view over a base dict.

    Reads fall through to ``base`` unless the key was overwritten (stored in
    ``self.dict``) or deleted (recorded in ``self.deleted_keys``). Writes and
    deletes never touch ``base``.
    """
    def __init__(self, base: dict):
        self.base = base            # shared, never mutated
        self.dict = {}              # local overrides
        self.deleted_keys = set()   # tombstones hiding base keys
    def __getitem__(self, key):
        if (key in self.deleted_keys):
            raise KeyError(key)
        try:
            return self.dict[key]
        except KeyError:
            return self.base[key]
    def __setitem__(self, key, value):
        # A write resurrects a deleted key, then stores it locally.
        self.deleted_keys.discard(key)
        self.dict[key] = value
    def __delitem__(self, key):
        if (key in self.base):
            # Drop any local override and tombstone the base entry.
            self.dict.pop(key, None)
            self.deleted_keys.add(key)
        elif (key in self.dict):
            del self.dict[key]
            self.deleted_keys.add(key)
        else:
            raise KeyError(key)
    def __len__(self):
        return len((set(self.dict.keys()).union(set(self.base.keys())) - self.deleted_keys))
    def __iter__(self):
        # Local overrides first, then base keys not shadowed or deleted.
        for key in self.dict:
            if (key not in self.deleted_keys):
                (yield key)
        for key in self.base:
            if ((key not in self.dict) and (key not in self.deleted_keys)):
                (yield key)
    def __repr__(self):
        # BUG FIX: the previous implementation unconditionally deleted the last
        # element of the parts list, so an *empty* mapping rendered as '}'
        # instead of '{}'.
        return ('{' + ', '.join((f'{key!r}: {value!r}' for (key, value) in self.items())) + '}')
|
class WeightPredictor(abc.ABC):
    """Base class for weight predictors.

    ``forward()`` moves the model onto predicted future weights,
    ``revert()`` restores the true ones; ``setup(n_steps)`` is called first
    with how many optimizer steps ahead to predict.
    """
    def __init__(self, optimizer, fix_fn=None, scheduler=None, nag_with_predictor=False, true_weights_storage=None):
        self.optimizer = optimizer
        self.fix_fn = fix_fn
        self.scheduler = scheduler
        self.nag_with_predictor = nag_with_predictor
        self.true_weights_storage = true_weights_storage
        if nag_with_predictor:
            print('-I- Doing NAG with predictor')
    def setup(self, n_steps):
        """Record the prediction horizon; NAG forces at least one step."""
        if n_steps == 0 and self.nag_with_predictor:
            n_steps = 1
        self.n_steps = n_steps
    @abc.abstractmethod
    def forward(self):
        raise NotImplementedError()
    @abc.abstractmethod
    def revert(self):
        raise NotImplementedError()
|
class FixFunction(abc.ABC):
    """Callable computing the scalar coefficient a WeightPredictor applies to
    the momentum buffer for the current prediction horizon (``p.n_steps``),
    given the param group ``pg``."""
    @abc.abstractmethod
    def __call__(self, p: WeightPredictor, pg):
        raise NotImplementedError()
|
def get_sched_predictor(optimizer, sched_creator_cls, **kw):
    """Build a SchedulerPredictor mirroring the optimizer's param groups and lrs.

    Extra keyword arguments are forwarded to the predictor and may override
    the derived ones.
    """
    groups = optimizer.param_groups
    base_kwargs = {
        'lrs': [pg['lr'] for pg in groups],
        'sched_creator_cls': sched_creator_cls,
        'n_param_groups': len(groups),
    }
    return SchedulerPredictor(**{**base_kwargs, **kw})
|
def dummy_optimizer(lrs, n_param_groups=1):
    """Dummy SGD optimizer over throwaway 1x1 linear models, one param group per lr."""
    assert len(lrs) == n_param_groups
    optimizer = optim.SGD(nn.Linear(1, 1, bias=False).parameters(), lrs[0])
    for lr in lrs[1:]:
        optimizer.add_param_group({'params': nn.Linear(1, 1, bias=False).parameters(), 'lr': lr})
    return optimizer
|
class SchedulerPredictor():
    """Predicts future learning rates by running a cloned scheduler ahead of
    the real one.

    A dummy optimizer mirrors the real param groups' lrs; its scheduler is
    stepped eagerly so ``self.q`` always holds the lrs of the *upcoming*
    steps (one entry per step, each a per-param-group list).
    """
    def __init__(self, lrs, sched_creator_cls, *args, n_param_groups=0, **kw):
        optimizer = dummy_optimizer(lrs=lrs, n_param_groups=n_param_groups)
        scheduler = sched_creator_cls(optimizer, *args, **kw)
        # Dummy step() silences the "scheduler stepped before optimizer" warning.
        optimizer.step()
        self.scheduler = scheduler
        self.q = deque()
        # Seed the queue with the lr of the very next step, then advance the clone.
        self.q.append(self.scheduler.get_last_lr())
        self.scheduler.step()
    def get_next(self, n_next):
        """Return lrs for the next ``n_next`` steps, extending the queue lazily."""
        while (len(self.q) < n_next):
            self.q.append(self.scheduler.get_last_lr())
            self.scheduler.step()
        res = list(itertools.islice(self.q, 0, n_next))
        return res
    def patch_scheduler(self, scheduler):
        """Wrap the real scheduler's step() so each real step also advances this
        predictor's window (push one future lr, drop the consumed one)."""
        q = self.q
        dummy_sched = self.scheduler
        def step_decorator(func):
            @wraps(func)
            def inner(self, *args, **kwargs):
                func(self, *args, **kwargs)
                # Keep the window aligned with the real scheduler position.
                q.append(dummy_sched.get_last_lr())
                dummy_sched.step()
                q.popleft()
            return types.MethodType(inner, scheduler)
        # Rebind step on the instance; __func__ unwraps the bound method.
        scheduler.step = step_decorator(scheduler.step.__func__)
        print(f'-I- patched scheduler to update sched-aware predictor on step()')
|
class SGDRevertableLinearWeightPrediction(WeightPredictor):
    """msnag SGD prediction that reverts by re-adding the applied fix instead
    of cloning weights. Currently disabled: __init__ raises immediately."""
    def __init__(self, *args, **kw):
        # Deliberately disabled for the pipeline; everything below is unreachable.
        raise NotImplementedError('SGDRevertableLinearWeightPrediction not yet supported for pipeline')
        super().__init__(*args, **kw)
    def forward(self):
        if (not self.n_steps):
            return
        with torch.no_grad():
            # Cache per-group coefficients so revert() can undo exactly.
            self.buffered_fixes = [self.fix_fn(self, pg) for pg in self.optimizer.param_groups]
            for (pg, fix_fn_item) in zip(self.optimizer.param_groups, self.buffered_fixes):
                if fix_fn_item:
                    for p in pg['params']:
                        # NOTE(review): uses the deprecated `Tensor.add_(scalar, tensor)`
                        # overload; would need the `alpha=` keyword form on current
                        # PyTorch if this class is ever re-enabled.
                        p.add_((- fix_fn_item), self.optimizer.state[p]['momentum_buffer'])
    def revert(self):
        if (not self.n_steps):
            return
        with torch.no_grad():
            for (pg, fix_fn_item) in zip(self.optimizer.param_groups, self.buffered_fixes):
                if fix_fn_item:
                    for p in pg['params']:
                        # Inverse of forward(): add back the same fix.
                        p.add_(fix_fn_item, self.optimizer.state[p]['momentum_buffer'])
|
class SGDClonedWeightPrediction(WeightPredictor):
    """SGD msnag weight prediction on cloned weights (no weight decay).

    ``forward()`` clones the true weights and subtracts
    fix_fn(pg) * momentum_buffer from every parameter; ``revert()`` restores
    the saved true weights.
    """
    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        # Pre-create momentum buffers so prediction can run before the first real step.
        for pg in self.optimizer.param_groups:
            for p in pg['params']:
                self.optimizer.state[p]['momentum_buffer'] = torch.zeros_like(p)
    def forward(self):
        if (not self.n_steps):
            return
        os_state = self.optimizer.state
        self.true_weights_storage.create_cloned_if_needed()
        self.true_weights_storage.record_change_mode('pred')
        pgs = self.optimizer.param_groups
        with torch.no_grad():
            buffered_fixes = [self.fix_fn(self, pg) for pg in pgs]
            for (pg, fix_fn_item) in zip(pgs, buffered_fixes):
                for p in pg['params']:
                    # FIX: replaced the deprecated/removed `Tensor.add_(scalar, tensor)`
                    # overload with the `alpha=` keyword form (same math, supported
                    # by current PyTorch; consistent with the rest of the file).
                    p.add_(os_state[p]['momentum_buffer'], alpha=(- fix_fn_item))
    def revert(self):
        if (not self.n_steps):
            return
        self.true_weights_storage.restore_if_needed()
|
class SGD2MSNAG(FixFunction):
    """SGD variant from Sutskever et al. (also used in TensorFlow; eq 10 in
    Goyal et al.), fixed with msnag: the coefficient is the geometric sum
    gamma + gamma^2 + ... + gamma^n_steps."""
    def __call__(self, p: WeightPredictor, pg):
        gamma = pg['momentum']
        n = p.n_steps
        if n == 1:
            return gamma
        # Closed form of sum_{k=1..n} gamma^k.
        return (gamma - math.pow(gamma, n + 1)) / (1 - gamma)
|
class SGD1MSNAG(SGD2MSNAG):
    """Pytorch-flavored SGD (eq 9 in Goyal et al.): the lr is folded into the
    momentum coefficient computed by SGD2MSNAG."""
    def __call__(self, p: WeightPredictor, pg):
        coeff = super().__call__(p, pg)
        return pg['lr'] * coeff
|
def get_sgd_weight_predictor(sgd_type: str, pred_mem: str, pred_type: str, optimizer, scheduler=None, nag_with_predictor=False, true_weights_storage=None) -> WeightPredictor:
    """Create an SGD weight predictor.

    With weight decay only sgd1 + 'clone' + 'aggmsnag' is supported; without
    weight decay only 'msnag', dispatched through the SGD_TYPE_TO_MSNAG_CLASS
    and PRED_MEM_TO_CLASS tables.

    Raises:
        NotImplementedError: for any unsupported combination.
    """
    has_weight_decay = any([(pg['weight_decay'] != 0) for pg in optimizer.param_groups])
    if has_weight_decay:
        if (pred_type == 'msnag'):
            # Typo fixes in the error message ("constantyly", "to to").
            raise NotImplementedError(f'this is constantly changed to aggmsnag since it is better, use it instead. For measuring msnag alone - change the code')
        if (pred_type != 'aggmsnag'):
            raise NotImplementedError()
        if (sgd_type == 'sgd1'):
            if (pred_mem == 'clone'):
                return SGDWDClonedWeightPrediction(optimizer, fix_fn=None, scheduler=scheduler, nag_with_predictor=nag_with_predictor, true_weights_storage=true_weights_storage)
            else:
                raise NotImplementedError()
        else:
            raise NotImplementedError()
    else:
        if (pred_type != 'msnag'):
            raise NotImplementedError(pred_type)
        fix_fn_cls = SGD_TYPE_TO_MSNAG_CLASS.get(sgd_type, None)
        if fix_fn_cls is None:
            # FIX: fail with a clear error instead of `NoneType is not callable`.
            raise NotImplementedError(sgd_type)
        fix_fn = fix_fn_cls()
        pred_cls = PRED_MEM_TO_CLASS.get(pred_mem, None)
        if pred_cls is None:
            # FIX: same clear failure for an unknown prediction-memory strategy.
            raise NotImplementedError(pred_mem)
        return pred_cls(optimizer, fix_fn=fix_fn, scheduler=scheduler, nag_with_predictor=nag_with_predictor, true_weights_storage=true_weights_storage)
|
class SGDWDClonedWeightPrediction(WeightPredictor):
    """Pytorch SGD (eq 9 Goyal et al.) msnag prediction including weight decay.

    Prediction coefficients are derived symbolically (sympy) from the SGD+WD
    update rule and pre-lambdified for every staleness up to
    MAX_ALLOWEDD_STALENESS; forward() applies them to cloned weights.
    """
    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        # Pre-create momentum buffers so prediction can run before the first real step.
        for pg in self.optimizer.param_groups:
            for p in pg['params']:
                self.optimizer.state[p]['momentum_buffer'] = torch.zeros_like(p)
        MAX_ALLOWEDD_STALENESS = 8
        # Pre-compute coefficient functions for every staleness up to the max.
        (res, _) = auto_lambdify(MAX_ALLOWEDD_STALENESS, WDSympySGDMsnag, simplify=True)
        self.res = res
    def setup(self, n_steps):
        # Overrides base: the nag_with_predictor adjustment happens in forward().
        self.n_steps = n_steps
    def forward(self):
        if ((self.n_steps == 0) and self.nag_with_predictor):
            # NAG-only path: apply a single plain SGD-with-momentum step.
            self.n_steps = 1
            os_state = self.optimizer.state
            self.true_weights_storage.create_cloned_if_needed()
            self.true_weights_storage.record_change_mode('pred')
            with torch.no_grad():
                for pg in self.optimizer.param_groups:
                    lr = pg['lr']
                    if (lr == 0):
                        continue
                    momentum = pg['momentum']
                    for p in pg['params']:
                        # FIX: replaced the deprecated/removed `Tensor.add_(scalar, tensor)`
                        # overload with the `alpha=` keyword form (same math).
                        p.data.add_(os_state[p]['momentum_buffer'], alpha=((- lr) * momentum))
                        if (p.grad is not None):
                            p.data.add_(p.grad, alpha=(- lr))
            return
        if (not self.n_steps):
            return
        # Pre-lambdified coefficients for this staleness: v (momentum buffer),
        # theta (current weights) and \phi (the freshest gradient).
        res = self.res[self.n_steps]
        res_v = res['v']
        res_theta = res['theta']
        f_v = res_v['f']
        f_theta = res_theta['f']
        fs_v = res_v['free_symbols']
        fs_theta = res_theta['free_symbols']
        res_first_grad = res['\\phi']
        fs_first_grad = res_first_grad['free_symbols']
        f_first_grad = res_first_grad['f']
        os_state = self.optimizer.state
        self.true_weights_storage.create_cloned_if_needed()
        self.true_weights_storage.record_change_mode('pred')
        pgs = self.optimizer.param_groups
        with torch.no_grad():
            for pg in pgs:
                # Symbol names match the sympy derivation: eta=lr, gamma=momentum,
                # lambda=weight decay.
                d = {'\\eta': pg['lr'], '\\gamma': pg['momentum'], '\\lambda': pg['weight_decay']}
                coeff_v = f_v(*[d[a] for a in fs_v])
                coeff_theta = f_theta(*[d[a] for a in fs_theta])
                coeff_first_grad = f_first_grad(*[d[a] for a in fs_first_grad])
                for p in pg['params']:
                    p.data.mul_(coeff_theta).add_(os_state[p]['momentum_buffer'], alpha=coeff_v)
                    if (p.grad is not None):
                        p.data.add_(p.grad, alpha=coeff_first_grad)
    def revert(self):
        if (not self.n_steps):
            return
        self.true_weights_storage.restore_if_needed()
|
def lambdify_dict(coeff):
    """Lambdify ``coeff``; return dict(f=callable, free_symbols=sorted names,
    coeff_expr=the original expression)."""
    ordered_symbols = sorted(coeff.free_symbols, key=str)
    fn = lambdify(ordered_symbols, coeff, modules=['math'])
    names = sorted(map(str, ordered_symbols))
    return dict(f=fn, free_symbols=names, coeff_expr=coeff)
|
def auto_lambdify_delay_1(optimizer_class, simplify=False, allow_no_coeff=False):
    # NOTE(review): this function discards all of its intermediate results and
    # implicitly returns None — the final `dict(...)` expression is unused.
    # It looks like an unfinished/debug variant of auto_lambdify; confirm
    # before using it anywhere.
    (_, preds, gaps) = run_sim(1, optimizer_class, simplify=simplify)
    gap = gaps[0]
    pred = preds[0]
    fs_gap = list(gap.free_symbols)
    fs_pred = list(pred.free_symbols)  # unused
    f = lambdify(fs_gap, gap, modules=['math'])
    dict(inspect.signature(f).parameters)  # result discarded
|
def auto_lambdify(max_staleness, optimizer_class, simplify=False, allow_no_coeff=False):
    """Auto-generate coefficient functions for every staleness up to ``max_staleness``.

    Runs a symbolic simulation of ``optimizer_class`` and, for each staleness,
    collects the prediction expression by each symbol in
    ``optimizer_class.collect_order``, lambdifying the coefficient of each.

    Returns:
        (res, gap_res): ``res[staleness][symbol_name]`` is a dict with keys
        'f' (callable), 'free_symbols' (sorted argument names) and
        'coeff_expr'; ``gap_res['gap_1']`` is the analogous dict for the
        coefficient of the time-t gradient in the staleness-1 gap.

    Example:
        res, gap_res = auto_lambdify(...)
        d = {"\\eta": 0.1, "\\gamma": 0.9}
        entry = res[1]["v"]
        print(entry['f'](*[d[a] for a in entry['free_symbols']]))
    """
    (_, preds, gaps) = run_sim(max_staleness, optimizer_class, simplify=simplify)
    res = defaultdict(dict)
    for (idx, expr) in enumerate(preds):
        curr_staleness = (idx + 1)  # preds[0] corresponds to staleness 1
        expr = expr.expand()
        symbols = optimizer_class.collect_order
        symbols = list(map(Symbol, symbols))
        for s in symbols:
            expr = expr.collect(s)
        for s in symbols:
            coeff = expr.coeff(s)
            if (not coeff):
                if (not allow_no_coeff):
                    raise NotImplementedError(f"can't find {s} coeff in {expr}. Do it manually.")
                # NOTE(review): with allow_no_coeff=True a falsy coeff is still
                # lambdified below; confirm whether it should be skipped instead.
            generated_dict = lambdify_dict(coeff)
            res[curr_staleness].update({str(s): generated_dict})
    gap_res = dict()
    grad = tplus_time('g', 0)  # the freshest gradient symbol, g_{t}
    gap1_expr = gaps[0].collect(grad).coeff(grad)
    generated_dict = lambdify_dict(gap1_expr)
    gap_res['gap_1'] = generated_dict
    return (res, gap_res)
|
def calc_gap(theta_true, theta_pred, simplify=True):
    """Return the (optionally simplified) symbolic gap theta_true - theta_pred."""
    diff = theta_true - theta_pred
    return diff.simplify() if simplify else diff
|
def tplus_time(s, time):
    """Return a Symbol named '<s>_{t}' when time == 0, else '<s>_{t+<time>}'."""
    if time == 0:
        return Symbol(f'{s}_{{t}}')
    return Symbol(f'{s}_{{t+{time}}}')
|
class SympyPredictingOptimizer(ABC):
    """Interface for symbolic optimizers used by the sympy-based coefficient
    derivation: they can advance their simulated state and emit predictions."""
    @abstractmethod
    def step(self):
        """Advance the simulated optimizer by one step."""
    @abstractmethod
    def prediction(self, nsteps):
        """Return the predicted expression ``nsteps`` steps ahead."""
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.