code stringlengths 17 6.64M |
|---|
class Adam(Optimizer):
    r"""Implements the Adam algorithm, additionally keeping a running average of
    the squared applied step in ``state['exp_step_avg_sq']`` (the extra
    statistic does not influence the parameter update itself).

    Proposed in `Adam: A Method for Stochastic Optimization`_
    (https://arxiv.org/abs/1412.6980); AMSGrad variant from
    `On the Convergence of Adam and Beyond`_
    (https://openreview.net/forum?id=ryQu7f-RZ).

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        amsgrad (boolean, optional): whether to use the AMSGrad variant
            (default: False)
    """

    def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-08,
                 weight_decay=0, amsgrad=False):
        if not 0.0 <= lr:
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if not 0.0 <= eps:
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay, amsgrad=amsgrad)
        super(Adam, self).__init__(params, defaults)

    def __setstate__(self, state):
        # Checkpoints saved before the amsgrad option existed lack the key.
        super(Adam, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('amsgrad', False)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                amsgrad = group['amsgrad']
                state = self.state[p]
                # Lazy per-parameter state initialization.
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p.data, memory_format=torch.preserve_format)
                    state['exp_avg_sq'] = torch.zeros_like(p.data, memory_format=torch.preserve_format)
                    # Extra statistic: EMA of the squared applied step.
                    state['exp_step_avg_sq'] = torch.zeros_like(p.data, memory_format=torch.preserve_format)
                    if amsgrad:
                        state['max_exp_avg_sq'] = torch.zeros_like(p.data, memory_format=torch.preserve_format)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                if amsgrad:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                if group['weight_decay'] != 0:
                    # Classic (coupled) L2 penalty, folded into the gradient in place.
                    # Fix: keyword `alpha=` replaces the deprecated positional-scalar
                    # overload `add_(scalar, tensor)` removed from modern torch.
                    grad.add_(p.data, alpha=group['weight_decay'])
                # Decay the first and second moment running averages.
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                if amsgrad:
                    # Maintain the max of all second-moment running averages.
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    denom = (max_exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
                else:
                    denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
                step_size = group['lr'] / bias_correction1
                # Reuse `denom` to hold the unscaled update direction exp_avg / denom.
                torch.div(exp_avg, denom, out=denom)
                p.data.add_(denom, alpha=-step_size)
                # Track EMA of the squared applied step, (step_size * denom)^2.
                state['exp_step_avg_sq'].mul_(beta2).addcmul_(denom, denom, value=(1 - beta2) * math.pow(step_size, 2))
        return loss
|
class Adam(Optimizer):
    r"""Implements the Adam algorithm, additionally keeping a running average of
    the squared (unit-learning-rate) step in ``state['exp_step_avg_sq']``; this
    variant scales the statistic by ``1 / bias_correction1`` rather than by the
    full step size.

    Proposed in `Adam: A Method for Stochastic Optimization`_
    (https://arxiv.org/abs/1412.6980); AMSGrad variant from
    `On the Convergence of Adam and Beyond`_
    (https://openreview.net/forum?id=ryQu7f-RZ).

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        amsgrad (boolean, optional): whether to use the AMSGrad variant
            (default: False)
    """

    def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-08,
                 weight_decay=0, amsgrad=False):
        if not 0.0 <= lr:
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if not 0.0 <= eps:
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay, amsgrad=amsgrad)
        super(Adam, self).__init__(params, defaults)

    def __setstate__(self, state):
        # Checkpoints saved before the amsgrad option existed lack the key.
        super(Adam, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('amsgrad', False)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                amsgrad = group['amsgrad']
                state = self.state[p]
                # Lazy per-parameter state initialization.
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p.data, memory_format=torch.preserve_format)
                    state['exp_avg_sq'] = torch.zeros_like(p.data, memory_format=torch.preserve_format)
                    # Extra statistic: EMA of the squared bias-corrected step direction.
                    state['exp_step_avg_sq'] = torch.zeros_like(p.data, memory_format=torch.preserve_format)
                    if amsgrad:
                        state['max_exp_avg_sq'] = torch.zeros_like(p.data, memory_format=torch.preserve_format)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                if amsgrad:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                if group['weight_decay'] != 0:
                    # Classic (coupled) L2 penalty, folded into the gradient in place.
                    # Fix: keyword `alpha=` replaces the deprecated positional-scalar
                    # overload `add_(scalar, tensor)` removed from modern torch.
                    grad.add_(p.data, alpha=group['weight_decay'])
                # Decay the first and second moment running averages.
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                if amsgrad:
                    # Maintain the max of all second-moment running averages.
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    denom = (max_exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
                else:
                    denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
                step_size = group['lr'] / bias_correction1
                # Reuse `denom` to hold the unscaled update direction exp_avg / denom.
                torch.div(exp_avg, denom, out=denom)
                p.data.add_(denom, alpha=-step_size)
                # This variant scales by t = 1/bias_correction1 (lr-independent).
                t = 1 / bias_correction1
                state['exp_step_avg_sq'].mul_(beta2).addcmul_(denom, denom, value=(1 - beta2) * math.pow(t, 2))
        return loss
|
class AdamW(Optimizer):
    r"""AdamW optimizer: Adam with decoupled weight decay.

    Adam was proposed in `Adam: A Method for Stochastic Optimization`_
    (https://arxiv.org/abs/1412.6980); the decoupled-decay variant in
    `Decoupled Weight Decay Regularization`_ (https://arxiv.org/abs/1711.05101);
    AMSGrad in `On the Convergence of Adam and Beyond`_
    (https://openreview.net/forum?id=ryQu7f-RZ).

    Arguments:
        params (iterable): parameters to optimize, or dicts defining groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients for the running
            averages of the gradient and its square (default: (0.9, 0.999))
        eps (float, optional): denominator term for numerical stability
            (default: 1e-8)
        weight_decay (float, optional): weight decay coefficient (default: 1e-2)
        amsgrad (boolean, optional): use the AMSGrad variant (default: False)
    """

    def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-08,
                 weight_decay=0.01, amsgrad=False):
        if not 0.0 <= lr:
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if not 0.0 <= eps:
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))
        if not 0.0 <= weight_decay:
            raise ValueError('Invalid weight_decay value: {}'.format(weight_decay))
        super(AdamW, self).__init__(
            params,
            dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad))

    def __setstate__(self, state):
        super(AdamW, self).__setstate__(state)
        for group in self.param_groups:
            # Older checkpoints may lack the amsgrad flag.
            group.setdefault('amsgrad', False)

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()
        for group in self.param_groups:
            beta1, beta2 = group['betas']
            use_amsgrad = group['amsgrad']
            for param in group['params']:
                if param.grad is None:
                    continue
                # Decoupled weight decay: shrink the weights directly,
                # independent of the gradient-based update below.
                param.mul_(1 - group['lr'] * group['weight_decay'])
                grad = param.grad
                if grad.is_sparse:
                    raise RuntimeError('AdamW does not support sparse gradients')
                state = self.state[param]
                if not state:
                    # Lazy per-parameter state initialization.
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(param, memory_format=torch.preserve_format)
                    state['exp_avg_sq'] = torch.zeros_like(param, memory_format=torch.preserve_format)
                    if use_amsgrad:
                        state['max_exp_avg_sq'] = torch.zeros_like(param, memory_format=torch.preserve_format)
                state['step'] += 1
                step_num = state['step']
                exp_avg = state['exp_avg']
                exp_avg_sq = state['exp_avg_sq']
                # First and second moment running averages.
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                bias_correction2 = 1 - beta2 ** step_num
                if use_amsgrad:
                    # Use the historical max of the second moment.
                    max_exp_avg_sq = state['max_exp_avg_sq']
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    denom = (max_exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
                else:
                    denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
                step_size = group['lr'] / (1 - beta1 ** step_num)
                param.data.addcdiv_(exp_avg, denom, value=-step_size)
        return loss
|
class AdamW(Optimizer):
    r"""Implements AdamW (Adam with decoupled weight decay), additionally
    keeping a running average of the squared applied step in
    ``state['exp_step_avg_sq']`` (scaled by step_size**2).

    Adam: https://arxiv.org/abs/1412.6980
    Decoupled Weight Decay Regularization: https://arxiv.org/abs/1711.05101
    On the Convergence of Adam and Beyond: https://openreview.net/forum?id=ryQu7f-RZ

    Arguments:
        params (iterable): parameters to optimize, or dicts defining groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients for the running
            averages of the gradient and its square (default: (0.9, 0.999))
        eps (float, optional): denominator term for numerical stability
            (default: 1e-8)
        weight_decay (float, optional): weight decay coefficient (default: 1e-2)
        amsgrad (boolean, optional): use the AMSGrad variant (default: False)
    """

    def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-08,
                 weight_decay=0.01, amsgrad=False):
        if not 0.0 <= lr:
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if not 0.0 <= eps:
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad)
        super(AdamW, self).__init__(params, defaults)

    def __setstate__(self, state):
        # Checkpoints saved before the amsgrad option existed lack the key.
        super(AdamW, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('amsgrad', False)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                # Decoupled weight decay: applied directly to the weights.
                p.data.mul_(1 - group['lr'] * group['weight_decay'])
                grad = p.grad.data
                if grad.is_sparse:
                    # Fix: message previously said "Adam" in the AdamW class.
                    raise RuntimeError('AdamW does not support sparse gradients')
                amsgrad = group['amsgrad']
                state = self.state[p]
                # Lazy per-parameter state initialization.
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p.data, memory_format=torch.preserve_format)
                    state['exp_avg_sq'] = torch.zeros_like(p.data, memory_format=torch.preserve_format)
                    # Extra statistic: EMA of the squared applied step.
                    state['exp_step_avg_sq'] = torch.zeros_like(p.data, memory_format=torch.preserve_format)
                    if amsgrad:
                        state['max_exp_avg_sq'] = torch.zeros_like(p.data, memory_format=torch.preserve_format)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                if amsgrad:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                # Fix: keyword `alpha=`/`value=` replace the deprecated
                # positional-scalar overloads removed from modern torch.
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                if amsgrad:
                    # Maintain the max of all second-moment running averages.
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    denom = (max_exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
                else:
                    denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
                step_size = group['lr'] / bias_correction1
                # Reuse `denom` to hold the unscaled update direction exp_avg / denom.
                torch.div(exp_avg, denom, out=denom)
                p.data.add_(denom, alpha=-step_size)
                # Track EMA of the squared applied step, (step_size * denom)^2.
                state['exp_step_avg_sq'].mul_(beta2).addcmul_(denom, denom, value=(1 - beta2) * math.pow(step_size, 2))
        return loss
|
class AdamW(Optimizer):
    r"""Implements AdamW (Adam with decoupled weight decay), additionally
    keeping a running average of the squared update direction in
    ``state['exp_step_avg_sq']`` (this variant does NOT scale the statistic
    by the step size).

    Adam: https://arxiv.org/abs/1412.6980
    Decoupled Weight Decay Regularization: https://arxiv.org/abs/1711.05101
    On the Convergence of Adam and Beyond: https://openreview.net/forum?id=ryQu7f-RZ

    Arguments:
        params (iterable): parameters to optimize, or dicts defining groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients for the running
            averages of the gradient and its square (default: (0.9, 0.999))
        eps (float, optional): denominator term for numerical stability
            (default: 1e-8)
        weight_decay (float, optional): weight decay coefficient (default: 1e-2)
        amsgrad (boolean, optional): use the AMSGrad variant (default: False)
    """

    def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-08,
                 weight_decay=0.01, amsgrad=False):
        if not 0.0 <= lr:
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if not 0.0 <= eps:
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad)
        super(AdamW, self).__init__(params, defaults)

    def __setstate__(self, state):
        # Checkpoints saved before the amsgrad option existed lack the key.
        super(AdamW, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('amsgrad', False)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                # Decoupled weight decay: applied directly to the weights.
                p.data.mul_(1 - group['lr'] * group['weight_decay'])
                grad = p.grad.data
                if grad.is_sparse:
                    # Fix: message previously said "Adam" in the AdamW class.
                    raise RuntimeError('AdamW does not support sparse gradients')
                amsgrad = group['amsgrad']
                state = self.state[p]
                # Lazy per-parameter state initialization.
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p.data, memory_format=torch.preserve_format)
                    state['exp_avg_sq'] = torch.zeros_like(p.data, memory_format=torch.preserve_format)
                    # Extra statistic: EMA of the squared update direction.
                    state['exp_step_avg_sq'] = torch.zeros_like(p.data, memory_format=torch.preserve_format)
                    if amsgrad:
                        state['max_exp_avg_sq'] = torch.zeros_like(p.data, memory_format=torch.preserve_format)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                if amsgrad:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                # Fix: keyword `alpha=`/`value=` replace the deprecated
                # positional-scalar overloads removed from modern torch.
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                if amsgrad:
                    # Maintain the max of all second-moment running averages.
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    denom = (max_exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
                else:
                    denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
                step_size = group['lr'] / bias_correction1
                # Reuse `denom` to hold the unscaled update direction exp_avg / denom.
                torch.div(exp_avg, denom, out=denom)
                p.data.add_(denom, alpha=-step_size)
                # EMA of denom^2 (unscaled, unlike the sibling variant above).
                state['exp_step_avg_sq'].mul_(beta2).addcmul_(denom, denom, value=1 - beta2)
        return loss
|
def get_multi_step_lr_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps, milestones, gamma, last_epoch=(- 1)):
    """Build a LambdaLR: linear warmup, then multiply by ``gamma`` per milestone.

    During warmup the factor ramps linearly from 0 to 1; afterwards it is
    ``gamma ** k`` where ``k`` is the number of milestones already passed.
    It is the caller's responsibility to ensure every milestone exceeds
    ``num_warmup_steps``.
    """
    def lr_lambda(step):
        if step >= num_warmup_steps:
            # Count milestones passed so far via binary search.
            return gamma ** bisect_right(milestones, step)
        return float(step) / float(max(1, num_warmup_steps))
    return LambdaLR(optimizer, lr_lambda, last_epoch)
|
class WarmupMultiStepLR(LambdaLR):
    """LambdaLR schedule: linear warmup, then ``gamma`` decay per milestone.

    The factor ramps linearly from 0 to 1 over ``num_warmup_steps``, then is
    ``gamma ** k`` with ``k`` the number of milestones already passed. It is
    the caller's responsibility to ensure each milestone exceeds
    ``num_warmup_steps``.
    """

    def __init__(self, optimizer, num_warmup_steps, milestones, gamma, last_epoch=(- 1)):
        def compute_factor(step):
            if step >= num_warmup_steps:
                # Milestones passed so far, via binary search.
                return gamma ** bisect_right(milestones, step)
            return step / max(1, num_warmup_steps)
        super().__init__(optimizer, compute_factor, last_epoch)
|
class _RequiredParameter(object):
'Singleton class representing a required parameter for an Optimizer.'
def __repr__(self):
return '<required parameter>'
|
class SGD(Optimizer):
    r"""Stochastic gradient descent, optionally with (Nesterov) momentum.

    Nesterov momentum follows the formula from
    `On the importance of initialization and momentum in deep learning`__
    (http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf).

    Args:
        params (iterable): parameters to optimize, or dicts defining groups
        lr (float): learning rate
        momentum (float, optional): momentum factor (default: 0)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        dampening (float, optional): dampening for momentum (default: 0)
        nesterov (bool, optional): enables Nesterov momentum (default: False)

    Example:
        >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()

    .. note::
        The momentum update here is ``v = mu * v + g; p = p - lr * v``
        (PyTorch convention), which differs from Sutskever et al., where the
        learning rate multiplies the gradient inside the velocity update.
    """

    def __init__(self, params, lr=required, momentum=0, dampening=0,
                 weight_decay=0, nesterov=False):
        if lr is not required and lr < 0.0:
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if momentum < 0.0:
            raise ValueError('Invalid momentum value: {}'.format(momentum))
        if weight_decay < 0.0:
            raise ValueError('Invalid weight_decay value: {}'.format(weight_decay))
        defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
                        weight_decay=weight_decay, nesterov=nesterov)
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError('Nesterov momentum requires a momentum and zero dampening')
        super(SGD, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(SGD, self).__setstate__(state)
        for group in self.param_groups:
            # Older checkpoints may lack the nesterov flag.
            group.setdefault('nesterov', False)

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()
        for group in self.param_groups:
            wd = group['weight_decay']
            mom = group['momentum']
            damp = group['dampening']
            use_nesterov = group['nesterov']
            lr = group['lr']
            for param in group['params']:
                if param.grad is None:
                    continue
                update = param.grad
                if wd != 0:
                    # L2 penalty folded into the gradient (out of place).
                    update = update.add(param, alpha=wd)
                if mom != 0:
                    pstate = self.state[param]
                    if 'momentum_buffer' not in pstate:
                        # First step: the buffer starts as the raw gradient.
                        buf = pstate['momentum_buffer'] = torch.clone(update).detach()
                    else:
                        buf = pstate['momentum_buffer']
                        buf.mul_(mom).add_(update, alpha=1 - damp)
                    update = update.add(buf, alpha=mom) if use_nesterov else buf
                param.data.add_(update, alpha=-lr)
        return loss
|
class SGD(Optimizer):
    r"""Stochastic gradient descent (optionally with momentum), with optional
    momentum correction on learning-rate changes (https://arxiv.org/pdf/1706.02677.pdf).

    Unlike the standard PyTorch SGD, this follows the Sutskever et al.
    formulation: the gradient is scaled by ``lr`` BEFORE entering the momentum
    buffer (``v = rho * v + lr * g; p = p - v``).

    Args:
        params (iterable): parameters to optimize, or dicts defining groups
        lr (float): learning rate
        momentum (float, optional): momentum factor (default: 0)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        dampening (float, optional): dampening for momentum (default: 0)
        nesterov (bool, optional): enables Nesterov momentum (default: False)
        momentum_correction (bool, optional): rescale momentum by
            ``lr / prev_lr`` when the learning rate changes (default: False)

    Example:
        >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()
    """

    def __init__(self, params, lr=required, momentum=0, dampening=0,
                 weight_decay=0, nesterov=False, momentum_correction=False):
        if lr is not required and lr < 0.0:
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if momentum < 0.0:
            raise ValueError('Invalid momentum value: {}'.format(momentum))
        if weight_decay < 0.0:
            raise ValueError('Invalid weight_decay value: {}'.format(weight_decay))
        defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
                        weight_decay=weight_decay, nesterov=nesterov)
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError('Nesterov momentum requires a momentum and zero dampening')
        super(SGD, self).__init__(params, defaults)
        self.momentum_correction = momentum_correction

    def __setstate__(self, state):
        super(SGD, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('nesterov', False)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']
            if self.momentum_correction and ('prev_lr' in group) and group['prev_lr']:
                # Rescale the effective momentum when the lr changed, so the
                # lr-scaled velocity stays consistent (Goyal et al., 2017).
                momentum *= (group['lr'] / group['prev_lr'])
            # Record the lr unconditionally so the correction can activate on
            # the step AFTER the first one.
            group['prev_lr'] = group['lr']
            for p in group['params']:
                if p.grad is None:
                    continue
                d_p = p.grad.data
                if weight_decay != 0:
                    # Fix: keyword `alpha=` replaces the deprecated
                    # positional-scalar overload `add_(scalar, tensor)`.
                    d_p.add_(p.data, alpha=weight_decay)
                # NOTE: scales the gradient IN PLACE by lr (Sutskever form).
                d_p.mul_(group['lr'])
                if momentum != 0:
                    param_state = self.state[p]
                    if 'momentum_buffer' not in param_state:
                        buf = param_state['momentum_buffer'] = torch.clone(d_p).detach()
                    else:
                        buf = param_state['momentum_buffer']
                        buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
                    if nesterov:
                        d_p = d_p.add(buf)
                    else:
                        d_p = buf
                # lr is already folded into d_p, so the step uses alpha=-1.
                p.data.add_(d_p, alpha=-1)
        return loss
|
def linear_lr_scaling(bs_train, BASE_LR, BASE_BS_TRAIN, downscale=False):
    """Scale BASE_LR linearly with the training batch size.

    Batches larger than BASE_BS_TRAIN always scale the LR up; smaller batches
    keep BASE_LR unless ``downscale`` is set, in which case the LR shrinks
    proportionally.
    """
    if bs_train >= BASE_BS_TRAIN:
        scaled = BASE_LR * (bs_train / BASE_BS_TRAIN)
    elif not downscale:
        # Small batch without downscaling: keep the base learning rate as-is.
        return BASE_LR
    else:
        scaled = BASE_LR / (BASE_BS_TRAIN / bs_train)
    assert scaled > 0
    return scaled
|
class CommPolicy(Enum):
    # Communication strategy used to exchange tensors between ranks.
    P2P = auto()    # point-to-point send/recv
    BCAST = auto()  # broadcast-based communication
|
def to_policy(backend, cpu):
    """Map a torch.distributed backend name (plus a cpu flag) to a CommPolicy.

    Only MPI, or CPU execution on any backend, currently has a policy (P2P);
    GPU nccl/gloo are not supported yet.
    """
    assert backend in {'nccl', 'gloo', 'mpi'}
    if backend != 'mpi' and not cpu:
        raise NotImplementedError()
    return CommPolicy.P2P
|
def get_auto_comm_handler_cls(backend, cpu):
    # Resolve the concrete comm-handler class for this backend/device pair
    # via the module-level POLICY_TO_COMM registry.
    return POLICY_TO_COMM[to_policy(backend, cpu)]
|
def zero_grad_fn(g):
    # NOTE(review): the early `return` below makes the zeroing loop dead code,
    # so this function is currently a no-op. Presumably disabled on purpose
    # (e.g. buffers are replaced/overwritten elsewhere) -- confirm intent
    # before re-enabling the loop.
    return
    for b in g:
        b.detach_().zero_()
|
class PreProcIter():
    """Wraps an iterator, applying ``preproc_fn`` to each item before yield.

    ``preproc_fn`` is called for its side effect on the item; its return
    value is discarded and the (possibly mutated) item itself is returned.
    """

    def __init__(self, itr, preproc_fn):
        self.itr = itr
        self.preproc_fn = preproc_fn

    def __next__(self):
        item = next(self.itr)
        self.preproc_fn(item)
        return item

    def __iter__(self):
        # Deliberately unsupported: this wrapper is consumed via next() only.
        raise NotImplementedError()
|
class Buffers():
    """Round-robin pool of pre-allocated receive buffers for async comm.

    ``irecv_fn`` posts an asynchronous receive into the next pooled buffer;
    ``wait_first`` blocks on the oldest outstanding receive and returns a
    clone of its contents so the buffer can be reused immediately. All
    allocation and cloning runs on ``clone_stream``.
    """

    def __init__(self, max_buffers, create_fn, irecv_fn, is_grad=False,
                 prev_stream_to_use: Optional[torch.cuda.Stream] = None):
        self.buffers = []
        self.create_fn = create_fn        # allocates one full buffer set
        self.max_buffers = max_buffers    # pool depth (1 => no overlap)
        self._is_initialized = False
        self.irecv_fn = irecv_fn          # posts async receives, returns request handles
        self.handlers = deque()           # outstanding irecv request objects (FIFO)
        self.pointer = 0                  # index of the oldest in-flight buffer
        self.is_grad = is_grad            # gradient pools run zero_grad_fn on cycle
        self.last_irecv = None            # last batch_idx an irecv was posted for
        if prev_stream_to_use is not None:
            self.clone_stream = prev_stream_to_use
        else:
            # High-priority side stream for allocation/cloning.
            self.clone_stream = torch.cuda.Stream(priority=(- 1))

    def create(self):
        """Allocate the whole buffer pool once, then reset iteration state."""
        self._is_initialized = True
        with torch.cuda.stream(self.clone_stream):
            # Fix: the original wrapped this comprehension in an extra
            # `for i in range(self.max_buffers)` loop, allocating and
            # discarding the full pool max_buffers - 1 times.
            self.buffers = [self.create_fn() for _ in range(self.max_buffers)]
        self.clone_stream.synchronize()
        return self.reset_state()

    def replace_next(self):
        # Re-allocate only the buffer at the current pointer
        # (used when the expected tensor shapes change, e.g. last batch).
        with torch.cuda.stream(self.clone_stream):
            self.buffers[self.pointer] = self.create_fn()
        self.clone_stream.synchronize()

    def reset_state(self):
        """Rebuild the cycling iterator and clear in-flight bookkeeping."""
        if self.is_grad:
            # zero_grad_fn is applied to each buffer as it comes around.
            self.itr = PreProcIter(cycle(self.buffers), zero_grad_fn)
        else:
            self.itr = cycle(self.buffers)
        self.pointer = 0
        self.first_rcv_after_created = True
        self.last_irecv = None
        return self

    def is_initialized(self):
        return self._is_initialized

    def recv_next(self, batch_idx):
        """Post irecv_fn on the next buffer (idempotent per batch_idx)."""
        if self.last_irecv != batch_idx:
            self.handlers.append(self.irecv_fn(next(self.itr), batch_idx))
            self.last_irecv = batch_idx

    def wait_first(self):
        """Wait for the oldest posted irecv_fn; return clones of its tensors."""
        request_objects = self.handlers.popleft()
        res = []
        bres = self.buffers[self.pointer]
        with torch.cuda.stream(self.clone_stream):
            with torch.no_grad():
                for (obj, v) in zip(request_objects, bres):
                    obj.wait()
                    if isinstance(v, torch.Tensor):
                        # Clone so the pooled buffer can take the next irecv
                        # while the caller still holds this data.
                        res.append(v.clone())
                    else:
                        res.append(v)
        self.pointer = (self.pointer + 1) % self.max_buffers
        self.clone_stream.synchronize()
        return res
|
class BufferSimpleCommBase(SimpleCommBase):
    def __init__(self, *args, **kw):
        # Thin wrapper: all construction is handled by SimpleCommBase.
        super().__init__(*args, **kw)
    def _create_recv_buffers(self, tensor_ranks, for_grads=False):
        """Allocate one zeroed, shared-memory receive buffer per (tensor, rank) pair.

        Args:
            tensor_ranks: iterable of (tensor_name, ranks) pairs; a tensor
                exchanged with multiple ranks gets one buffer per rank.
            for_grads: True when allocating gradient buffers; multi-rank
                tensors are only expected on the gradient path (asserted below).
        """
        with torch.no_grad():
            buffers = []
            for (tensor_name, ranks) in tensor_ranks:
                dtype = self.tensor_dtypes[tensor_name]
                shape = self.tensor_shapes[tensor_name]
                # Non-torch "dtypes" describe python-level payloads; infer a
                # concrete torch dtype/shape for the wire representation.
                if (not isinstance(dtype, torch.dtype)):
                    if (dtype is None):
                        # Placeholder payload: use the canonical None tensor.
                        _tmp = None_tensor()
                        dtype = _tmp.dtype
                        shape = _tmp.shape
                    elif issubclass(dtype, (list, tuple)):
                        # list/tuple (e.g. torch.Size) payloads travel as int64 tensors.
                        if (shape is not None):
                            dtype = torch.int64
                        else:
                            raise NotImplementedError('we expect shape for torch.Size() since it will be converted to tensor')
                    else:
                        # Scalar python type (e.g. int, float): probe its tensor form.
                        _tmp = torch.tensor(dtype())
                        dtype = _tmp.dtype
                        shape = _tmp.shape
                if (len(ranks) > 1):
                    print(f'-V- creating double buffers for {tensor_name} which is sent/received to/from multiple ranks: {ranks}')
                    assert for_grads
                for _ in ranks:
                    try:
                        rcv_buffer = torch.zeros(shape, dtype=dtype, device=self.device, requires_grad=False)
                    except TypeError as e:
                        # Surface which tensor's metadata was malformed before re-raising.
                        print(f'problem with {tensor_name}, shape:{shape}, dtype={dtype}')
                        raise e
                    # Shared memory so the buffer is visible across processes
                    # -- presumably for a multiprocess pipeline; confirm usage.
                    rcv_buffer.share_memory_()
                    buffers.append(rcv_buffer)
            return buffers
    def create_activations_recv_buffers(self):
        # Buffers for activations received during the forward pass.
        return self._create_recv_buffers(tensor_ranks=self.activations_rcv_items, for_grads=False)

    def create_gradients_rcv_buffers(self):
        # Buffers for gradients received during the backward pass.
        return self._create_recv_buffers(tensor_ranks=self.grad_rcv_items_without_extention, for_grads=True)
    def init_buffers(self):
        """Decide the buffer reuse policy and set up forward/backward recv buffers."""
        training_tensor_shapes = self.training_tensor_shapes
        eval_tensor_shapes = self.eval_tensor_shapes
        training_tensor_dtypes = self.training_tensor_dtypes
        eval_tensor_dtypes = self.eval_tensor_dtypes
        last_batch_train_shapes = self.last_batch_train_shapes
        last_batch_test_shapes = self.last_batch_test_shapes
        keep_buffers_alive = self.keep_buffers_alive
        shapes_are_equal = (eval_tensor_shapes == training_tensor_shapes)
        dtypes_are_equal = (eval_tensor_dtypes == training_tensor_dtypes)
        dtypes_and_shapes_are_equal = (shapes_are_equal and dtypes_are_equal)
        no_different_last_batch_shapes = ((last_batch_train_shapes is None) and (last_batch_test_shapes is None))
        if (dtypes_and_shapes_are_equal and no_different_last_batch_shapes):
            # Train and eval agree on shapes/dtypes and no special last batch:
            # buffers can be allocated once and reused for the whole run.
            keep_buffers_alive = True
        self.keep_buffers_alive = keep_buffers_alive
        # create=False: buffers are allocated lazily (see train()/eval()).
        fwd_recv_buffers_train = self._fwd_recv_buffers_train(create=False)
        bwd_recv_buffers = self._bwd_recv_buffers()
        if keep_buffers_alive:
            self.fwd_recv_buffers_train = fwd_recv_buffers_train
            self.bwd_recv_buffers = bwd_recv_buffers
            if (not dtypes_and_shapes_are_equal):
                self.fwd_recv_buffers_eval = self._fwd_recv_buffers_eval(create=False)
            else:
                # Eval can share the training buffers outright.
                self.fwd_recv_buffers_eval = self.fwd_recv_buffers_train
        else:
            self.fwd_recv_buffers = fwd_recv_buffers_train
            self.bwd_recv_buffers = bwd_recv_buffers
        # Tracks the last batch for which a gradient pre-recv was posted.
        self._last_pre_recv_gradients = None
    def get_data_forward(self, batch_idx, num_batches, last_due_end):
        """Receive and return the forward-pass input for ``batch_idx``.

        With more than one buffer, also pre-posts the receive for the next
        batch so communication overlaps with computation.
        """
        self._ensure_fwd_recv_buffers_size_set(last_due_end)
        fwd_recv_buffers = self.fwd_recv_buffers
        fwd_recv_buffers.recv_next(batch_idx)
        x = fwd_recv_buffers.wait_first()
        if ((fwd_recv_buffers.max_buffers > 1) and (not last_due_end)):
            # Overlap: post the irecv for the next batch before returning.
            next_last_due_end = ((batch_idx + 2) == num_batches)
            self._ensure_fwd_recv_buffers_size_set(last_due_end=next_last_due_end)
            fwd_recv_buffers.recv_next((batch_idx + 1))
        x = self.fix_after_recv(x)
        return x
def pre_recv_gradients(self, batch_idx, num_batches, last_due_end):
    """Start the async gradient recv before recomputation.

    Called at the beginning of "backward". Idempotent per batch: a repeated
    call for the same batch_idx is a no-op.
    # TODO: can start it earlier, after the forward send.
    # FIXME: num_batches is redundant
    """
    already_posted = (self._last_pre_recv_gradients == batch_idx)
    if already_posted:
        return
    self._ensure_bwd_recv_buffers_size_set(last_due_end)
    self.bwd_recv_buffers.recv_next(batch_idx)
    self._last_pre_recv_gradients = batch_idx
def wait_recv_gradients(self, *args):
    """Block until the pre-posted gradient recv completes; return fixed-up grads."""
    received = self.bwd_recv_buffers.wait_first()
    return self.fix_after_recv(received, True)
def eval(self):
    """Sets evaluation mode.
    Also handles the transition : train -> eval
    """
    self.training = False
    self.set_tensor_shapes(self.eval_tensor_shapes)
    self.set_tensor_dtypes(self.eval_tensor_dtypes)
    if self.keep_buffers_alive:
        # reset_state() returns the buffer object itself.
        self.fwd_recv_buffers = self.fwd_recv_buffers_eval.reset_state()
    elif self.changed_shapes_last_batch_fwd:
        # Last train batch had special shapes: rebuild eval buffers from scratch.
        self.changed_shapes_last_batch_fwd = False
        self.fwd_recv_buffers = self._fwd_recv_buffers_eval(create=True)
    elif (not self.fwd_recv_buffers.is_initialized()):
        self.fwd_recv_buffers.create()
def train(self):
    """Sets training mode; also handles the transition eval -> train,
    rebuilding or resetting fwd/bwd recv buffers as needed."""
    self.training = True
    self.set_tensor_shapes(self.training_tensor_shapes)
    self.set_tensor_dtypes(self.training_tensor_dtypes)
    if self.keep_buffers_alive:
        self.fwd_recv_buffers = self.fwd_recv_buffers_train.reset_state()
        self.bwd_recv_buffers.reset_state()
    else:
        if self.changed_shapes_last_batch_fwd:
            # Previous epoch ended with special last-batch shapes: rebuild.
            self.changed_shapes_last_batch_fwd = False
            self.fwd_recv_buffers = self._fwd_recv_buffers_train(create=True)
        elif (not self.fwd_recv_buffers.is_initialized()):
            self.fwd_recv_buffers.create()
        if self.changed_shapes_last_batch_bwd:
            self.changed_shapes_last_batch_bwd = False
            self.bwd_recv_buffers = self._bwd_recv_buffers()
        else:
            self.bwd_recv_buffers.reset_state()
def _ensure_fwd_recv_buffers_size_set(self, last_due_end):
    """Make sure fwd recv buffers match the (possibly special) shapes of the
    upcoming batch; rebuild or patch them for a differently-sized last batch."""
    if (last_due_end and ((self.training and self.last_batch_train_shapes) or ((not self.training) and self.last_batch_test_shapes))):
        print(f'rank: {self.rank} replacing buffers for last batch, forward')
        shapes = (self.last_batch_train_shapes if self.training else self.last_batch_test_shapes)
        dtypes = (self.training_tensor_dtypes if self.training else self.eval_tensor_dtypes)
        if (self.fwd_recv_buffers.max_buffers == 1):
            # Single buffer: cheapest to throw it away and rebuild with new shapes.
            self.changed_shapes_last_batch_fwd = True
            s = self.fwd_recv_buffers.clone_stream
            del self.fwd_recv_buffers
            fwd_recv_buffers = make_buff(self, dtypes=dtypes, max_buffers=1, shapes=shapes, is_bwd=False, create=False, prev_stream_to_use=s)
            self.fwd_recv_buffers = fwd_recv_buffers
        else:
            # Multi-buffer: only replace the next slot in the ring.
            fwd_recv_buffers = self.fwd_recv_buffers
            self.set_tensor_shapes(shapes)
            self.set_tensor_dtypes(dtypes)
            fwd_recv_buffers.replace_next()
            self.changed_shapes_last_batch_fwd = True
    else:
        fwd_recv_buffers = self.fwd_recv_buffers
    if (not fwd_recv_buffers.is_initialized()):
        fwd_recv_buffers.create()
def _ensure_bwd_recv_buffers_size_set(self, last_due_end):
    """Make sure bwd recv buffers match the upcoming batch.

    Handles two transitions: (1) entering a differently-sized last batch -
    rebuild (max_buffers == 1) or patch the next ring slot (tracked per
    buffer pointer in a dict); (2) leaving it - restore training shapes.
    """
    if (last_due_end and self.last_batch_train_shapes):
        print(f'stage: {self.stage} replacing buffers for last batch, backward')
        self.changed_shapes_last_batch_bwd = True
        shapes = self.last_batch_train_shapes
        dtypes = self.training_tensor_dtypes
        if (self.bwd_recv_buffers.max_buffers == 1):
            # Single buffer: rebuild it with the last-batch shapes.
            s = self.bwd_recv_buffers.clone_stream
            del self.bwd_recv_buffers
            bwd_recv_buffers = make_buff(self, dtypes=dtypes, max_buffers=1, shapes=shapes, is_bwd=True, create=False, prev_stream_to_use=s)
            self.bwd_recv_buffers = bwd_recv_buffers
        else:
            # Multi-buffer: patch only the next ring slot, and remember which
            # pointer was patched so it can be restored later.
            bwd_recv_buffers = self.bwd_recv_buffers
            self.set_tensor_shapes(shapes)
            self.set_tensor_dtypes(dtypes)
            bwd_recv_buffers.replace_next()
            if (not isinstance(self.changed_shapes_last_batch_bwd, dict)):
                self.changed_shapes_last_batch_bwd = dict()
            self.changed_shapes_last_batch_bwd[bwd_recv_buffers.pointer] = True
    elif self.changed_shapes_last_batch_bwd:
        if (self.bwd_recv_buffers.max_buffers == 1):
            self.changed_shapes_last_batch_bwd = False
            bwd_recv_buffers = self._bwd_recv_buffers()
            self.bwd_recv_buffers = bwd_recv_buffers
        else:
            bwd_recv_buffers = self.bwd_recv_buffers
            assert isinstance(self.changed_shapes_last_batch_bwd, dict)
            # FIX: use .get() - the current pointer may never have been patched,
            # in which case direct indexing raised KeyError.
            if self.changed_shapes_last_batch_bwd.get(bwd_recv_buffers.pointer, False):
                self.changed_shapes_last_batch_bwd.pop(bwd_recv_buffers.pointer)
                # FIX: was `(self.training_tensor_shapes,)` - a stray trailing
                # comma wrapped the shapes dict in a 1-tuple, which would break
                # buffer creation (set_tensor_shapes expects the dict itself,
                # as in every other call site).
                shapes = self.training_tensor_shapes
                dtypes = self.training_tensor_dtypes
                self.set_tensor_shapes(shapes)
                self.set_tensor_dtypes(dtypes)
                bwd_recv_buffers.replace_next()
    else:
        bwd_recv_buffers = self.bwd_recv_buffers
    if (not bwd_recv_buffers.is_initialized()):
        bwd_recv_buffers.create()
def _fwd_recv_buffers_train(self, create=False):
    """Build forward (activations) recv buffers sized for training shapes/dtypes."""
    return make_buff(self, is_bwd=False, create=create, shapes=self.training_tensor_shapes, dtypes=self.training_tensor_dtypes, max_buffers=self.max_buffers)
def _fwd_recv_buffers_eval(self, create=False):
    """Build forward (activations) recv buffers sized for eval shapes/dtypes."""
    return make_buff(self, is_bwd=False, create=create, shapes=self.eval_tensor_shapes, dtypes=self.eval_tensor_dtypes, max_buffers=self.max_buffers)
def _bwd_recv_buffers(self):
    """Build backward (gradients) recv buffers; unlike the fwd factories these
    are created eagerly (create=True)."""
    return make_buff(self, is_bwd=True, create=True, shapes=self.training_tensor_shapes, dtypes=self.training_tensor_dtypes, max_buffers=self.max_buffers)
def create_futures_handler(self, *args, **kw):
    """Instantiate, cache and return the FuturesHandler for this stage."""
    handler = FuturesHandler(self.pipe_config, self.stage)
    self.futures_handler = handler
    return handler
|
class FuturesHandler(FuturesHandlerBase):
    """This is mostly for MPI, where sent objects are problematic - currently not deleted automatically.

    Keeps per-batch lists of async send request objects and waits on the
    oldest ones once more than ``sent_object_patience`` are outstanding.
    """
    def __init__(self, pipe_config: PipelineConfig, my_stage_id):
        super().__init__()
        patience = pipe_config.max_send_depth_for_stage(my_stage_id)
        self.true_patience = patience
        self.warmup_patience = patience
        pipeline_depth = pipe_config.pipeline_depth
        if (patience > 1):
            # During warmup use the full pipeline depth as patience, for safety.
            warnings.warn(f'stage {my_stage_id}: Got max_send_depth_for_stage {patience}, but setting to pipeline_depth={pipeline_depth} at warmup, for safety')
            self.warmup_patience = pipeline_depth
            patience = pipeline_depth
        print(f'-V- stage: {my_stage_id}, sent_object_patience: {patience}')
        self.sent_object_patience = patience
        # Counts down each forward; at 0 we drop from warmup to true patience.
        self.warmup_count = patience
        # batch index -> list of async send request objects (oldest first).
        self.async_fwd_objects = OrderedDict()
        self.async_bwd_objects = OrderedDict()
        stage_depth = pipe_config.get_depth_for_stage(my_stage_id)
        # NOTE(review): depth appears to count from the pipeline end, so maximal
        # depth marks the first partition - confirm against PipelineConfig.
        self.is_first_partition = (stage_depth == (pipe_config.pipeline_depth - 1))
    def after_forward(self, sent_request_objects, done_fwds, training):
        """Record forward send requests; wait on the oldest if a backlog exists."""
        if sent_request_objects:
            if self.async_fwd_objects:
                self.wait_on_sent_object(is_fwd=True)
            self.async_fwd_objects[done_fwds] = sent_request_objects
        if (self.warmup_count > 0):
            self.warmup_count -= 1
            if (self.warmup_count == 0):
                self.sent_object_patience = self.true_patience
    def after_backward(self, sent_request_objects, done_bwds):
        """Record backward send requests (first partition sends no gradients)."""
        if (not self.is_first_partition):
            if self.async_bwd_objects:
                self.wait_on_sent_object(is_fwd=False)
            self.async_bwd_objects[done_bwds] = sent_request_objects
    def clean_train(self):
        """Drain all outstanding fwd+bwd sends; re-arm warmup for the next epoch."""
        async_fwd_objects = self.async_fwd_objects
        async_bwd_objects = self.async_bwd_objects
        wait_on_sent_object = self.wait_on_sent_object
        while (len(async_fwd_objects) > 0):
            wait_on_sent_object(is_fwd=True, fin=True)
        while (len(async_bwd_objects) > 0):
            wait_on_sent_object(is_fwd=False, fin=True)
        self.sent_object_patience = self.warmup_patience
        self.warmup_count = self.warmup_patience
    def clean_eval(self):
        """Drain all outstanding fwd sends (eval has no backward)."""
        async_fwd_objects = self.async_fwd_objects
        wait_on_sent_object = self.wait_on_sent_object
        while (len(async_fwd_objects) > 0):
            wait_on_sent_object(is_fwd=True, fin=True)
        self.sent_object_patience = self.warmup_patience
        self.warmup_count = self.warmup_patience
    def wait_on_sent_object(self, is_fwd, fin=False, clean_first=True):
        """Wait on the oldest outstanding send unless within patience (or *fin*)."""
        obj_holder = (self.async_fwd_objects if is_fwd else self.async_bwd_objects)
        if clean_first:
            self.clean_sent_requests(obj_holder)
        if (not obj_holder):
            return
        if ((not fin) and (len(obj_holder) <= self.sent_object_patience)):
            return
        (_, sent_request_objects) = obj_holder.popitem(last=False)
        for i in sent_request_objects:
            i.wait()
    def clean_sent_requests(self, obj_holder):
        """Drop already-completed request objects, oldest batch first; stop at the
        first batch that still has pending requests."""
        to_del = []
        for i in obj_holder:
            a = obj_holder[i]
            to_remove = [i for (i, r) in enumerate(a) if r.is_completed()]
            for x in sorted(to_remove, reverse=True):
                del a[x]
            if (not a):
                to_del.append(i)
            else:
                break
        for i in sorted(to_del, reverse=True):
            del obj_holder[i]
|
def make_buff(comm_handler: BufferSimpleCommBase, is_bwd, shapes, dtypes=None, max_buffers=1, create=False, prev_stream_to_use=None):
    """Create recv buffer.
    TODO: This should be moved to comm handler
    """
    comm_handler.set_tensor_shapes(shapes)
    comm_handler.set_tensor_dtypes(dtypes)
    if is_bwd:
        mk_buffers = comm_handler.create_gradients_rcv_buffers
        recv_fn = comm_handler.recv_gradients
    else:
        mk_buffers = comm_handler.create_activations_recv_buffers
        recv_fn = comm_handler.recv_activations
    buff = Buffers(max_buffers, mk_buffers, recv_fn, is_grad=is_bwd, prev_stream_to_use=prev_stream_to_use)
    if create:
        buff.create()
    return buff
|
class SimpleCommBase(CommunicationHandlerBase, ABC):
    """common for all MPI based.
    TODO: some functions in the end should be moved to lower class
    """
    def __init__(self, rank, local_rank, backend, world_size, num_stages, stage, receive_ranks, send_ranks, target_tensor_names, ranks_in_previous_stage, ranks_in_next_stage, req_grad, outputs_req_grad, pipe_config: PipelineConfig, cpu, num_chunks, device, GRAD_UGLY_SHAMEFUL_NAME='_grad', verbose=False):
        super().__init__()
        self.tensor_dtypes = None
        # Model-level inputs/outputs come from the dataloader / go to the user,
        # so they are not communicated between stages - drop them.
        for to_del in [receive_ranks, send_ranks]:
            for inout in [pipe_config.d['model_inputs'], pipe_config.d['model_outputs']]:
                for i in inout:
                    if (i in to_del):
                        del to_del[i]
        assert isinstance(GRAD_UGLY_SHAMEFUL_NAME, str)
        self.GRAD_UGLY_SHAMEFUL_NAME = GRAD_UGLY_SHAMEFUL_NAME
        self.verbose = verbose
        self.rank = rank
        self.local_rank = local_rank
        self.backend = backend
        self.logger = logging.getLogger('msnag')
        self.stage = stage
        self.pipe_config = pipe_config
        self.receive_ranks = receive_ranks
        self.send_ranks = send_ranks
        self.tensors_names_with_no_grad = set()
        for (i, v) in req_grad.items():
            assert isinstance(v, bool)
            # NOTE(review): the isinstance re-check is redundant after the assert.
            if isinstance(v, bool):
                if (not v):
                    self.tensors_names_with_no_grad.add(i)
        for (i, v) in outputs_req_grad.items():
            assert isinstance(v, bool), str((i, v))
            if (not v):
                self.tensors_names_with_no_grad.add(i)
        if target_tensor_names:
            self.tensors_names_with_no_grad.update(target_tensor_names)
        self.cpu = cpu
        self.device = device
        self.world_size = world_size
        self.num_chunks = num_chunks
        # Gradients flow opposite to activations: we receive grads for tensors
        # we send, and send grads for tensors we receive.
        self.activations_rcv_items = list(self.receive_ranks.items())
        self.grad_rcv_items_without_extention = [(i, v) for (i, v) in self.send_ranks.items() if (i not in self.tensors_names_with_no_grad)]
        self.grad_send_items_without_extention = [(i, v) for (i, v) in self.receive_ranks.items() if (i not in self.tensors_names_with_no_grad)]
        self.grad_rcv_items = [((i + GRAD_UGLY_SHAMEFUL_NAME), v) for (i, v) in self.send_ranks.items() if (i not in self.tensors_names_with_no_grad)]
        self.grad_send_items = [((i + GRAD_UGLY_SHAMEFUL_NAME), v) for (i, v) in self.receive_ranks.items() if (i not in self.tensors_names_with_no_grad)]
        self.grad_rcv_dict_without_extention = OrderedDict(self.grad_rcv_items_without_extention)
        self.grad_send_dict_without_extention = OrderedDict(self.grad_send_items_without_extention)
        self.grad_send_dict = OrderedDict(self.grad_send_items)
        self.grad_rcv_dict = OrderedDict(self.grad_rcv_items)
        tag_info = tensor_tags_from_config(pipe_config)
        (self.tensor_tags, self.TOTAL_TAGS) = tag_info
        if target_tensor_names:
            self.ranks_in_next_stage = ranks_in_next_stage
            self._register_target_tensor(target_tensor_names, ranks_in_previous_stage, ranks_in_next_stage)
    def init_process_group(self, *args, **kw):
        """Initialize the torch.distributed process group; sanity-check world size."""
        backend = self.backend
        rank = self.rank
        local_rank = self.local_rank
        world_size = self.world_size
        dist.init_process_group(backend)
        assert (dist.get_world_size() == world_size)
        self.logger.info(f'Initialized process group; backend: {backend}, rank: {rank}, local_rank: {local_rank}, world_size: {world_size}')
    def _register_target_tensor(self, target_tensor_names, ranks_in_previous_stage, ranks_in_next_stage):
        # Deprecated path: forwards target tensors through the pipeline.
        warnings.warn('Sending targets in pipeline is deprecated.')
        for target_tensor_name in target_tensor_names:
            if (len(ranks_in_previous_stage) > 0):
                self.receive_ranks[target_tensor_name] = ranks_in_previous_stage
            if (len(self.ranks_in_next_stage) > 0):
                self.send_ranks[target_tensor_name] = ranks_in_next_stage
    def set_tensor_shapes(self, tensor_shapes):
        # name -> shape, used when allocating recv buffers.
        self.tensor_shapes = tensor_shapes
    def set_tensor_dtypes(self, tensor_dtypes):
        # name -> dtype, used when allocating recv buffers.
        self.tensor_dtypes = tensor_dtypes
    def init_buffers_ctx(self, buffers_ctx):
        """Unpack the buffer-context tuple prepared by the caller."""
        (training_tensor_shapes, eval_tensor_shapes, training_tensor_dtypes, eval_tensor_dtypes, last_batch_train_shapes, last_batch_test_shapes, max_buffers, keep_buffers_alive) = buffers_ctx
        self.training_tensor_shapes = training_tensor_shapes
        self.eval_tensor_shapes = eval_tensor_shapes
        self.training_tensor_dtypes = training_tensor_dtypes
        self.eval_tensor_dtypes = eval_tensor_dtypes
        self.last_batch_train_shapes = last_batch_train_shapes
        self.last_batch_test_shapes = last_batch_test_shapes
        self.max_buffers = max_buffers
        self.keep_buffers_alive = keep_buffers_alive
        self.changed_shapes_last_batch_fwd = False
        self.changed_shapes_last_batch_bwd = False
    def fix_after_recv(self, x, is_grad=False):
        """Fixes received buffer after sync wait ends.

        For gradients: when the same tensor's grad arrives from several ranks,
        drop Nones and reduce by summation; otherwise pass items through.
        """
        if is_grad:
            out = []
            ix = iter(x)
            for (name, ranks) in self.grad_rcv_items:
                if (len(ranks) > 1):
                    tensors = [t for t in [next(ix) for _ in range(len(ranks))] if (t is not None)]
                    out.append(torch.stack(tensors).sum(0))
                else:
                    out.append(next(ix))
            return out
        return x
|
def zip_discard_compr(*iterables, sentinel=object()):
    """Zip iterables of unequal length, dropping the padding entries that
    zip_longest inserts for the shorter ones."""
    padded = zip_longest(*iterables, fillvalue=sentinel)
    return [[item for item in group if item is not sentinel] for group in padded]
|
def grouper(iterable, n):
    """Collect data into *non fixed-length* chunks or blocks
    (changed the one in itertools recepies): the final chunk may be shorter
    than *n* instead of being padded.
    """
    fill = object()
    it = iter(iterable)
    chunks = []
    for group in zip_longest(*([it] * n), fillvalue=fill):
        chunks.append([item for item in group if item is not fill])
    return chunks
|
class FuturesHandlerBase(abc.ABC):
    """Interface for tracking/draining async send request objects."""
    def __init__(self):
        pass
    @abc.abstractmethod
    def after_forward(self, sent_request_objects, done_fwds, training):
        """Record request objects produced by a forward send."""
        pass
    @abc.abstractmethod
    def after_backward(self, sent_request_objects, done_bwds):
        """Record request objects produced by a backward send."""
        pass
    @abc.abstractmethod
    def clean_train(self):
        """Drain all outstanding sends at the end of a training run."""
        pass
    @abc.abstractmethod
    def clean_eval(self):
        """Drain all outstanding sends at the end of an eval run."""
        pass
|
class CommunicationHandlerBase(abc.ABC):
    """ Base class for all communication handlers.
    Handles communication between stages.
    """
    def __init__(self):
        pass
    def init_buffers_ctx(self, buffers_ctx):
        # Optional hook: store shapes/dtypes context before init_buffers().
        pass
    def init_buffers(self):
        # Optional hook: allocate recv/send buffers.
        pass
    @abc.abstractmethod
    def send_activations(self, x, batch_index):
        """
        Returns:
            request_objects: list of async handler objects
            sent_items: list of items sent
        """
        pass
    @abc.abstractmethod
    def send_gradients(self, x, batch_index):
        """
        Returns:
            request_objects: list of async handler objects
            sent_items: list of items sent
        """
        pass
    @abc.abstractmethod
    def recv_activations(self, x, batch_index):
        """
        Returns
            request_objects: list of async handler objects
        """
        pass
    @abc.abstractmethod
    def recv_gradients(self, x, batch_index):
        """
        Returns
            request_objects: list of async handler objects
        """
        pass
    @abc.abstractmethod
    def init_process_group(self, *args, **kw):
        pass
    def train(self):
        # Optional hook: switch to training shapes/dtypes/buffers.
        pass
    def eval(self):
        # Optional hook: switch to eval shapes/dtypes/buffers.
        pass
    def pre_recv_gradients(self, batch_idx, num_batches, last_due_end):
        pass
    def wait_recv_gradients(self, *args):
        pass
    def get_data_forward(self, batch_idx, num_batches, last_due_end):
        pass
    @staticmethod
    @abc.abstractmethod
    def create_futures_handler(*args, **kw) -> FuturesHandlerBase:
        pass
|
class MultiprocessingCommunicationHandler(SimpleCommBase):
    """Comm handler that moves tensors between same-node processes via shared queues.

    Flow control: each (sender, receiver) rank pair has a "buffer reuse" queue;
    the receiver puts an ack after copying an item, and the sender blocks on
    that queue before sending the next one.
    """
    def __init__(self, share, stage_to_device_map, local_rank_to_device_map, *args, **kw):
        kw['GRAD_UGLY_SHAMEFUL_NAME'] = '_grad'
        super().__init__(*args, **kw)
        (rcv_queues, buffer_reuse_queues) = share
        self.rcv_queues = rcv_queues
        self.buffer_reuse_queues = buffer_reuse_queues
        self.local_rank_to_device_map = local_rank_to_device_map
        self._create_streams()
        self.rcv_shared_parameters = dict()
        self.send_shared_parameters = defaultdict(set)
        self.send_buffers = dict()
        self.send_buffers_versions = {}
        # Single-worker pools serialize sends per direction; each worker pins
        # itself to this stage's CUDA device.
        self.pool_send_act = concurrent.futures.ThreadPoolExecutor(1, initializer=torch.cuda.set_device, initargs=(self.device,))
        self.pool_send_grad = concurrent.futures.ThreadPoolExecutor(1, initializer=torch.cuda.set_device, initargs=(self.device,))
        self.sent_object_patience = self.pipe_config.max_send_depth_for_stage(self.stage)
    def _create_streams(self):
        # Dedicated high-priority CUDA streams so comm copies overlap compute.
        self.grad_send_stream = torch.cuda.Stream(self.device, priority=(- 2))
        self.acti_send_stream = torch.cuda.Stream(self.device, priority=(- 1))
        self.grad_recv_stream = torch.cuda.Stream(self.device, priority=(- 2))
        self.acti_recv_stream = torch.cuda.Stream(self.device, priority=(- 2))
        self.main_stream = torch.cuda.current_stream()
    def init_buffers(self):
        """Decide keep-alive policy; send initial acks and set up send slots."""
        training_tensor_shapes = self.training_tensor_shapes
        eval_tensor_shapes = self.eval_tensor_shapes
        training_tensor_dtypes = self.training_tensor_dtypes
        eval_tensor_dtypes = self.eval_tensor_dtypes
        last_batch_train_shapes = self.last_batch_train_shapes
        last_batch_test_shapes = self.last_batch_test_shapes
        keep_buffers_alive = self.keep_buffers_alive
        shapes_are_equal = (eval_tensor_shapes == training_tensor_shapes)
        dtypes_are_equal = (eval_tensor_dtypes == training_tensor_dtypes)
        dtypes_and_shapes_are_equal = (shapes_are_equal and dtypes_are_equal)
        no_different_last_batch_shapes = ((last_batch_train_shapes is None) and (last_batch_test_shapes is None))
        if (dtypes_and_shapes_are_equal and no_different_last_batch_shapes):
            keep_buffers_alive = True
        elif (keep_buffers_alive and dtypes_and_shapes_are_equal):
            # NOTE(review): this branch fires when shapes match but a last batch
            # differs (first branch caught the no-last-batch case) - the guard
            # reads oddly; confirm the intended condition.
            raise ValueError("got keep_buffers_alive=True, but can't because last batch has different size.")
        self.keep_buffers_alive = keep_buffers_alive
        self.dtypes_and_shapes_are_equal = dtypes_and_shapes_are_equal
        self._send_ack_on_start(for_grads=False)
        self._send_ack_on_start(for_grads=True)
        self._create_send_buffers(self.send_ranks, for_grads=False)
        self._create_send_buffers(self.grad_send_dict, for_grads=True)
    def _send_ack_on_start(self, for_grads=False):
        """The receiver sends an initial "ack" to the sender.
        No buffer is actually created.

        Extra acks are sent when the configured send depth between stages
        exceeds 1, so the sender can have several items in flight.
        """
        is_activations = (not for_grads)
        if is_activations:
            tensor_names = self.receive_ranks.keys()
            senders = self.receive_ranks
        else:
            tensor_names = self.grad_rcv_dict_without_extention.keys()
            senders = self.grad_rcv_dict_without_extention
        for tensor_name in tensor_names:
            sending_ranks = senders[tensor_name]
            if is_activations:
                assert (len(sending_ranks) == 1)
            for sending_rank in sending_ranks:
                reuse_q = self.buffer_reuse_queues[sending_rank][self.rank]
                n_acks_to_send = 1
                if is_activations:
                    send_stage = self.pipe_config.rank_to_stage_idx(sending_rank)
                    recv_stage = self.pipe_config.rank_to_stage_idx(self.rank)
                    send_dist_between_stages = self.pipe_config.send_depth_between_stages(send_stage=send_stage, recv_stage=recv_stage, is_activations=True)
                    if (send_dist_between_stages > 1):
                        sent_items_between_stages = self.pipe_config.sent_items_between_stages(send_stage, recv_stage)
                        is_single_tensor_between_stages = (len(sent_items_between_stages) == 1)
                        if (not is_single_tensor_between_stages):
                            raise NotImplementedError(f'Items: total of {len(sent_items_between_stages)} items are send between stages with patience={send_dist_between_stages} we currently support only 1, such items. {sent_items_between_stages}')
                        required_patience = send_dist_between_stages
                        n_acks_to_send = required_patience
                        warnings.warn('Sending multiple acks between stages to allow higher patience')
                for _ in range(n_acks_to_send):
                    reuse_q.put(None)
    def _create_send_buffers(self, tensor_send_ranks, for_grads):
        """The sender creates (empty) per-destination send-buffer slots."""
        is_activations = (not for_grads)
        tensor_names = tensor_send_ranks.keys()
        for tensor_name in tensor_names:
            d = {}
            for rank in tensor_send_ranks[tensor_name]:
                d[rank] = None
            self.send_buffers[tensor_name] = d
    def init_process_group(self, *args, **kw):
        # Queue-based transport: no torch.distributed group needed.
        pass
    def _recv_tensors_p2p(self, batch_idx, ranks_dict_items, is_activations):
        """Pop one item per (tensor, sender) from the recv queues; clone it and
        ack the sender so its buffer can be reused."""
        try:
            stream = (self.grad_recv_stream if (not is_activations) else self.acti_recv_stream)
            with torch.cuda.stream(stream):
                request_objects = []
                if (not is_activations):
                    pass
                for (tensor_name, receive_ranks) in ranks_dict_items:
                    if (not is_activations):
                        pass
                    for receive_rank in receive_ranks:
                        q = self.rcv_queues[self.rank][receive_rank]
                        if self.verbose:
                            tensor_tag = (self.tensor_tags[tensor_name] + (self.TOTAL_TAGS * batch_idx))
                            self.logger.info(f'rank={self.local_rank}: q.get(), src={receive_rank}, tag={tensor_tag}, name={tensor_name}')
                        x = q.get()
                        if self.verbose:
                            tensor_tag = (self.tensor_tags[tensor_name] + (self.TOTAL_TAGS * batch_idx))
                            self.logger.info(f'rank={self.local_rank}: done q.get(), src={receive_rank}, tag={tensor_tag}, name={tensor_name}')
                        if isinstance(x, torch.Tensor):
                            event = torch.cuda.Event(blocking=True)
                            # Clone so the sender's buffer can be reused right away.
                            t = x.detach().clone()
                            event.record(stream)
                            reuse_q = self.buffer_reuse_queues[receive_rank][self.rank]
                            # Only ack after the clone has actually finished.
                            event.synchronize()
                            reuse_q.put(None)
                            request_objects.append(t)
                        else:
                            # Non-tensor payload: nothing to copy, ack immediately.
                            reuse_q = self.buffer_reuse_queues[receive_rank][self.rank]
                            reuse_q.put(None)
                            request_objects.append(x)
        except Exception as e:
            print('ERROR in recv thread')
            print(sys.exc_info())
            raise e
        return request_objects
    def recv_activations(self, x, batch_idx, is_last_batch):
        return self._recv_tensors_p2p(batch_idx, self.activations_rcv_items, is_activations=True)
    def recv_gradients(self, x, batch_idx, is_last_batch):
        return self._recv_tensors_p2p(batch_idx, self.grad_rcv_items, is_activations=False)
    def _send_tensors_p2p(self, x, batch_idx, ranks_dict_items, is_grad):
        """Runs on a send-pool thread: wait for each destination's reuse-ack,
        move the tensor to the destination's device, then enqueue it."""
        try:
            assert (len(x) == len(ranks_dict_items)), ((((str((len(x), len(ranks_dict_items))) + f'is_grad:{is_grad}') + f'batch:{batch_idx}') + f'rank:{self.rank}') + str(ranks_dict_items))
            request_objects = []
            prev_work_event = torch.cuda.Event(blocking=True)
            prev_work_event.record(self.main_stream)
            stream = (self.grad_send_stream if is_grad else self.acti_send_stream)
            with torch.cuda.stream(stream):
                # Order the send stream after compute already queued on main stream.
                prev_work_event.wait(stream)
                with torch.no_grad():
                    if is_grad:
                        pass
                    for (tensor, (tensor_name, send_ranks)) in zip(x, ranks_dict_items):
                        if is_grad:
                            pass
                        if isinstance(tensor, torch.nn.Parameter):
                            tensor.share_memory_()
                        my_buff_reuse_queues = self.buffer_reuse_queues[self.rank]
                        if isinstance(tensor, torch.Tensor):
                            tensor = tensor.detach()
                            send_buffers = self.send_buffers[tensor_name]
                            for send_rank in send_ranks:
                                buff_q = my_buff_reuse_queues[send_rank]
                                if self.verbose:
                                    self.logger.info(f'rank={self.rank}: getting reuse buffer from {send_rank}, for {tensor_name} (start)')
                                # Block until the receiver acked the previous send.
                                buff_q.get()
                                if self.verbose:
                                    self.logger.info(f'rank={self.rank}: getting reuse buffer from {send_rank} for {tensor_name} (done)')
                                out_q = self.rcv_queues[send_rank][self.rank]
                                buff = send_buffers[send_rank]
                                if (_COPY_INSTEAD_CLONE_WORKING and (buff is not None) and (tensor.size() == buff.size())):
                                    # Reuse the existing device buffer in place.
                                    buff.copy_(tensor)
                                else:
                                    buff = tensor.to(self.local_rank_to_device_map[send_rank])
                                    send_buffers[send_rank] = buff
                                event = torch.cuda.Event(blocking=True)
                                stream.record_event(event)
                                # Ensure the device copy finished before publishing.
                                event.synchronize()
                                out_q.put(buff)
                                if self.verbose:
                                    tensor_tag = (self.tensor_tags[tensor_name] + (self.TOTAL_TAGS * batch_idx))
                                    self.logger.info(f'rank={self.rank}: done send(), dst={send_rank}, tag={tensor_tag}, name={tensor_name}')
                        else:
                            for send_rank in send_ranks:
                                buff_q = my_buff_reuse_queues[send_rank]
                                buff_q.get()
                                out_q = self.rcv_queues[send_rank][self.rank]
                                out_q.put(tensor)
                                if self.verbose:
                                    tensor_tag = (self.tensor_tags[tensor_name] + (self.TOTAL_TAGS * batch_idx))
                                    self.logger.info(f'rank={self.rank}: done send(), dst={send_rank}, tag={tensor_tag}, name={tensor_name}')
        except Exception as e:
            print('ERRRRRORRRRR in send thread')
            print(sys.exc_info())
            raise e
        return request_objects
    def send_activations(self, x, batch_idx):
        # Returns a Future; FuturesHandler resolves it later.
        future = self.pool_send_act.submit(self._send_tensors_p2p, x, batch_idx, self.send_ranks.items(), False)
        return future
    def send_gradients(self, x, batch_idx):
        future = self.pool_send_grad.submit(self._send_tensors_p2p, x, batch_idx, self.grad_send_items, True)
        return future
    def _ensure_bwd_send_buffers_size_set(self, last_due_end):
        # Queue transport needs no real resize; only track the flag.
        if (last_due_end and self.last_batch_train_shapes):
            print(f'rank: {self.rank} replacing buffers for last batch, backward')
            self.changed_shapes_last_batch_bwd = True
        elif self.changed_shapes_last_batch_bwd:
            self.changed_shapes_last_batch_bwd = False
    def _ensure_fwd_send_buffers_size_set(self, last_due_end):
        """Here from legacy reasons
        TODO: this is currently its unneeded
        """
        if (last_due_end and ((self.training and self.last_batch_train_shapes) or ((not self.training) and self.last_batch_test_shapes))):
            print(f'rank: {self.rank} replacing buffers for last batch, forward')
            self.changed_shapes_last_batch_fwd = True
    def train(self):
        """Sets training mode.
        Also Handles the transition : eval -> train
        """
        self.training = True
        self.set_tensor_shapes(self.training_tensor_shapes)
        self.set_tensor_dtypes(self.training_tensor_dtypes)
    def eval(self):
        """Sets evaluation mode.
        Also handles the transition : train -> eval
        Also handles buffer sync in case stage is replicated
        """
        self.training = False
        self.set_tensor_shapes(self.eval_tensor_shapes)
        self.set_tensor_dtypes(self.eval_tensor_dtypes)
    def get_data_forward(self, batch_idx, num_batches, last_due_end):
        self._ensure_fwd_send_buffers_size_set(last_due_end)
        x = self.recv_activations(None, batch_idx, last_due_end)
        return x
    def pre_recv_gradients(self, batch_idx, num_batches, last_due_end):
        self._ensure_bwd_send_buffers_size_set(last_due_end)
    def wait_recv_gradients(self, *args):
        g = self.recv_gradients(None, *args)
        g = self.fix_after_recv(g, True)
        return g
    def create_futures_handler(self, *args, **kw):
        self.futures_handler = FuturesHandler(self.pipe_config, self.stage)
        return self.futures_handler
|
class FuturesHandler(FuturesHandlerBase):
    """Handle sent object futures: collect them per step and resolve them
    in bulk at cleanup points."""
    def __init__(self, pipe_config: PipelineConfig, my_stage_id: int):
        super().__init__()
        self.sent_object_patience = pipe_config.max_send_depth_for_stage(my_stage_id)
        self.last_fwd_result = []
        self.last_bwd_result = []
    @staticmethod
    def _drain(results):
        # Resolve every recorded future (or list of futures), then forget them.
        for req in results:
            if isinstance(req, list):
                for sub in req:
                    sub.result()
            elif (req is not None):
                req.result()
        results.clear()
    def after_forward(self, ro, done_fwds, training):
        self.last_fwd_result.append(ro)
        if (not training):
            # Eval steps are drained eagerly.
            self.clean_eval()
    def after_backward(self, ro, done_bwds):
        self.last_bwd_result.append(ro)
    def clean_train(self):
        self._drain(self.last_fwd_result)
        self._drain(self.last_bwd_result)
    def clean_eval(self):
        self._drain(self.last_fwd_result)
|
class P2PCommunicationHandler(BufferSimpleCommBase):
    """Comm handler using torch.distributed point-to-point isend/irecv."""
    def __init__(self, *args, **kw):
        kw['GRAD_UGLY_SHAMEFUL_NAME'] = '_grad'
        super().__init__(*args, **kw)
    def set_tensor_dtypes(self, tensor_dtypes):
        # Rebuild the tensor wrapper only when the dtype mapping actually changes.
        if (tensor_dtypes is not self.tensor_dtypes):
            super().set_tensor_dtypes(tensor_dtypes)
            self.tensor_comm_warper = TensorWrapper(self, dtypes=self.tensor_dtypes)
    def fix_after_recv(self, x, is_grad=False):
        """Unwrap received tensors (scalar/None placeholders etc.) before the
        base-class fixups run."""
        res = []
        ix = iter(x)
        recv_ranks = (self.grad_rcv_dict_without_extention if is_grad else self.receive_ranks)
        for (name, ranks) in recv_ranks.items():
            for _ in ranks:
                res.append(self.tensor_comm_warper.convert_activations_recv(name, next(ix)))
        return super().fix_after_recv(res, is_grad=is_grad)
    def init_process_group(self, *args, **kw):
        super().init_process_group(*args, **kw)
    def _recv_tensors_p2p(self, buffers, batch_idx, ranks_dict_items, is_grad):
        """Post one irecv per (tensor, sender) into *buffers*; return handles."""
        ix = iter(buffers)
        with torch.no_grad():
            request_objects = []
            for (tensor_name, receive_ranks) in ranks_dict_items:
                if is_grad:
                    pass
                for receive_rank in receive_ranks:
                    tensor = next(ix)
                    # Tag encodes (tensor identity, batch index) so batches don't mix.
                    tensor_tag = (self.tensor_tags[tensor_name] + (self.TOTAL_TAGS * batch_idx))
                    if self.verbose:
                        self.logger.info(f'rank={self.local_rank}: irecv, src={receive_rank}, tag={tensor_tag}, name={tensor_name}, buffshape={tensor.shape}')
                    request_obj = dist.irecv(tensor, receive_rank, tag=tensor_tag)
                    request_objects.append(request_obj)
        return request_objects
    def recv_activations(self, x, batch_idx):
        return self._recv_tensors_p2p(x, batch_idx, self.activations_rcv_items, is_grad=False)
    def recv_gradients(self, x, batch_idx):
        return self._recv_tensors_p2p(x, batch_idx, self.grad_rcv_items, is_grad=True)
    def _send_tensors_p2p(self, x, batch_idx, ranks_dict_items, is_grad):
        """Post one isend per (tensor, destination); return the request handles."""
        with torch.no_grad():
            request_objects = []
            assert (len(x) == len(ranks_dict_items))
            for (tensor, (tensor_name, send_ranks)) in zip(x, ranks_dict_items):
                tensor_tag = (self.tensor_tags[tensor_name] + (self.TOTAL_TAGS * batch_idx))
                if is_grad:
                    pass
                for send_rank in send_ranks:
                    if (is_grad and tensor_name.endswith(self.GRAD_UGLY_SHAMEFUL_NAME)):
                        # Strip the '_grad' suffix to look up the wrapper by base name.
                        cname = tensor_name[:(- len(self.GRAD_UGLY_SHAMEFUL_NAME))]
                    else:
                        cname = tensor_name
                    tensor = self.tensor_comm_warper.convert_activations_send(cname, tensor)
                    tensor = tensor.detach().contiguous()
                    if self.verbose:
                        self.logger.info(f'rank={self.local_rank}: isend, dst={send_rank}, tag={tensor_tag}, name={tensor_name}, shape={tensor.shape}')
                    if (not self.cpu):
                        # Ensure producing kernels finished before handing to isend.
                        torch.cuda.current_stream(self.device).synchronize()
                    request_obj = dist.isend(tensor, send_rank, tag=tensor_tag)
                    request_objects.append(request_obj)
        return request_objects
    def send_activations(self, x, batch_idx):
        return self._send_tensors_p2p(x, batch_idx, self.send_ranks.items(), False)
    def send_gradients(self, x, batch_idx):
        return self._send_tensors_p2p(x, batch_idx, self.grad_send_items, is_grad=True)
|
def tensor_tags_from_config(pipe_config: PipelineConfig, num_chunks=1, target_tensor_names=None, GRAD_UGLY_SHAMEFUL_NAME='_grad'):
    """Assign a distinct communication tag to every communicated tensor and its
    gradient counterpart (suffix ``GRAD_UGLY_SHAMEFUL_NAME``).

    Tags start at 1 and advance by *num_chunks* per tensor. Returns
    ``(tensor_tags, total_tags)`` where ``total_tags`` is the number of tagged
    tensors.
    """
    tensor_tags = {}
    next_tag = 1

    def _assign(name):
        # Allocate a tag once per unique name, skipping already-seen ones.
        nonlocal next_tag
        if (name not in tensor_tags):
            tensor_tags[name] = next_tag
            next_tag += num_chunks

    for (stage_idx, stage) in pipe_config.d['stages'].items():
        req_grad = pipe_config.get_inputs_req_grad_for_stage(stage_idx)
        outputs_req_grad = pipe_config.get_outputs_req_grad_for_stage(stage_idx)
        for suffix in ['', GRAD_UGLY_SHAMEFUL_NAME]:
            for name in stage['inputs']:
                if (name not in req_grad):
                    # Only dataloader-fed model inputs may lack a req_grad entry.
                    assert (name in pipe_config.d['model_inputs'])
                    continue
                _assign(name + suffix)
            for name in stage['outputs']:
                if (name not in outputs_req_grad):
                    assert (name in pipe_config.d['model_outputs'])
                    continue
                _assign(name + suffix)
    if target_tensor_names:
        for target_tensor_name in sorted(target_tensor_names):
            tensor_tags[target_tensor_name] = next_tag
            next_tag += num_chunks
    total_tags = len(tensor_tags)
    return (tensor_tags, total_tags)
|
def None_tensor():
    """Return the 0-dim NaN tensor used as an over-the-wire stand-in for ``None``."""
    nan_sentinel = torch.tensor(float('nan'))
    return nan_sentinel
|
def is_None_tensor(recved_tensor):
    """True iff *recved_tensor* is the 0-dim NaN sentinel produced by ``None_tensor``."""
    is_scalar = (recved_tensor.size() == torch.Size())
    return (is_scalar and np.isnan(recved_tensor.item()))
|
class TensorWrapper():
    """Hack for sending everything as tensors
    e.g:
        int -> tensor -> send -> recv -> int

    TODO: mapping conventions
    """
    def __init__(self, comm_handler: SimpleCommBase, dtypes: Dict[(str, type)]):
        # name -> torch dtype to wrap non-tensor values with before sending.
        self.send_dtype_map = TensorWrapper.make_send_dtype_map(dtypes)
        # name -> callable (or torch dtype) to convert received tensors back.
        self.recv_dtype_map = TensorWrapper.make_recv_dtype_map(dtypes)
        self.dtypes = dtypes
        self.comm_handler = comm_handler
    def convert_activations_send(self, name: str, value):
        """Wrap *value* so it can be sent as a tensor.

        Tensors pass through unchanged; ``None`` becomes a zeros tensor (when a
        dtype is known) or the NaN sentinel; anything else is wrapped with
        ``torch.tensor``.
        """
        if isinstance(value, Tensor):
            return value
        elif (value is None):
            if ((dtype := self.dtypes[name]) is not None):
                # FIX: message read "for got None"; corrected to "but got None".
                warnings.warn(f'expected to send dtype {self.dtypes[name]} for tensor {name} but got None instead, will send a tensor of zeros')
                shape = self.comm_handler.tensor_shapes[name]
                return torch.zeros(shape, dtype=dtype)
            return None_tensor()
        dtype = self.send_dtype_map.get(name, None)
        return torch.tensor(value, dtype=dtype)
    def convert_activations_recv(self, name: str, recved_tensor: Tensor):
        """Invert convert_activations_send: restore None / Python values."""
        if is_None_tensor(recved_tensor):
            return None
        elif (not isinstance(self.recv_dtype_map[name], torch.dtype)):
            # A non-torch dtype entry is a converter callable (e.g. int, float).
            dtype = self.recv_dtype_map[name]
            return dtype(recved_tensor)
        else:
            return recved_tensor
    @staticmethod
    def make_send_dtype_map(dtypes):
        # Keep only names whose declared type maps to a torch dtype via TABLE.
        d = {}
        for (name, dtype) in dtypes.items():
            if (dtype in TABLE):
                d[name] = TABLE[dtype]
        return d
    @staticmethod
    def make_recv_dtype_map(dtypes):
        return dtypes
|
def get_propagator_cls(args) -> Type[PipelineDataPropagator]:
    """Resolve ``args.data_propagator`` to a propagator class.

    Raises NotImplementedError listing the available choices when the name is
    unknown (or maps to ``None``).
    """
    chosen = AVAILABLE_PROPAGATORS.get(args.data_propagator)
    if chosen is None:
        raise NotImplementedError(f'args.data_propagator={args.data_propagator}, AVAILABLE_PROPAGATORS={AVAILABLE_PROPAGATORS.keys()}')
    return chosen
|
class AutomaticPipelinePropagator(PipelineDataPropagator):
    """Propagator for automatically-partitioned pipelines.

    Inputs may arrive both from the dataloader and from the previous stage;
    labels are loaded where needed rather than sent stage-to-stage.
    """
    SENDING_LABELS_IN_PIPELINE = False

    def __init__(self, device, is_last_partition, is_first_partition, stage_id, pipe_config: PipelineConfig):
        super().__init__()
        self.device = device
        pcs = pipe_config.d['stages'][stage_id]
        # Names of this stage's inputs that come from the dataloader (vs. the pipe).
        inputs_from_dl = pipe_config.get_dataset_inputs_for_stage(stage_id)
        self.inputs_from_dl = inputs_from_dl
        self.len_inputs_from_dl = len(inputs_from_dl)
        num_total_inputs = len(pcs['inputs'])
        # "Depth-first" stage == the deepest stage in the pipeline topology.
        is_depth_first_stage_by_depth = (pipe_config.get_depth_for_stage(stage_id) == (pipe_config.pipeline_depth - 1))
        if ((self.len_inputs_from_dl == num_total_inputs) and (not is_depth_first_stage_by_depth)):
            # NOTE(review): message typo "dataloder" kept as-is (runtime string).
            raise NotImplementedError(f'a non-first stage ({stage_id}) got all {num_total_inputs} inputs from dataloder, we currently assume it does not happen')
        if (is_depth_first_stage_by_depth and (not is_first_partition)):
            warnings.warn('experimentally allowing is_depth_first_stage_by_depth and not is_first_partition')
        if is_last_partition:
            batch_dim = pipe_config.d['batch_dim']
            # Find the index of the first batched input; its size along batch_dim
            # is reported together with the data so statistics get the batch size.
            for (i, is_batched) in enumerate(pcs['inputs'].values()):
                is_batched = is_batched['is_batched']
                if is_batched:
                    break
            else:
                # No batched input found. NOTE(review): "except" is presumably a
                # typo for "expect" (runtime string, left unchanged here).
                raise NotImplementedError('we except one of last partition inputs to be batched, so the batch will be given to statistics')

            # Closure deliberately captures loop variable `i` (index of the first
            # batched input, fixed by the for/else above) and `batch_dim`.
            def unpack_cls(self, x):
                assert (isinstance(x, tuple) or isinstance(x, list))
                return (x, x[i].size(batch_dim))
        else:

            def unpack_cls(self, x):
                assert (isinstance(x, tuple) or isinstance(x, list))
                return (x,)
        # Patch the chosen unpacker onto this instance, replacing the stub below.
        self.unpack_data_for_partition = types.MethodType(unpack_cls, self)

    def unpack_data_for_partition(self, data):
        # Stub: replaced per-instance in __init__ via types.MethodType.
        raise NotImplementedError('This method should be patched at init')

    def pack_send_context(self, model_out, *ctx):
        # Detach tensors (and make them contiguous) before sending downstream;
        # non-tensor context items are forwarded as-is.
        return tuple(((x.detach().contiguous() if isinstance(x, torch.Tensor) else x) for x in chain(model_out, ctx)))

    def preload_from_dataloader(self, dlitr):
        """Pull the next item from the dataloader iterator and split it into
        (inputs for this partition, values kept outside for the loss/statistics).
        """
        if (dlitr is None):
            return ((), ())
        y = next(dlitr)
        with torch.no_grad():
            # Move everything to this stage's device; normalize to a tuple.
            if (isinstance(y, tuple) or isinstance(y, list)):
                y = tuple((z.to(self.device, non_blocking=True) for z in y))
            else:
                y = (y.to(self.device, non_blocking=True),)
        # The first len_inputs_from_dl entries feed the partition; the rest stay outside.
        to_partition = y[:self.len_inputs_from_dl]
        to_outside_loss = y[self.len_inputs_from_dl:]
        return (to_partition, to_outside_loss)
|
class AutomaticPipelinePropagatorNonContig(PipelineDataPropagator):
    """Send-path variant that does NOT detach or force outputs contiguous:
    the model outputs and extra context are forwarded exactly as given."""

    def __init__(self, *args, **kw):
        super().__init__()

    def pack_send_context(self, model_out, *ctx):
        # Flatten outputs and context into a single tuple, unmodified.
        return tuple(model_out) + tuple(ctx)
|
class CVTargetInPipePropagator(PipelineDataPropagator):
    """Propagator for CV pipelines where the target (label) travels through the
    pipeline alongside the activations as the trailing element of ``data``."""

    def __init__(self, device, is_last_partition, is_first_partition):
        super().__init__()
        self.device = device
        # Pick the unpacking strategy once, according to this stage's position.
        if is_last_partition:
            chosen = self.unpack_data_for_last_partition
        elif is_first_partition:
            chosen = self.unpack_data_for_first_partition
        else:
            chosen = self.unpack_data_for_mid_partition
        self.unpack_cls = chosen

    def unpack_data_for_partition(self, data):
        return self.unpack_cls(data)

    def unpack_data_for_last_partition(self, data):
        # Last stage: the target is needed here for the loss - move it to device.
        (*x, y) = data
        with torch.no_grad():
            y = y.to(self.device, non_blocking=True)
        return (x, y)

    def unpack_data_for_first_partition(self, data):
        # First stage: move the input to device; the target is forwarded untouched.
        (x, y) = data
        with torch.no_grad():
            x = x.to(self.device, non_blocking=True)
        return (x, y)

    def unpack_data_for_mid_partition(self, data):
        # Middle stages: nothing needs moving.
        (*x, y) = data
        return (x, y)

    def pack_send_context(self, model_out, *ctx):
        return (*model_out, *ctx)
|
class PipelineDataPropagator(abc.ABC):
    """Strategy describing how to handle data loaded or passed through the pipeline.

    Usage:
        # Get data:
        (1) from_prev_stage = (...)  # get it from somewhere
            to_stage, to_somewhere_else = propagator.preload_from_dataloader(dlitr)
            x = (*to_stage, *from_prev_stage)
        # Run the model:
        (2) x, *ctx = propagator.unpack_data_for_partition(data)
            model_out = model(x, ...)
        # Unify outside context:
        (3) ctx = (*ctx, *to_somewhere_else)
        # Send data:
        (4) t = propagator.pack_send_context(model_out, *ctx)
            send(t) ...
    """

    def __init__(self, *args, **kw):
        pass

    @abc.abstractmethod
    def unpack_data_for_partition(self, data) -> Tuple[Tuple[Any], Tuple[Any]]:
        """In case we send labels in the pipeline: extract them from the output.
        For the last partition: extract what is loaded for outside loss and
        statistics (e.g. batch size, ...)."""

    @abc.abstractmethod
    def pack_send_context(self, model_out, *ctx) -> Tuple[Any]:
        """Pack model outputs plus extra context into the tuple sent downstream."""

    def preload_from_dataloader(self, dlitr) -> Tuple[Tuple[Any], Tuple[Any]]:
        """Default: nothing is preloaded. Subclasses that consume a dataloader
        iterator override this; a non-None iterator is unsupported here."""
        if dlitr is None:
            return ((), ())
        raise NotImplementedError()
|
def _call_method(method, rref, *args, **kwargs):
'\n a helper function to call a method on the given RRef\n '
return method(rref.local_value(), *args, **kwargs)
|
def _remote_method(method, rref, *args, **kwargs):
    """Helper: run ``method`` on the owner of ``rref`` and fetch back the result
    asynchronously via RPC. Returns a future."""
    remote_args = [method, rref, *args]
    return rpc.rpc_async(rref.owner(), _call_method, args=remote_args, kwargs=kwargs)
|
class Observer():
    """Computes this stage's partial (pre-root) gradient norm for approximate
    distributed gradient clipping; the final root is taken by the aggregator."""

    def __init__(self):
        # Last partial norm computed; 0 until the first calculation.
        self.last_calc = 0
        self.is_set = False

    def set_params(self, parameters, max_norm, norm_type=2):
        """Stash clipping configuration for later calc_local_partial_total_norm()."""
        self.parameters = parameters
        self.max_norm = max_norm
        self.norm_type = norm_type
        self.is_set = True

    def calc_local_partial_total_norm(self):
        """Compute the partial total norm over stashed parameters with gradients.

        For finite norm_type this is sum(||g_p||^norm_type) - WITHOUT the final
        (1/norm_type) root, which the caller applies after summing across stages.
        For inf-norm it is the max absolute gradient entry.
        """
        params = self.parameters
        if isinstance(params, torch.Tensor):
            params = [params]
        params = [p for p in params if p.grad is not None]
        max_norm = float(self.max_norm)  # kept for parity with set_params; unused below
        p_norm_type = float(self.norm_type)
        if p_norm_type == inf:
            result = max(p.grad.data.abs().max() for p in params)
        else:
            result = 0
            for p in params:
                per_param = p.grad.data.norm(p_norm_type)
                result += per_param.item() ** p_norm_type
        self.last_calc = result
        return result

    def get_last_partial_total_norm(self):
        return self.last_calc
|
class Agent():
    """Agent to perform approximation of distributed grad norm in async pipeline.

    Instead of synchronously waiting for the grad norm result from earlier
    stages, it uses previously calculated grad norm results (i.e. from the last
    batch). If there is no previous grad norm result, 0 is used as the default.
    """

    def __init__(self, world_size, rank):
        self.ob_rrefs = []
        self.rank = rank
        rpc.init_rpc(OBSERVER_NAME.format(rank), rank=rank, world_size=world_size)
        # One remote Observer RRef per peer rank.
        for ob_rank in range(0, world_size):
            if (ob_rank == rank):
                continue
            ob_info = rpc.get_worker_info(OBSERVER_NAME.format(ob_rank))
            self.ob_rrefs.append(rpc.remote(ob_info, Observer))
        # NOTE(review): `ob_info` here is whatever the LAST loop iteration left
        # behind, so `my_rref` is created on a remote worker rather than locally,
        # yet clip_grad_norm_ below calls methods on it directly (which only
        # works on a local object). This looks like it should be a local
        # Observer() instance - confirm intended behavior.
        self.my_rref = rpc.remote(ob_info, Observer)

    def clip_grad_norm_(self, parameters, max_norm, norm_type=2):
        """Approximate distributed clipping: combine this rank's fresh partial
        norm with the other ranks' last-known (possibly stale) partial norms,
        then scale gradients if the combined norm exceeds ``max_norm``."""
        # NOTE(review): Observer.get_last_partial_total_norm takes no arguments
        # besides self, but extra args are passed through here - verify the
        # intended remote signature.
        others_res = [_remote_method(Observer.get_last_partial_total_norm, ob, parameters, max_norm, norm_type=norm_type) for ob in self.ob_rrefs]
        my_rref = self.my_rref
        # NOTE(review): norm_type is hard-coded to 2 here, ignoring the
        # `norm_type` parameter - confirm.
        my_rref.set_params(parameters, max_norm, norm_type=2)
        my_norm = my_rref.calc_local_partial_total_norm()
        # Partial norms are pre-root; sum them, then apply the root once globally.
        total_norm = sum([i.wait() for i in others_res], my_norm)
        total_norm = (total_norm ** (1.0 / norm_type))
        clip_coef = (max_norm / (total_norm + 1e-06))
        if (clip_coef < 1):
            for p in parameters:
                p.grad.data.mul_(clip_coef)
        return total_norm
|
def convert_to_num_gpus(module, num_gpus_to_sim):
    """Recursively replace ``torch.nn.BatchNorm*`` modules by simulated-DDP
    equivalents that keep ``num_gpus_to_sim`` separate running-stat buffer pairs.

    Affine parameters and ``num_batches_tracked`` are copied over; each simulated
    GPU starts from a clone of the original running mean/var. Non-batchnorm
    modules are kept, with their children converted in place.
    """
    converted = module
    if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
        replacement_cls = MODULE_INSTANCES_TO_REPLACE[module.__class__.__name__]
        converted = replacement_cls(module.num_features, module.eps, module.momentum,
                                    module.affine, module.track_running_stats,
                                    num_gpus_to_sim=num_gpus_to_sim)
        if module.affine:
            converted.weight.data = module.weight.data.clone().detach()
            converted.bias.data = module.bias.data.clone().detach()
            converted.weight.requires_grad = module.weight.requires_grad
            converted.bias.requires_grad = module.bias.requires_grad
        converted.num_batches_tracked = module.num_batches_tracked
        # Seed every simulated GPU with the original running statistics.
        for gpu in range(num_gpus_to_sim):
            setattr(converted, f'running_mean_{gpu}', module.running_mean.clone().detach())
            setattr(converted, f'running_var_{gpu}', module.running_var.clone().detach())
    for (child_name, child) in module.named_children():
        converted.add_module(child_name, convert_to_num_gpus(child, num_gpus_to_sim))
    del module
    return converted
|
class _NormBase(Module):
    """Common base of _InstanceNorm and _BatchNorm.

    Differs from the upstream torch version by keeping ``num_gpus_to_sim``
    separate running-stat buffer pairs (``running_mean_{i}`` /
    ``running_var_{i}``) so per-GPU DDP statistics can be simulated on a single
    device. ``affine=False`` and ``track_running_stats=False`` are NOT
    supported by this variant.
    """
    _version = 2
    __constants__ = ['track_running_stats', 'momentum', 'eps', 'weight', 'bias', 'running_mean', 'running_var', 'num_batches_tracked', 'num_features', 'affine']

    def __init__(self, num_features, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True, num_gpus_to_sim=1):
        super(_NormBase, self).__init__()
        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum
        self.affine = affine
        self.track_running_stats = track_running_stats
        self.num_gpus_to_sim = num_gpus_to_sim
        if self.affine:
            self.weight = Parameter(torch.Tensor(num_features))
            self.bias = Parameter(torch.Tensor(num_features))
        else:
            # affine=False is unsupported in this simulated-DDP variant.
            raise NotImplementedError()
            # Unreachable: retained from the upstream torch implementation.
            self.register_parameter('weight', None)
            self.register_parameter('bias', None)
        if self.track_running_stats:
            # One running-stats buffer pair per simulated GPU.
            for i in range(num_gpus_to_sim):
                self.register_buffer(f'running_mean_{i}', torch.zeros(num_features))
                self.register_buffer(f'running_var_{i}', torch.ones(num_features))
            self.register_buffer('num_batches_tracked', torch.tensor(0, dtype=torch.long))
        else:
            # track_running_stats=False is unsupported in this variant.
            raise NotImplementedError()
            # Unreachable: retained from the upstream torch implementation.
            self.register_parameter('running_mean', None)
            self.register_parameter('running_var', None)
            self.register_parameter('num_batches_tracked', None)
        self.reset_parameters()

    def reset_running_stats(self):
        # Zero the means and reset variances to 1 for every simulated GPU.
        if self.track_running_stats:
            for i in range(self.num_gpus_to_sim):
                getattr(self, f'running_mean_{i}').zero_()
                getattr(self, f'running_var_{i}').fill_(1)
            self.num_batches_tracked.zero_()

    def reset_parameters(self):
        self.reset_running_stats()
        if self.affine:
            init.ones_(self.weight)
            init.zeros_(self.bias)

    def _check_input_dim(self, input):
        # Subclasses validate the expected input rank.
        raise NotImplementedError

    def extra_repr(self):
        return '{num_features}, eps={eps}, momentum={momentum}, affine={affine}, track_running_stats={track_running_stats}'.format(**self.__dict__)

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
        version = local_metadata.get('version', None)
        # Version-1 checkpoints predate num_batches_tracked; synthesize it so
        # loading old state dicts does not fail.
        if (((version is None) or (version < 2)) and self.track_running_stats):
            num_batches_tracked_key = (prefix + 'num_batches_tracked')
            if (num_batches_tracked_key not in state_dict):
                state_dict[num_batches_tracked_key] = torch.tensor(0, dtype=torch.long)
        super(_NormBase, self)._load_from_state_dict(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)
|
class _BatchNorm(_NormBase):
    """Batch-norm base that simulates DDP across ``num_gpus_to_sim`` devices by
    splitting the batch into chunks, each normalized with its own running-stat
    buffers, and concatenating the results."""

    def __init__(self, num_features, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True, num_gpus_to_sim=1):
        super(_BatchNorm, self).__init__(num_features, eps, momentum, affine, track_running_stats, num_gpus_to_sim)

    def forward(self, input):
        self._check_input_dim(input)
        if (self.momentum is None):
            exponential_average_factor = 0.0
        else:
            exponential_average_factor = self.momentum
        if (self.training and self.track_running_stats):
            if (self.num_batches_tracked is not None):
                # NOTE(review): out-of-place increment rebinds the buffer
                # attribute to a fresh tensor (upstream uses in-place `+= 1`);
                # confirm buffer registration / state_dict still behave as expected.
                self.num_batches_tracked = (self.num_batches_tracked + 1)
                if (self.momentum is None):
                    # momentum=None means cumulative moving average.
                    exponential_average_factor = (1.0 / float(self.num_batches_tracked))
                else:
                    exponential_average_factor = self.momentum
        # Chunk the batch along dim 0, one chunk per simulated GPU, each using
        # its own running_mean_{i}/running_var_{i}; re-concatenate the outputs.
        return torch.cat([F.batch_norm(chunk, getattr(self, f'running_mean_{i}'), getattr(self, f'running_var_{i}'), self.weight, self.bias, (self.training or (not self.track_running_stats)), exponential_average_factor, self.eps) for (i, chunk) in enumerate(torch.chunk(input, self.num_gpus_to_sim))])
|
class BatchNorm1d(_BatchNorm):
    r"""Batch Normalization over a 2D or 3D input (``(N, C)`` or ``(N, C, L)``),
    per `Batch Normalization: Accelerating Deep Network Training by Reducing
    Internal Covariate Shift` (https://arxiv.org/abs/1502.03167), while
    simulating per-GPU DDP statistics (see ``_BatchNorm``).

    Args:
        num_features: :math:`C` from an expected input of size :math:`(N, C, L)`
            or :math:`L` from input of size :math:`(N, L)`
        eps: value added to the denominator for numerical stability. Default: 1e-5
        momentum: value used for the running_mean and running_var computation;
            ``None`` selects a cumulative moving average. Default: 0.1
        affine: if ``True``, this module has learnable affine parameters.
            Default: ``True``
        track_running_stats: if ``True``, track running mean and variance.
            Default: ``True``

    Shape:
        - Input: :math:`(N, C)` or :math:`(N, C, L)`
        - Output: same shape as input
    """

    def _check_input_dim(self, input):
        # Only (N, C) and (N, C, L) layouts are valid.
        if input.dim() not in (2, 3):
            raise ValueError('expected 2D or 3D input (got {}D input)'.format(input.dim()))
|
class BatchNorm2d(_BatchNorm):
    r"""Batch Normalization over a 4D input ``(N, C, H, W)``, per `Batch
    Normalization: Accelerating Deep Network Training by Reducing Internal
    Covariate Shift` (https://arxiv.org/abs/1502.03167), while simulating
    per-GPU DDP statistics (see ``_BatchNorm``).

    Args:
        num_features: :math:`C` from an expected input of size :math:`(N, C, H, W)`
        eps: value added to the denominator for numerical stability. Default: 1e-5
        momentum: value used for the running_mean and running_var computation;
            ``None`` selects a cumulative moving average. Default: 0.1
        affine: if ``True``, this module has learnable affine parameters.
            Default: ``True``
        track_running_stats: if ``True``, track running mean and variance.
            Default: ``True``

    Shape:
        - Input: :math:`(N, C, H, W)`
        - Output: :math:`(N, C, H, W)` (same shape as input)
    """

    def _check_input_dim(self, input):
        # Only the (N, C, H, W) layout is valid.
        if input.dim() != 4:
            raise ValueError('expected 4D input (got {}D input)'.format(input.dim()))
|
class BatchNorm3d(_BatchNorm):
    r"""Batch Normalization over a 5D input ``(N, C, D, H, W)``, per `Batch
    Normalization: Accelerating Deep Network Training by Reducing Internal
    Covariate Shift` (https://arxiv.org/abs/1502.03167), while simulating
    per-GPU DDP statistics (see ``_BatchNorm``).

    Args:
        num_features: :math:`C` from an expected input of size
            :math:`(N, C, D, H, W)`
        eps: value added to the denominator for numerical stability. Default: 1e-5
        momentum: value used for the running_mean and running_var computation;
            ``None`` selects a cumulative moving average. Default: 0.1
        affine: if ``True``, this module has learnable affine parameters.
            Default: ``True``
        track_running_stats: if ``True``, track running mean and variance.
            Default: ``True``

    Shape:
        - Input: :math:`(N, C, D, H, W)`
        - Output: :math:`(N, C, D, H, W)` (same shape as input)
    """

    def _check_input_dim(self, input):
        # Only the (N, C, D, H, W) layout is valid.
        if input.dim() != 5:
            raise ValueError('expected 5D input (got {}D input)'.format(input.dim()))
|
def gap_aware_adam_init(optimizer):
    """Initialize Adam-style state (plus ``exp_step_avg_sq``, used by gap-aware)
    for every parameter of ``optimizer`` that has no state yet.

    Parameters whose state dict is already populated are left untouched.
    """
    for group in optimizer.param_groups:
        for param in group['params']:
            state = optimizer.state[param]
            if state:
                # Already initialized (optimizer has stepped) - leave as-is.
                continue
            state['exp_avg'] = torch.zeros_like(param.data, memory_format=torch.preserve_format)
            state['exp_avg_sq'] = torch.zeros_like(param.data, memory_format=torch.preserve_format)
            state['step'] = 0
            # Running average of squared step sizes, used to normalize the gap.
            state['exp_step_avg_sq'] = torch.zeros_like(param.data, memory_format=torch.preserve_format)
|
def opt_params_iter(optimizer):
    """Iterate over all parameters of ``optimizer``, across all param groups."""
    return chain.from_iterable(group['params'] for group in optimizer.param_groups)
|
def adam_gap1(beta1, beta2, eps, eta, gt, m, v):
    """Closed-form, element-wise estimate of the gap one Adam step introduces.

    Arguments follow Adam conventions: ``gt`` is the gradient, ``m``/``v`` the
    first/second moment estimates, ``eta`` the learning rate, ``beta1``/``beta2``
    the moment decay factors and ``eps`` the stability term.
    """
    one_minus_b1 = 1 - beta1
    sqrt_one_minus_b1 = math.sqrt(one_minus_b1)
    eps_term = one_minus_b1 * eps
    momentum_numer = m * (eta * beta1)
    # First term: the update implied by the momentum buffer alone.
    first = momentum_numer / (eps_term + (torch.sqrt(v) * sqrt_one_minus_b1))
    # Second term: the update after folding in the current gradient.
    second_numer = momentum_numer + (gt * (one_minus_b1 * eta))
    denom_scale = one_minus_b1 / math.sqrt(1 - beta2)
    second_denom = ((beta2 * v) + ((gt ** 2) * (1 - beta2))).sqrt_().mul_(denom_scale).add_(eps_term)
    second = second_numer / second_denom
    return first - second
|
class AdamGapAware(GapAwareBase):
    """Gap aware for the Adam optimizer.

    Damps (stale) gradients by a penalty derived from the gap between the
    weights used for the forward pass and the true weights; see
    https://arxiv.org/abs/1909.10802.
    """

    def __init__(self, optimizer, from_grad=False):
        """Apply Gap Aware on computed gradients."""
        super().__init__(optimizer)
        # Ensure every parameter has Adam + gap-aware state entries.
        gap_aware_adam_init(optimizer)

    def apply_from_grad(self):
        """Calculate gap aware from gradient. Requires knowing the exact gap.
        Requires: weight prediction.

        TODO: to handle some micro batches which do not have grad, we can
        aggregate updates for stale gradients with extra memory cost
        (it's not THAT much).
        """
        opt_state = self.optimizer.state
        with torch.no_grad():
            for pg in self.optimizer.param_groups:
                weight_decay = pg['weight_decay']
                (beta1, beta2) = pg['betas']
                eps = pg['eps']
                eta = pg['lr']
                if (weight_decay != 0):
                    # Weight decay interacts with the gap estimate; unsupported here.
                    raise NotImplementedError()
                for p in pg['params']:
                    # Typical per-element step magnitude (sqrt of the running
                    # squared-step average), with eps for stability.
                    avg_steps_needed = ((opt_state[p]['exp_step_avg_sq'] ** 0.5) + eps)
                    m = opt_state[p]['exp_avg']
                    v = opt_state[p]['exp_avg_sq']
                    gt = p.grad
                    # Estimated per-element gap induced by one Adam step.
                    gap = adam_gap1(beta1, beta2, eps, eta, gt, m, v).abs_()
                    penalty = (1 + (gap / avg_steps_needed))
                    # Larger gap => stronger damping of the stale gradient.
                    p.grad.data /= penalty

    def apply_on_stashed(self, stashed_theta):
        """True weights are loaded into the model, and given a stashed theta."""
        opt_state = self.optimizer.state
        with torch.no_grad():
            for (pg, spg) in zip(self.optimizer.param_groups, stashed_theta):
                max_lr = pg[GapAwareBase.MAX_LR_NAME]
                if (max_lr <= 0):
                    # Group is effectively frozen; nothing to damp.
                    continue
                weight_decay = pg['weight_decay']
                (beta1, beta2) = pg['betas']
                eps = pg['eps']
                for (p, sp) in zip(pg['params'], spg):
                    avg_steps_needed = ((opt_state[p]['exp_step_avg_sq'].data ** 0.5) + eps)
                    # Exact gap: distance between true and stashed weights.
                    gap = (p - sp).abs()
                    penalty = (1 + (gap / avg_steps_needed))
                    p.grad.data /= penalty
                    # Correction so weight decay itself is not penalized.
                    p.grad.data += p.data.mul((weight_decay * ((1 - penalty) / penalty)))

    def apply_on_theta(self, real_theta):
        # Same idea as apply_on_stashed, but the model holds the stale weights
        # and `real_theta` holds the true ones.
        opt_state = self.optimizer.state
        penatly_arr = []
        with torch.no_grad():
            for (pg, rpg) in zip(self.optimizer.param_groups, real_theta):
                max_lr = pg[GapAwareBase.MAX_LR_NAME]
                if (max_lr <= 0):
                    continue
                weight_decay = pg['weight_decay']
                (beta1, beta2) = pg['betas']
                eps = pg['eps']
                for (p, rp) in zip(pg['params'], rpg):
                    avg_steps_needed = ((opt_state[p]['exp_step_avg_sq'].data ** 0.5) + eps)
                    gap = (p - rp).abs()
                    penalty = (1 + (gap / avg_steps_needed))
                    penatly_arr.append(torch.mean(penalty).item())
                    p.grad.data /= penalty
                    p.grad.data += rp.data.mul((weight_decay * ((1 - penalty) / penalty)))
        # Debug output; "penaltly" spelling kept (runtime string).
        print('mean_penaltly', np.mean(penatly_arr))

    def update_running_stats(self):
        # Statistics are maintained by the optimizer itself; nothing to do here.
        pass
|
def get_adam_gap_aware_cls() -> Type[AdamGapAware]:
    """Return the gap-aware wrapper class for Adam."""
    return AdamGapAware
|
def gap_aware_adam_init(optimizer):
    """Lazily create Adam state plus the gap-aware ``exp_step_avg_sq`` buffer
    for each parameter that has no optimizer state yet; existing state is kept."""
    for group in optimizer.param_groups:
        for p in group['params']:
            st = optimizer.state[p]
            if len(st) != 0:
                continue
            # Fresh parameter: populate the full Adam + gap-aware state.
            st.update(
                exp_avg=torch.zeros_like(p.data, memory_format=torch.preserve_format),
                exp_avg_sq=torch.zeros_like(p.data, memory_format=torch.preserve_format),
                step=0,
                exp_step_avg_sq=torch.zeros_like(p.data, memory_format=torch.preserve_format),
            )
|
def opt_params_iter(optimizer):
    """Yield every parameter managed by ``optimizer``, group by group."""
    per_group = [group['params'] for group in optimizer.param_groups]
    return chain(*per_group)
|
def adam_gap1(beta1, beta2, eps, eta, gt, m, v):
    """Estimate, element-wise, the gap between two consecutive Adam updates.

    ``gt``: gradient; ``m``/``v``: first/second moment estimates; ``eta``:
    learning rate; ``beta1``/``beta2``: moment decays; ``eps``: stability term.
    """
    b1_complement = 1 - beta1
    base_eps = b1_complement * eps
    scaled_m = m * (eta * beta1)
    # Term A: the momentum-only portion of the update.
    term_a = scaled_m / (base_eps + (torch.sqrt(v) * math.sqrt(b1_complement)))
    # Term B: the update once the fresh gradient is folded in.
    grad_part = gt * (b1_complement * eta)
    ratio = b1_complement / math.sqrt(1 - beta2)
    term_b_denom = ((beta2 * v) + ((gt ** 2) * (1 - beta2))).sqrt_().mul_(ratio).add_(base_eps)
    term_b = (scaled_m + grad_part) / term_b_denom
    return term_a - term_b
|
class AdamGapAware(GapAwareBase):
    """Gap aware for the Adam optimizer.

    Damps (stale) gradients by a penalty derived from the gap between the
    weights used for the forward pass and the true weights; see
    https://arxiv.org/abs/1909.10802.
    """

    def __init__(self, optimizer, from_grad=False):
        """Apply Gap Aware on computed gradients."""
        super().__init__(optimizer)
        # Ensure every parameter has Adam + gap-aware state entries.
        gap_aware_adam_init(optimizer)

    def apply_from_grad(self):
        """Calculate gap aware from gradient. Requires knowing the exact gap.
        Requires: weight prediction.

        TODO: to handle some micro batches which do not have grad, we can
        aggregate updates for stale gradients with extra memory cost
        (it's not THAT much).
        """
        opt_state = self.optimizer.state
        with torch.no_grad():
            for pg in self.optimizer.param_groups:
                weight_decay = pg['weight_decay']
                (beta1, beta2) = pg['betas']
                eps = pg['eps']
                eta = pg['lr']
                if (weight_decay != 0):
                    # Weight decay interacts with the gap estimate; unsupported here.
                    raise NotImplementedError()
                for p in pg['params']:
                    # Typical per-element step magnitude with eps for stability.
                    avg_steps_needed = ((opt_state[p]['exp_step_avg_sq'] ** 0.5) + eps)
                    m = opt_state[p]['exp_avg']
                    v = opt_state[p]['exp_avg_sq']
                    gt = p.grad
                    # Estimated per-element gap induced by one Adam step.
                    gap = adam_gap1(beta1, beta2, eps, eta, gt, m, v).abs_()
                    penalty = (1 + (gap / avg_steps_needed))
                    p.grad.data /= penalty

    def apply_on_stashed(self, stashed_theta):
        """True weights are loaded into the model, and given a stashed theta."""
        opt_state = self.optimizer.state
        with torch.no_grad():
            for (pg, spg) in zip(self.optimizer.param_groups, stashed_theta):
                max_lr = pg[GapAwareBase.MAX_LR_NAME]
                if (max_lr <= 0):
                    # Group is effectively frozen; nothing to damp.
                    continue
                weight_decay = pg['weight_decay']
                (beta1, beta2) = pg['betas']
                eps = pg['eps']
                for (p, sp) in zip(pg['params'], spg):
                    avg_steps_needed = ((opt_state[p]['exp_step_avg_sq'].data ** 0.5) + eps)
                    gap = (p - sp).abs()
                    # NOTE(review): unlike apply_on_theta below (and the sibling
                    # AdamGapAware definition elsewhere in this file), the gap is
                    # normalized here by max_lr * avg_steps_needed - confirm which
                    # scaling is intended.
                    penalty = (1 + (gap / (max_lr * avg_steps_needed)))
                    p.grad.data /= penalty
                    # Correction so weight decay itself is not penalized.
                    p.grad.data += p.data.mul((weight_decay * ((1 - penalty) / penalty)))

    def apply_on_theta(self, real_theta):
        # Same idea as apply_on_stashed, but the model holds the stale weights
        # and `real_theta` holds the true ones.
        opt_state = self.optimizer.state
        penatly_arr = []
        with torch.no_grad():
            for (pg, rpg) in zip(self.optimizer.param_groups, real_theta):
                max_lr = pg[GapAwareBase.MAX_LR_NAME]
                if (max_lr <= 0):
                    continue
                weight_decay = pg['weight_decay']
                (beta1, beta2) = pg['betas']
                eps = pg['eps']
                for (p, rp) in zip(pg['params'], rpg):
                    avg_steps_needed = ((opt_state[p]['exp_step_avg_sq'].data ** 0.5) + eps)
                    gap = (p - rp).abs()
                    penalty = (1 + (gap / avg_steps_needed))
                    penatly_arr.append(torch.mean(penalty).item())
                    p.grad.data /= penalty
                    p.grad.data += rp.data.mul((weight_decay * ((1 - penalty) / penalty)))
        # Debug output; "penaltly" spelling kept (runtime string).
        print('mean_penaltly', np.mean(penatly_arr))

    def update_running_stats(self):
        # Statistics are maintained by the optimizer itself; nothing to do here.
        pass
|
def get_adam_gap_aware_cls() -> Type[AdamGapAware]:
    """Factory hook: return the gap-aware class used to wrap Adam."""
    return AdamGapAware
|
def opt_params_iter(optimizer):
    """Flat iterator over the parameters in every param group of ``optimizer``."""
    groups = (pg['params'] for pg in optimizer.param_groups)
    return chain.from_iterable(groups)
|
class AdamWGapAware(GapAwareBase):
    """Gap aware for the ADAMW optimizer.

    ADAMW: https://arxiv.org/pdf/1711.05101.pdf

    Just adding the square of the weights to the loss function is *not* the
    correct way of using L2 regularization/weight decay with Adam, since that
    will interact with the m and v parameters in strange ways.
    NOTE: it will also affect our weight prediction!
    Instead we want to decay the weights in a manner that doesn't interact with
    the m/v parameters. This is equivalent to adding the square of the weights
    to the loss with plain (non-momentum) SGD.

    Based on the pytorch AdamW implementation
    (https://pytorch.org/docs/stable/_modules/torch/optim/adamw.html#AdamW),
    which performs the step-weight decay right at the beginning:
        p.data.mul_(1 - group['lr'] * group['weight_decay'])

    NOTE: not all implementations are equivalent; e.g. the transformers AdamW
    does the update at the end (like the paper):
        if group["weight_decay"] > 0.0:
            p.data.add_(-group["lr"] * group["weight_decay"], p.data)
    (https://github.com/huggingface/transformers/blob/19a63d8245f4ce95595a8be657eb669d6491cdf8/src/transformers/optimization.py#L96)
    Decaying at the beginning was found slightly better for weight prediction
    with SGD, so the pytorch convention is followed here.
    """

    def __init__(self, optimizer, from_grad=True):
        """Apply Gap Aware on computed gradients."""
        super().__init__(optimizer)
        # Ensure every parameter has Adam + gap-aware state entries.
        gap_aware_adam_init(optimizer)

    def apply_from_grad(self):
        """Calculate gap aware from gradient. Requires knowing the exact gap."""
        raise NotImplementedError()

    def apply_on_stashed(self, stashed_theta):
        """True weights are loaded into the model, and given a stashed theta."""
        opt_state = self.optimizer.state
        with torch.no_grad():
            for (pg, spg) in zip(self.optimizer.param_groups, stashed_theta):
                max_lr = pg[GapAwareBase.MAX_LR_NAME]
                lr = pg['lr']
                if (max_lr <= 0):
                    # Group is effectively frozen; nothing to damp.
                    continue
                weight_decay = pg['weight_decay']
                (beta1, beta2) = pg['betas']
                eps = pg['eps']
                for (p, sp) in zip(pg['params'], spg):
                    avg_steps_needed = ((opt_state[p]['exp_step_avg_sq'].data ** 0.5) + eps)
                    # Exact gap: distance between true and stashed weights.
                    gap = (p - sp).abs()
                    penalty = (1 + (gap / avg_steps_needed))
                    p.grad.data /= penalty
                    # Decoupled (AdamW-style) decay, scaled by the penalty.
                    p.data.mul_((1 - ((weight_decay * lr) * ((1 - penalty) / penalty))))
        # NOTE(review): this raise executes unconditionally AFTER gradients and
        # weights were already mutated above - the method looks deliberately
        # disabled/unfinished; confirm before relying on it.
        raise NotImplementedError()

    def apply_on_theta(self, real_theta):
        # Model holds the stale weights; `real_theta` holds the true ones.
        opt_state = self.optimizer.state
        with torch.no_grad():
            for (pg, rpg) in zip(self.optimizer.param_groups, real_theta):
                max_lr = pg[GapAwareBase.MAX_LR_NAME]
                lr = pg['lr']
                if (max_lr <= 0):
                    continue
                weight_decay = pg['weight_decay']
                (beta1, beta2) = pg['betas']
                eps = pg['eps']
                for (p, rp) in zip(pg['params'], rpg):
                    avg_steps_needed = ((opt_state[p]['exp_step_avg_sq'].data ** 0.5) + eps)
                    gap = (p - rp).abs()
                    penalty = (1 + (gap / avg_steps_needed))
                    p.grad.data /= penalty
                    # Decoupled decay applied to the TRUE weights here.
                    rp.data.mul_((1 - ((weight_decay * lr) * ((1 - penalty) / penalty))))

    def update_running_stats(self):
        # Statistics are maintained by the optimizer itself; nothing to do here.
        pass
|
def get_adamw_gap_aware_cls() -> Type[AdamWGapAware]:
    """Return the gap-aware wrapper class for AdamW."""
    return AdamWGapAware
|
class HookFactory(ABC):
    """Builds and manages per-batch parameter gradient hooks.

    Subclasses create (parameter, hook) pairs per batch index; the factory
    registers them on demand and removes the active handles afterwards.
    """

    def __init__(self):
        # batch index -> list of (parameter, hook-callable) pairs, consumed on registration.
        self.batch_to_hooks = dict()
        # Handles of the currently registered hooks (None until first registration).
        self.current_handles = None

    def register_hooks(self, batch_idx):
        """Register the prepared hooks for ``batch_idx`` (one-shot: the entry is popped)."""
        pairs = self.batch_to_hooks.pop(batch_idx)
        self.current_handles = [param.register_hook(fn) for (param, fn) in pairs]

    def remove_current_handles(self):
        """Detach every currently registered hook."""
        for handle in self.current_handles:
            handle.remove()

    @abstractmethod
    def create_apply_hooks_on_stashed(self, batch_idx, get_stashed_theta_fn):
        pass
|
class AdamGAHookFactory(HookFactory):
    """Hook factory that applies Adam gap-aware gradient damping via backward hooks.

    NOTE(review): relies on ``self.optimizer`` being assigned by the caller -
    neither this ``__init__`` nor ``HookFactory.__init__`` sets it; confirm
    against the call sites.
    """

    def __init__(self):
        super().__init__()

    def create_apply_hooks_on_stashed(self, batch_idx, get_stashed_theta_fn):
        """Build one gradient hook per parameter of every active param group and
        stash them under ``self.batch_to_hooks[batch_idx]``.

        Each hook divides the incoming gradient by a penalty derived from the
        gap between the current parameter and the stashed one for ``batch_idx``
        (fetched lazily through ``get_stashed_theta_fn``).
        """
        opt_state = self.optimizer.state
        all_hooks = []
        for (pg_idx, pg) in enumerate(self.optimizer.param_groups):
            max_lr = pg['max_lr']
            if (max_lr <= 0):
                # Group is effectively frozen; no damping needed.
                continue
            (beta1, beta2) = pg['betas']  # NOTE(review): currently unused by the hook
            eps = pg['eps']
            for (p_idx, p) in enumerate(pg['params']):
                exp_step_avg_sq = opt_state[p]['exp_step_avg_sq']

                # BUGFIX: bind loop-dependent values as default arguments.
                # Python closures are late-binding, so without this every hook
                # would see the values from the LAST loop iteration (wrong
                # parameter, wrong state tensor and wrong stash indices).
                def hook(grad, p=p, exp_step_avg_sq=exp_step_avg_sq, eps=eps,
                         pg_idx=pg_idx, p_idx=p_idx):
                    with torch.no_grad():
                        stashed_theta = get_stashed_theta_fn(batch_idx)
                        sp = stashed_theta[pg_idx][p_idx]
                        # Typical per-element step magnitude, eps for stability.
                        avg_steps_needed = ((exp_step_avg_sq ** 0.5) + eps)
                        gap = (p - sp).abs()
                        penalty = (1 + (gap / avg_steps_needed))
                        return (grad / penalty)
                all_hooks.append((p, hook))
        self.batch_to_hooks[batch_idx] = all_hooks
|
class GapAwareBase(abc.ABC):
    """Base class for Gap Aware staleness mitigation in pipelines.

    Based on "Gap Aware Mitigation of Gradient Staleness"
    (https://arxiv.org/abs/1909.10802).

    Gap Aware can be applied when one of the following holds:
      1. the parameters theta used for the forward pass were stashed, so the
         gap can be computed; or
      2. the gap is easy to obtain (e.g. from the gradient).

    Notes:
        Adds memory footprint for SGD (number of parameters in optimizer).

    Warning:
        Will not work with gradient accumulation, as it changes ``grad``!
        This implementation assumes staleness=1, so it should be shut down
        for the first batch.

    Usage::

        ga.patch_scheduler(scheduler)   # track max lr

        loss.backward()
        ga.update_running_stats()
        ga.apply()                      # if delay > 0
        # send gradients in pipeline
        optimizer.step()
        scheduler.step()

    TODO:
        Support working for all layers of pipeline.
        Think about implementation with L2, and with hooks (can be tricky).
    """

    # Key under which the maximal lr observed so far is stored per param group.
    MAX_LR_NAME = 'max_lr'

    def __init__(self, optimizer, initial_max_lr=None):
        """Seed, per param group, the max-lr tracker (with the current lr
        unless ``initial_max_lr`` is given), and remember the optimizer."""
        if (initial_max_lr is None):
            for pg in optimizer.param_groups:
                pg[GapAwareBase.MAX_LR_NAME] = pg['lr']
        else:
            for pg in optimizer.param_groups:
                pg[GapAwareBase.MAX_LR_NAME] = initial_max_lr
        self.optimizer = optimizer
        # Number of update_running_stats() calls; used for bias correction.
        self.step_count = 0

    def inc_step_count(self):
        self.step_count += 1

    def update_running_avg(self):
        """In case there is some running average to update (default: none)."""
        pass

    def update_running_stats(self):
        """Basic method for updating running statistics."""
        self.update_running_avg()
        self.inc_step_count()

    @abc.abstractmethod
    def apply_from_grad(self):
        """Calculate gap aware from gradient. Requires knowing the exact gap."""
        raise NotImplementedError()

    @abc.abstractmethod
    def apply_on_stashed(self, stashed_theta):
        """True weights are loaded into the model, and given a stashed theta."""
        raise NotImplementedError()

    @abc.abstractmethod
    def apply_on_theta(self, real_theta):
        raise NotImplementedError()

    @staticmethod
    def patch_scheduler(scheduler):
        """Monkey-patch ``scheduler.step`` so each step also refreshes the
        per-group max lr: pg['max_lr'] = max(previous max, current lr)."""
        def step_decorator(func):
            @wraps(func)
            def inner(self, *args, **kwargs):
                func(self, *args, **kwargs)
                for pg in self.optimizer.param_groups:
                    pg[GapAwareBase.MAX_LR_NAME] = max(pg[GapAwareBase.MAX_LR_NAME], pg['lr'])
            # Re-bind the decorated function as a method of this scheduler.
            return types.MethodType(inner, scheduler)
        scheduler.step = step_decorator(scheduler.step.__func__)
        print(f'Scheduler.step() patched to also track max lr in pg[{GapAwareBase.MAX_LR_NAME}]')
|
def init_running_avg_step(optimizer):
    """Allocate one zero tensor per optimizer parameter, keyed by ``id(param)``,
    to hold the exponential running average of squared step sizes."""
    return {id(p): torch.zeros_like(p)
            for pg in optimizer.param_groups
            for p in pg['params']}
|
class GapAware(GapAwareBase):
    """Gap Aware penalty for SGD-based asynchronous pipelines.

    Based on "Gap Aware Mitigation of Gradient Staleness"
    (https://arxiv.org/abs/1909.10802).

    Applicable when either:
      1. the forward-pass parameters theta were stashed (gap computable), or
      2. the gap is easy to obtain (e.g. from the gradient).

    Notes:
        Adds memory footprint (number of parameters in optimizer for SGD).

    Warning:
        Will not work with gradient accumulation, as it changes ``grad``!
        This implementation assumes staleness=1, so it should be shut down
        for the first batch.

    Usage, after ``loss.backward()`` and before sending gradients /
    ``optimizer.step()``::

        ga.update_running_avg()
        ga.inc_step_count()
        ga.apply_on_XXXX(...)   # or ga.apply_from_grad()
    """

    def __init__(self, optimizer, big_gamma=0.999, epsilon=1e-08, from_grad=True):
        """Apply Gap Aware on computed gradients.

        :param optimizer: wrapped optimizer (expected: SGD, possibly with momentum).
        :param big_gamma: decay factor of the running average of squared steps.
        :param epsilon: numerical-stability term added after the sqrt.
        :param from_grad: unused here; kept for interface compatibility.
        """
        super().__init__(optimizer)
        self.big_gamma = big_gamma
        # id(param) -> exponential running average of squared step sizes.
        self.running_avg_step = init_running_avg_step(optimizer)
        self.epsilon = epsilon
        # Ensure a momentum buffer exists so update_running_avg can read it
        # even before the first optimizer.step().
        for pg in self.optimizer.param_groups:
            for p in pg['params']:
                if ('momentum_buffer' not in self.optimizer.state[p]):
                    self.optimizer.state[p]['momentum_buffer'] = torch.zeros_like(p)

    def update_running_avg(self):
        """Update the exponential step running average.
        Requires: that we got some grad."""
        opt_s = self.optimizer.state
        ra = self.running_avg_step
        bg = self.big_gamma
        with torch.no_grad():
            for pg in self.optimizer.param_groups:
                if (pg['momentum'] != 0):
                    # With momentum, the actual step is the momentum buffer.
                    for p in pg['params']:
                        ra[id(p)] = ((bg * ra[id(p)]) + ((1 - bg) * (opt_s[p]['momentum_buffer'] ** 2)))
                else:
                    for p in pg['params']:
                        ra[id(p)] = ((bg * ra[id(p)]) + ((1 - bg) * (p.grad ** 2)))

    def apply_from_grad(self):
        """Calculate gap aware from gradient (gap approximated by lr*|grad|).
        Requires knowing the exact gap."""
        with torch.no_grad():
            ra = self.running_avg_step
            # Adam-style bias correction for the short history at start-up.
            bias_correction = (1 - (self.big_gamma ** self.step_count))
            eps = self.epsilon
            for pg in self.optimizer.param_groups:
                max_lr = pg[GapAwareBase.MAX_LR_NAME]
                if (max_lr <= 0):
                    # Group effectively frozen: skip.
                    continue
                weight_decay = pg['weight_decay']
                for p in pg['params']:
                    avg_steps_needed = (max_lr * (((ra[id(p)] / bias_correction) ** 0.5) + eps))
                    penalty = (1 + ((pg['lr'] * p.grad.abs()) / avg_steps_needed))
                    # Scale the gradient down by the per-element penalty...
                    p.grad /= penalty
                    # ...and correct so that weight decay is not weakened by it.
                    p.grad += p.mul((weight_decay * ((1 - penalty) / penalty)))

    def apply_on_theta(self, real_theta):
        """Apply the penalty given the real (master) weights, nested like
        ``param_groups``; the gap is |p - rp| per element."""
        with torch.no_grad():
            ra = self.running_avg_step
            bias_correction = (1 - (self.big_gamma ** self.step_count))
            eps = self.epsilon
            for (pg, rpg) in zip(self.optimizer.param_groups, real_theta):
                max_lr = pg[GapAwareBase.MAX_LR_NAME]
                if (max_lr <= 0):
                    continue
                weight_decay = pg['weight_decay']
                for (p, rp) in zip(pg['params'], rpg):
                    avg_steps_needed = (max_lr * (((ra[id(p)] / bias_correction) ** 0.5) + eps))
                    gap = (p - rp).abs()
                    penalty = (1 + (gap / avg_steps_needed))
                    p.grad /= penalty
                    # Weight-decay correction uses the REAL weights here.
                    p.grad += rp.mul((weight_decay * ((1 - penalty) / penalty)))

    def apply_on_stashed(self, stashed_theta):
        """True weights are loaded into the model, and given a stashed theta;
        the gap is |p - sp| per element."""
        with torch.no_grad():
            ra = self.running_avg_step
            bias_correction = (1 - (self.big_gamma ** self.step_count))
            eps = self.epsilon
            for (pg, spg) in zip(self.optimizer.param_groups, stashed_theta):
                max_lr = pg[GapAwareBase.MAX_LR_NAME]
                if (max_lr <= 0):
                    continue
                weight_decay = pg['weight_decay']
                for (p, sp) in zip(pg['params'], spg):
                    avg_steps_needed = (max_lr * (((ra[id(p)] / bias_correction) ** 0.5) + eps))
                    gap = (p - sp).abs()
                    penalty = (1 + (gap / avg_steps_needed))
                    p.grad /= penalty
                    # Weight-decay correction uses the CURRENT weights here.
                    p.grad += p.mul((weight_decay * ((1 - penalty) / penalty)))
|
def get_sgd_gap_aware_cls(sgd_type: str) -> Type[GapAware]:
    """Look up the GapAware class for the given SGD variant.

    Annotation fixed to ``Type[GapAware]`` (was ``GapAware``) to match
    ``get_adamw_gap_aware_cls``: the function returns a class, not an instance.

    :param sgd_type: key into ``SGD_TYPE_TO_GAP_AWARE_CLASS``.
    :return: the matching GapAware class, or ``None`` for an unknown type.
    """
    gap_aware_cls = SGD_TYPE_TO_GAP_AWARE_CLASS.get(sgd_type, None)
    return gap_aware_cls
|
def convert_child_by_dict(model, dict_id_b4_to_after):
    """Recursively swap children of ``model`` whose ``id()`` appears as a key
    in ``dict_id_b4_to_after`` with the mapped replacement module."""
    if not dict_id_b4_to_after:
        return
    for child_name, child in model.named_children():
        replacement = dict_id_b4_to_after.get(id(child))
        if replacement is not None:
            setattr(model, child_name, replacement)
        else:
            convert_child_by_dict(child, dict_id_b4_to_after)
|
class DummyForwardMonkeyPatcher():
    """Swaps selected submodules of ``model`` with functional ("dummy
    forward") clones, so a forward pass can run without mutating the real
    modules' buffers (e.g. batch-norm running statistics)."""

    def __init__(self, model, classes_list_to_patch):
        """``classes_list_to_patch``: list of module classes to patch."""
        self.model = model
        self.classes_to_patch = classes_list_to_patch
        self.models = []
        self.encapsulators = []
        self.fmodels = []
        self.state_is_dummy = False
        for patch_cls in self.classes_to_patch:
            hits = []
            find_modules(model, '', patch_cls, hits)
            self.models.extend(mod for _, mod in hits)
        # Build one functional clone (+ its sync function) per found module.
        pairs = [dummy_forward_monkeypatch(orig_model) for orig_model in self.models]
        self.fmodels.extend(fm for fm, _ in pairs)
        self.encapsulators.extend(enc for _, enc in pairs)
        self.id_models_to_fmodels = {id(m): fm for m, fm in zip(self.models, self.fmodels)}
        self.id_fmodels_to_models = {id(fm): m for m, fm in zip(self.models, self.fmodels)}

    def replace_for_dummy(self):
        """Install the functional clones (idempotent)."""
        if self.state_is_dummy:
            return
        convert_child_by_dict(self.model, self.id_models_to_fmodels)
        self.state_is_dummy = True

    def replace_for_forward(self):
        """Restore the original modules (idempotent)."""
        if not self.state_is_dummy:
            return
        convert_child_by_dict(self.model, self.id_fmodels_to_models)
        self.state_is_dummy = False

    def sync(self):
        """Copy current params/buffers of each real module into its clone."""
        for encapsulator, fmodule, module in zip(self.encapsulators, self.fmodels, self.models):
            encapsulator(fmodule, module)
|
def test():
    """Manual smoke test: patch BatchNorm1d for dummy forward, run forwards in
    both states and print state dicts to eyeball that the real module's
    buffers are untouched by the dummy pass, then exercise grad + SGD step."""
    import torch
    features = 3
    batch = 3
    model = torch.nn.Sequential(torch.nn.Linear(features, features), torch.nn.BatchNorm1d(features))
    patcher = DummyForwardMonkeyPatcher(model, classes_list_to_patch=[torch.nn.BatchNorm1d])
    patcher.sync()
    print(model)
    # Dummy pass: the real BN's running stats must not change.
    patcher.replace_for_dummy()
    model(torch.randn(features, features))
    print(model)
    print(model[1].state_dict())
    patcher.replace_for_forward()
    print(model)
    print(model[1].state_dict())
    print()
    print('now updating')
    # Real pass: running stats do update now.
    y_pred = model(torch.randn(batch, features))
    print(model)
    print(model[1].state_dict())
    # Re-sync so the clone picks up the updated stats.
    patcher.sync()
    patcher.replace_for_dummy()
    print(model)
    print(model[1].state_dict())
    patcher.replace_for_forward()
    print(('-' * 89))
    print('now with grad')
    loss_fn = torch.nn.MSELoss()
    y = torch.randn(batch, features)
    loss_fn(y_pred, y).backward()
    optimizer = torch.optim.SGD(model.parameters(), 0.1, 0.9)
    optimizer.step()
    print(model)
    print(model[1].state_dict())
    patcher.replace_for_dummy()
    print(model)
    print(model[1].state_dict())
|
def test_no_grad_and_bwd():
    """Manual smoke test: a no-grad dummy forward followed by a real forward
    and backward should complete without autograd errors."""
    import torch
    features = 3
    batch = 3
    model = torch.nn.Sequential(torch.nn.Linear(features, features), torch.nn.BatchNorm1d(features))
    patcher = DummyForwardMonkeyPatcher(model, classes_list_to_patch=[torch.nn.BatchNorm1d])
    patcher.sync()
    patcher.replace_for_dummy()
    with torch.no_grad():
        res = model(torch.randn(batch, features))
    patcher.replace_for_forward()
    torch.nn.functional.mse_loss(model(torch.randn(batch, features)), torch.randn(batch, features)).backward()
|
def find_modules(module, module_name, module_instance, found):
    """Recursively collect all instances of a given module class.

    Appends ``(access_string, module)`` tuples to ``found``, where the access
    string is built from ``module_name`` (e.g. ``model[1].attn``; numeric
    child names become index notation).

    Arguments:
        module {nn.Module} -- module to search in
        module_name {str} -- name of ``module`` in the current context
        module_instance {nn.Module} -- class of the modules to search for
        found {list} -- list to append results to

    Example::

        found = []
        find_modules(model, 'model', PKMLayer, found)
        print([t[0] for t in found])

    (Adapted from the facebook XLM repo.)
    """
    if isinstance(module, module_instance):
        found.append((module_name, module))
        return
    for child_name, child in module.named_children():
        if child_name.isdigit():
            access = '%s[%s]' % (module_name, child_name)
        else:
            access = '%s.%s' % (module_name, child_name)
        find_modules(child, access, module_instance, found)
|
def _patched_parameters(self, recurse: bool=True, time: _typing.Optional[int]=None) -> _typing.Iterable[_torch.Tensor]:
'Returns an iterator over monkey patched module fast parameters.\n\n Args:\n recurse (bool): if True, then yields fast parameters of this module\n and all submodules. Otherwise, this *still* yields parameters of\n this module and all submodules, and raises a warning. This keyword\n exists only to satisfy API compatibility with\n ``torch.nn.Module.parameters``.\n time (int or None): if None, the most recent fast parameters are\n provided. The int provided stands for the number of steps since the\n module was created. *Note* that the step counter is incremented\n every time parameters are updated, so this may not align with number\n of training or evaluations steps.\n\n Yields:\n Parameter: module fast weights.\n '
if (getattr(self, '_fast_params', None) is None):
raise Exception('Tried to get fast weights of a monkey patched module which does not encapsulate fast weights.')
if (not recurse):
_warnings.warn('Calling parameters with recurse=False on a monkey patched module still returns all the fast weights of of nested patched modules.')
time = ((- 1) if (time is None) else time)
return iter(self._fast_params[time])
|
class _MonkeyPatchBase(_abc.ABC, _torch.nn.Module):
@_abc.abstractmethod
def __init__(self) -> None:
self._param_mapping: _typing.List[int] = []
def forward(self):
raise NotImplementedError("The monkey-patching logic has failed to override self.forward on the new module, or you tried calling forward on a patched version of a module which doesn't have forward (e.g. ModuleList).")
def _expand_params(self, params: _typing.List[_torch.Tensor]) -> _typing.List[_torch.Tensor]:
return [params[index] for index in self._param_mapping]
@property
def init_fast_params(self):
return self._fast_params[0]
@property
def fast_params(self):
return (None if (self._fast_params is None) else self._fast_params[(- 1)])
@fast_params.setter
def fast_params(self, value):
value = list(value)
if (self._fast_params is None):
self._fast_params = []
self._fast_params.append(value)
|
def buffer_sync(module: _torch.nn.Module, fmodule: _MonkeyPatchBase, device: _typing.Optional[_torch.device]=None) -> None:
    """One-off sync (copy) of buffers in ``fmodule`` with those from
    ``module``, recursing through matching submodules; tensor buffers are
    cloned/detached and optionally cast to ``device``."""
    for key, value in module._buffers.items():
        if not _torch.is_tensor(value):
            # Non-tensor buffers (e.g. None) are assigned as-is.
            fmodule._buffers[key] = value
        else:
            copied = value.clone().detach()
            fmodule._buffers[key] = copied if device is None else copied.to(device)
    for name, child in module._modules.items():
        if name not in fmodule._modules:
            raise KeyError('Did not find expected submodule {} of monkey-patched module {}.'.format(name, fmodule))
        buffer_sync(child, fmodule._modules[name], device)
|
class _ParameterPlaceholder():
def __init__(self, name: str) -> None:
self._param_name = name
def __repr__(self) -> str:
return 'Parameter placeholder ("{}")'.format(self._param_name)
|
def _make_functional(module: _torch.nn.Module, params_box: _typing.Sequence[_typing.Optional[_typing.List[_torch.Tensor]]], params_offset: int) -> _typing.Tuple[(int, _MonkeyPatchBase, _typing.Type[_MonkeyPatchBase])]:
    """Recursively build a monkey-patched ("functional") clone of ``module``.

    ``params_box`` is a shared one-element list through which fast weights are
    injected at call time; ``params_offset`` is this module's start index into
    the flat parameter list.

    Returns:
        (offset past this subtree's params, patched instance, patched class).
    """
    if isinstance(module, _MonkeyPatchBase):
        raise ValueError("Monkey-patching monkey-patched modules is untested uncharted territory, so we're going to assume it's done in error. If you are doing this intentionally and need this to be supported, contact the developers of this library.")
    # Names of this module's real (non-None) parameters, in order.
    param_names = list((name for name in module._parameters.keys() if (module._parameters[name] is not None)))
    _ModuleType: _typing.Type[_torch.nn.Module] = module.__class__

    class MonkeyPatched(_ModuleType, _MonkeyPatchBase):
        # Dynamically created subclass of the wrapped module's own class.
        _wrapped_name = type(module).__name__

        def __init__(self, original_params) -> None:
            # Deliberately skip _ModuleType.__init__ (may allocate / do I/O);
            # only nn.Module's bookkeeping is initialized.
            _torch.nn.Module.__init__(self)
            self._fast_params = None
            self._param_names = param_names
            self._original_params = original_params
            # Placeholders keep _parameters' keys stable until fast weights
            # are assigned through __setattr__ below.
            self._parameters = _OrderedDict(((name, _ParameterPlaceholder(name)) for name in self._param_names))
            self._modules: _typing.Dict[(str, _MonkeyPatchBase)] = _OrderedDict()

        def __setattr__(self, name, value):
            # Mirrors nn.Module.__setattr__, except that _parameters entries
            # accept plain tensors (fast weights), not only nn.Parameter.
            def remove_from(*dicts):
                for d in dicts:
                    if (name in d):
                        del d[name]
            params = self.__dict__.get('_parameters')
            if ((params is not None) and (name in params)):
                if (not isinstance(value, _torch.Tensor)):
                    raise TypeError('Require Tensor as fast weights. Got {}'.format(_torch.typename(value)))
                self._parameters[name] = value
            else:
                modules = self.__dict__.get('_modules')
                if isinstance(value, _torch.nn.Module):
                    if (modules is None):
                        raise AttributeError('cannot assign module before Module.__init__() call')
                    remove_from(self.__dict__, self._parameters, self._buffers)
                    modules[name] = value
                elif ((modules is not None) and (name in modules)):
                    if (value is not None):
                        raise TypeError("cannot assign '{}' as child module '{}'(torch.nn.Module or None expected)".format(_torch.typename(value), name))
                    modules[name] = value
                else:
                    buffers = self.__dict__.get('_buffers')
                    if ((buffers is not None) and (name in buffers)):
                        if ((value is not None) and (not isinstance(value, _torch.Tensor))):
                            raise TypeError("cannot assign '{}' as buffer '{}' (torch.Tensor or None expected)".format(_torch.typename(value), name))
                        buffers[name] = value
                    else:
                        object.__setattr__(self, name, value)

        def parameters(self) -> _typing.Iterable[_torch.Tensor]:
            """This should only be used to check shape/dtype of original params."""
            return self._original_params

    MonkeyPatched.__name__ = ('InnerFunctional' + type(module).__name__)
    MonkeyPatched.__qualname__ = MonkeyPatched.__name__
    fmodule = MonkeyPatched(module.parameters())
    num_params = len([1 for p in module._parameters.values() if (p is not None)])
    # Copy plain attributes over (skipping nn.Module internals).
    for (name, attr) in module.__dict__.items():
        if (name in _internal_attrs):
            continue
        setattr(fmodule, name, attr)
    # Carry over non-Parameter entries of _parameters (e.g. plain tensors).
    for (name, attr) in module.__dict__['_parameters'].items():
        if isinstance(attr, _torch.nn.Parameter):
            continue
        else:
            setattr(fmodule, name, attr)
    child_params_offset = (params_offset + num_params)
    for (name, child) in module._modules.items():
        (child_params_offset, fchild, _) = _make_functional(child, params_box, child_params_offset)
        fmodule._modules[name] = fchild
        setattr(fmodule, name, fchild)
    # Restore the wrapped class's forward (it was shadowed by _MonkeyPatchBase).
    true_forward = type(module).forward
    setattr(MonkeyPatched, 'forward', true_forward)
    return (child_params_offset, fmodule, type(fmodule))
|
def _update_patched_params(fmodule: _MonkeyPatchBase, params_box: _typing.Sequence[_typing.List[_torch.Tensor]], params_offset: int) -> int:
    """Assign the slice of ``params_box[0]`` belonging to ``fmodule`` (and,
    recursively, its children) onto the module as attributes.

    Returns the offset just past the parameters this subtree consumed.
    """
    own_count = sum(1 for p in fmodule._parameters.values() if p is not None)
    offset = params_offset + own_count
    for child in fmodule._modules.values():
        offset = _update_patched_params(child, params_box, offset)
    fresh = params_box[0][params_offset:params_offset + own_count]
    for name, param in zip(fmodule._param_names, fresh):
        setattr(fmodule, name, param)
    return offset
|
def make_functional(module: _torch.nn.Module, encapsulator: _EncapsulatorType=None) -> _MonkeyPatchBase:
    """Returns a stateless version of an ``nn.Module`` instance.

    Builds the patched clone tree, installs ``parameters``/``update_params``
    on the patched class, and (optionally) runs ``encapsulator`` once to seed
    the clone from the original module.
    """
    # Shared one-element box through which fast weights reach nested modules.
    params_box = [None]
    (_, fmodule, MonkeyPatched) = _make_functional(module, params_box, 0)
    top_name = ('Functional' + MonkeyPatched._wrapped_name)
    MonkeyPatched.__name__ = MonkeyPatched.__qualname__ = top_name
    # Keep the patched-in original forward reachable as boxed_forward.
    MonkeyPatched.boxed_forward = MonkeyPatched.forward
    param_mapping = _utils._get_param_mapping(module, [], [])
    setattr(fmodule, '_param_mapping', param_mapping)

    def _patched_forward(self, *args, **kwargs):
        # Optional per-call fast weights: forward(..., params=[...]).
        if ('params' in kwargs):
            params = kwargs.pop('params')
            self.fast_params = params
        if (not self.fast_params):
            # NOTE(review): empties the box when no fast params were ever set;
            # confirm nested modules behave as intended in that case.
            params_box[0] = []
        return self.boxed_forward(*args, **kwargs)

    def _update_params(self, params):
        self.fast_params = params
        # Expand shared parameters, then write them onto the clone tree.
        params = self._expand_params(params)
        _update_patched_params(self, [params], 0)

    setattr(MonkeyPatched, 'parameters', _patched_parameters)
    setattr(MonkeyPatched, 'update_params', _update_params)
    if (encapsulator is not None):
        encapsulator(fmodule, module)
    return fmodule
|
def dummy_forward_monkeypatch(module: _torch.nn.Module) -> _typing.Tuple[(_MonkeyPatchBase, _typing.Callable)]:
    """Create a monkey-patched stateless version of a module.

    Where the original module or any of its submodules have state (e.g. batch
    norm), this will be copied too, but further updates will cause these to
    diverge without changing the state of the original module.

    Annotation fixed: the function returns a tuple, not just the patched
    module (the original annotation claimed ``_MonkeyPatchBase``).

    Args:
        module: a ``torch.nn.Module`` subclass instance.

    Returns:
        ``fmodule``: a "stateless" version of the original module, for which
            calls to forward take the additional kwarg-only parameter
            ``params`` (a list of tensors).
        ``encapsulator``: a function; call ``encapsulator(fmodule, module)``
            before first usage and after every update to the original model
            to re-sync the clone's weights and buffers.
    """
    def encapsulator(fmodule: _MonkeyPatchBase, module: _torch.nn.Module) -> None:
        # Copy the real module's current parameters and buffers into the clone.
        params = list(module.parameters())
        buffer_sync(module, fmodule, None)
        fmodule.update_params(params)
    # Note: deliberately NOT passed as make_functional's encapsulator; the
    # caller decides when to run the first sync.
    fmodule = make_functional(module, encapsulator=None)
    return (fmodule, encapsulator)
|
def monkeypatch(module: _torch.nn.Module, device: _typing.Optional[_torch.device]=None, copy_initial_weights: bool=True) -> _MonkeyPatchBase:
    """Create a monkey-patched stateless version of a module.

    Stateful submodules (e.g. batch norm) have their state copied; later
    updates to the patched module diverge without touching the original.

    Args:
        module: a ``torch.nn.Module`` subclass instance.
        device (optional): device to cast the fast weights and state to.
        copy_initial_weights: if True, the patched module starts from copies
            of the weights (kept off the gradient tape when unrolling); if
            False, the actual module weights are the initial fast weights
            (useful for MAML-style training).

    Returns:
        A "stateless" version of ``module`` whose forward accepts the
        kwarg-only parameter ``params`` (a list of tensors requiring grad).
    """
    def encapsulator(fmodule: _MonkeyPatchBase, module: _torch.nn.Module) -> None:
        if copy_initial_weights:
            params = _utils.get_func_params(module, device=device)
        else:
            params = []
            for p in module.parameters():
                cloned = p.clone()
                params.append(cloned if device is None else cloned.to(device))
        buffer_sync(module, fmodule, device)
        fmodule.update_params(params)
    fmodule = make_functional(module, encapsulator=encapsulator)
    return fmodule
|
def _copy_tensor(t: _torch.Tensor, safe_copy: bool, device: _typing.Optional[_torch.device]=None) -> _torch.Tensor:
if safe_copy:
t = t.clone().detach().requires_grad_(t.requires_grad)
else:
t = t.detach().requires_grad_(t.requires_grad)
t = (t if (device is None) else t.to(device))
return t
|
def _recursive_copy_and_cast(target: _typing.Union[(list, tuple, dict, set, _torch.Tensor)], device: _typing.Optional[_torch.device]) -> _torch.Tensor:
    """Deep-copy ``target``, safely copying every tensor found (and casting it
    to ``device``), while preserving container structure."""
    def map_fn(x):
        if not _torch.is_tensor(x):
            return x
        return _copy_tensor(x, True, device=device)
    return _recursive_map(target, map_fn)
|
def _recursive_map(target: _typing.Union[(list, tuple, dict, set, _T)], map_fn: _typing.Callable[([_T], _U)]) -> _typing.Union[(list, tuple, dict, set, _U)]:
    """Apply ``map_fn`` to every leaf of a nested list/tuple/dict/set
    structure, rebuilding containers with their original (sub)types."""
    container_type = type(target)
    if isinstance(target, (list, tuple)):
        return container_type([_recursive_map(x, map_fn) for x in target])
    if isinstance(target, dict):
        return container_type({k: _recursive_map(v, map_fn) for (k, v) in target.items()})
    if isinstance(target, set):
        return container_type({_recursive_map(x, map_fn) for x in target})
    return map_fn(target)
|
def _is_container(target: _typing.Any) -> bool:
flag = (isinstance(target, list) or isinstance(target, tuple) or isinstance(target, dict) or isinstance(target, set))
return flag
|
def _find_param_in_list(param: _torch.Tensor, l: _typing.Iterable[_torch.Tensor]) -> _typing.Optional[int]:
for (i, p) in enumerate(l):
if (p is param):
return i
else:
return None
|
def _get_param_mapping(module: _torch.nn.Module, seen: _typing.List[_torch.Tensor], mapping: _typing.List[int]) -> _typing.List[int]:
    """Build a mapping from every parameter slot (pre-order traversal,
    duplicates included) to its index in the deduplicated ``seen`` list;
    shared parameters map to the same index."""
    for param in module._parameters.values():
        if param is None:
            continue
        existing = _find_param_in_list(param, seen)
        if existing is not None:
            mapping.append(existing)
        else:
            mapping.append(len(seen))
            seen.append(param)
    for child in module._modules.values():
        _get_param_mapping(child, seen, mapping)
    return mapping
|
def flatten(x: _typing.Any) -> _typing.List[_typing.Any]:
    """Returns a flattened list of objects from a nested structure of dicts
    (values only), lists, sets and tuples; anything else is a leaf."""
    if isinstance(x, dict):
        children = x.values()
    elif isinstance(x, (list, set, tuple)):
        children = x
    else:
        return [x]
    flat: _typing.List[_typing.Any] = []
    for child in children:
        flat.extend(flatten(child))
    return flat
|
def get_func_params(module: _torch.nn.Module, device: _typing.Optional[_torch.device]=None, safe_copy: bool=True) -> _typing.List[_torch.Tensor]:
    """Returns a detached copy of module parameters which requires gradient
    (see ``_copy_tensor`` for the copy/cast semantics)."""
    copies = []
    for p in module.parameters():
        copies.append(_copy_tensor(p, safe_copy, device))
    return copies
|
def get_buffers_for_ddp_sync(model, classes_to_patch=DEFAULT_CLASSES_LIST_TO_PATCH):
    """Collect, in a deterministic order, the buffers of all submodules whose
    class appears in ``classes_to_patch`` (e.g. for manual DDP buffer sync).

    Returns:
        list of buffer tensors, ordered by (module access string, buffer name).
    """
    found = []
    for cls_to_patch in classes_to_patch:
        find_modules(model, '', cls_to_patch, found)
    # Sort by access string for a stable cross-process order.
    found = sorted(found, key=(lambda t: t[0]))
    buffers = []
    # Fix: loop variable renamed (was `model`, shadowing the argument).
    for (access_string, submodule) in found:
        buffers.extend(sorted(submodule.named_buffers(), key=(lambda t: t[0])))
    buffers = [t[1] for t in buffers]
    return buffers
|
class Partition(nn.Module):
    """
    Partition with recomputation.
    Should be used as an intermediate partition.

    Saves activations (detached inputs); the pop happens when the gradient is
    read (``get_grad``).

    NOTE: there are other classes (FirstPartition, LastPartition) to be used
    as needed.
    """
    # Class-level policy flags, overridden by subclasses.
    _REQ_GRAD = True           # record grads on stashed inputs
    _HAS_DUMMY_FORWARD = True  # patch buffer-mutating layers for the no-grad pass
    _CLONE_INPUTS = True       # clone detached inputs before stashing

    def __init__(self, layers, device, to_device=True, classes_list_to_patch=DEFAULT_CLASSES_LIST_TO_PATCH, req_grad=None):
        """
        :param layers: list of layers (or a single module)
        :param device: device of the partition
        :param to_device: move the partition to ``device`` at construction
        :param classes_list_to_patch: module classes patched for dummy forward
        :param req_grad: per-input requires-grad spec (dict, converted to tuple)
        """
        super(Partition, self).__init__()
        self.device = device
        if isinstance(layers, list):
            self.layers = nn.Sequential(*layers)
        elif isinstance(layers, nn.Module):
            self.layers = layers
        self.layers.device = device
        if _REPLACE_INPLACE:
            if (self._HAS_DUMMY_FORWARD or self._REQ_GRAD):
                # An in-place first layer would clobber the stashed input.
                is_replaced = replace_inplace_for_first_innermost_layer_(self.layers)
                if is_replaced:
                    print('-W- replace_inplace_for_first_innermost_layer_=True')
        self.dummy_forward_monkey_patcher = (DummyForwardMonkeyPatcher(self.layers, classes_list_to_patch) if self._HAS_DUMMY_FORWARD else None)
        if ((self.dummy_forward_monkey_patcher is not None) and (not self.dummy_forward_monkey_patcher.models)):
            # Nothing to patch: drop the patcher entirely.
            self.dummy_forward_monkey_patcher = None
        # micro_batch_idx -> stashed detached inputs (popped in get_grad).
        self.input_buffer = {}
        # micro_batch_idx -> recomputed outputs (popped in backward_from_recomputed).
        self.bwd_graph_head_buffer = {}
        self.rng_stasher = PartitionRngStasher(device=self.device)
        self.req_grad = req_grad_dict_to_tuple(req_grad)
        if to_device:
            self.to(self.device)

    def forward(self, x: TensorOrTensors, micro_batch_idx):
        """Run the partition under no_grad, stashing detached inputs and RNG
        state so the autograd graph can be rebuilt later by ``recompute``."""
        if self.training:
            if self.dummy_forward_monkey_patcher:
                # Sync buffers into the clones, then run the buffer-safe dummies.
                self.dummy_forward_monkey_patcher.sync()
                self.dummy_forward_monkey_patcher.replace_for_dummy()
            with torch.no_grad():
                if isinstance(x, Tensor):
                    if self._CLONE_INPUTS:
                        x = x.detach().clone().requires_grad_(self.req_grad[0])
                    else:
                        x = x.detach().requires_grad_(self.req_grad[0])
                    self.input_buffer[micro_batch_idx] = x
                    self.rng_stasher.stash_rng_state(micro_batch_idx)
                    x = self.layers(x)
                else:
                    # Multiple inputs: detach (+clone) with per-input
                    # requires_grad according to self.req_grad.
                    if self._CLONE_INPUTS:
                        x = list(get_dcr(x, self.req_grad))
                    else:
                        x = list(get_dr(x, self.req_grad))
                    self.input_buffer[micro_batch_idx] = x
                    self.rng_stasher.stash_rng_state(micro_batch_idx)
                    x = self.layers(*x)
            if self.dummy_forward_monkey_patcher:
                self.dummy_forward_monkey_patcher.replace_for_forward()
            return x
        else:
            with torch.no_grad():
                if self.dummy_forward_monkey_patcher:
                    self.dummy_forward_monkey_patcher.replace_for_forward()
                if isinstance(x, Tensor):
                    x = self.layers(x)
                else:
                    x = self.layers(*x)
                return x

    def recompute(self, micro_batch_idx):
        """Re-run the forward with grad enabled from the stashed inputs under
        the stashed RNG state; the input stays buffered for ``get_grad``.

        NOTE(review): unlike FirstPartition.recompute, a stashed single Tensor
        would be star-unpacked by ``self.layers(*x)`` here -- confirm inputs
        reaching this path are always sequences.
        """
        x = self.input_buffer[micro_batch_idx]
        if self.dummy_forward_monkey_patcher:
            self.dummy_forward_monkey_patcher.replace_for_forward()
        with torch.random.fork_rng(devices=self.rng_stasher.devices):
            self.rng_stasher.restore_rng_state(micro_batch_idx)
            x = self.layers(*x)
        self.bwd_graph_head_buffer[micro_batch_idx] = x

    def backward_from_recomputed(self, g, micro_batch_idx):
        """Backprop the received gradient ``g`` through the recomputed graph
        (pops the recomputed outputs)."""
        x = self.bwd_graph_head_buffer.pop(micro_batch_idx)
        (x, g) = filter_for_backward(x, g)
        torch.autograd.backward(x, g)

    def get_grad(self, micro_batch_idx):
        """Returns an iterable of input grads to send backwards (pops the
        stashed inputs)."""
        x = self.input_buffer.pop(micro_batch_idx)
        if isinstance(x, Tensor):
            return (x.grad,)
        else:
            return [y.grad for y in filter_req_grad_tensors_for_send(x)]

    def backward(self, g, **kw):
        # Not supported: use backward_from_recomputed instead.
        raise NotImplementedError()
|
class FirstPartition(Partition):
    """The first partition does not need to record gradients of stashed
    inputs. This may save some memory.

    We don't clone inputs. We don't record gradients for inputs.
    """
    _REQ_GRAD = False
    _HAS_DUMMY_FORWARD = True

    def __init__(self, *args, **kw):
        super(FirstPartition, self).__init__(*args, **kw)

    def forward(self, x: TensorOrTensors, micro_batch_idx):
        """Run the partition; in training mode stash the raw inputs and the
        RNG state for later recomputation (no grad is recorded on inputs)."""
        if self.training:
            if self.dummy_forward_monkey_patcher:
                # Sync buffers into the clones, then run the buffer-safe dummies.
                self.dummy_forward_monkey_patcher.sync()
                self.dummy_forward_monkey_patcher.replace_for_dummy()
            with torch.no_grad():
                # Inputs stashed as-is: no detach/clone needed on the first stage.
                self.input_buffer[micro_batch_idx] = x
                self.rng_stasher.stash_rng_state(micro_batch_idx)
                if isinstance(x, Tensor):
                    x = self.layers(x)
                else:
                    x = self.layers(*x)
            if self.dummy_forward_monkey_patcher:
                self.dummy_forward_monkey_patcher.replace_for_forward()
            return x
        else:
            with torch.no_grad():
                if self.dummy_forward_monkey_patcher:
                    self.dummy_forward_monkey_patcher.replace_for_forward()
                if isinstance(x, Tensor):
                    x = self.layers(x)
                else:
                    x = self.layers(*x)
                return x

    def recompute(self, micro_batch_idx):
        """Recompute the forward pass (with grad) from the stashed input;
        pops the input since ``get_grad`` is never used on this partition."""
        x = self.input_buffer.pop(micro_batch_idx)
        if self.dummy_forward_monkey_patcher:
            self.dummy_forward_monkey_patcher.replace_for_forward()
        with torch.random.fork_rng(devices=self.rng_stasher.devices):
            self.rng_stasher.restore_rng_state(micro_batch_idx)
            if isinstance(x, Tensor):
                x = self.layers(x)
            else:
                x = self.layers(*x)
        self.bwd_graph_head_buffer[micro_batch_idx] = x

    def get_grad(self, micro_batch_idx):
        # Bug fix: previously `return NotImplementedError()` handed callers an
        # exception INSTANCE instead of raising it (matching Partition.backward).
        raise NotImplementedError()
|
class LastPartition(Partition):
    # The last stage consumes its own output (loss side), so no recomputation
    # or dummy forward is needed; inputs still record grads to send backwards.
    _REQ_GRAD = True
    _HAS_DUMMY_FORWARD = False

    def __init__(self, *args, **kw):
        super(LastPartition, self).__init__(*args, **kw)

    def forward(self, x: TensorOrTensors, micro_batch_idx):
        """Run the partition once (no recomputation on the last stage).

        In training, detached inputs with requires_grad are stashed so their
        ``.grad`` can later be sent to the previous stage via ``get_grad``.
        """
        if self.training:
            if isinstance(x, Tensor):
                rg = self.req_grad[0]
                if (not rg):
                    warnings.warn('Backpropagation will not happen beyond last partition since it has a single input which does not requires grad')
                # Note: NOT cloned -- the last partition owns the received tensor.
                x = x.detach().requires_grad_(rg)
                self.input_buffer[micro_batch_idx] = x
                x = self.layers(x)
            else:
                x = list(get_dr(x, self.req_grad))
                # Only inputs that require grad are stashed; their .grad is
                # what gets sent backwards.
                self.input_buffer[micro_batch_idx] = list(filter_req_grad_tensors(x))
                x = self.layers(*x)
        else:
            with torch.no_grad():
                if isinstance(x, Tensor):
                    x = self.layers(x)
                else:
                    x = self.layers(*x)
        if (not isinstance(x, Tensor)):
            # Unwrap single-output containers to a plain tensor.
            assert (len(x) == 1)
            return x[0]
        return x

    def recompute(self, micro_batch_idx):
        # The last partition never recomputes; backward starts from its loss.
        raise NotImplementedError()
|
class PartitionWithoutRecomputation(nn.Module):
    """Pipeline stage that keeps its forward autograd graph alive instead of
    recomputing it at backward time (``recompute`` is a no-op)."""

    # Consulted together with _REQ_GRAD to decide whether in-place ops in the
    # first innermost layer must be replaced (see __init__).
    _HAS_DUMMY_FORWARD = False
    # Whether forward() clones detached tensor inputs before use, shielding
    # the sender's tensor from downstream in-place mutation.
    _CLONE_INPUTS = True

    def __init__(self, layers, device, to_device=True, _REQ_GRAD=True, req_grad=None):
        """
        Intermediate partition which does not do recomputation.
        HACK: has misleading names to be used with existing code.

        NOTE:
            (1) This partition should (ideally) be accompanied by weight stashing
                for async pipeline, but it also works without it.
            (2) use _REQ_GRAD=False for first partition
        """
        super().__init__()
        self.device = device
        self._REQ_GRAD = _REQ_GRAD
        # Accept either a list of modules (wrapped in Sequential) or a module.
        # NOTE(review): any other type leaves self.layers unset — presumably
        # callers always pass one of these two; verify against call sites.
        if isinstance(layers, list):
            self.layers = nn.Sequential(*layers)
        elif isinstance(layers, nn.Module):
            self.layers = layers
        if _REPLACE_INPLACE:
            if (self._HAS_DUMMY_FORWARD or self._REQ_GRAD):
                is_replaced = replace_inplace_for_first_innermost_layer_(self.layers)
                if is_replaced:
                    print('-W- replace_inplace_for_first_innermost_layer_=True')
                warnings.warn('deprecated replace inplace, can turn this on and off manually')
        if _REQ_GRAD:
            # Detached inputs stashed per micro batch until get_grad() pops them.
            self.input_buffer = {}
        else:
            # Inputs carry no grad, so get_grad() is meaningless: rebind it on
            # this *instance* only (via MethodType), leaving the class-level
            # implementation intact for other instances.
            def _get_grad(self, micro_batch_idx):
                raise NotImplementedError()
            self.get_grad = types.MethodType(_get_grad, self)
        # Forward outputs (graph heads) stashed per micro batch for backward.
        self.bwd_graph_head_buffer = {}
        self.req_grad = req_grad_dict_to_tuple(req_grad)
        if to_device:
            self.to(self.device)

    def forward(self, x: TensorOrTensors, micro_batch_idx):
        """Run the stage.

        In training mode, detaches (and optionally clones) the inputs, stashes
        grad-requiring inputs and the output graph head per micro batch; in
        eval mode runs under no_grad.
        """
        if self.training:
            if isinstance(x, Tensor):
                if self._REQ_GRAD:
                    # Detach to cut the cross-partition autograd edge; clone when
                    # requested so in-place ops cannot corrupt the sender's tensor.
                    if self._CLONE_INPUTS:
                        x = x.detach().clone().requires_grad_(self.req_grad[0])
                    else:
                        x = x.detach().requires_grad_(self.req_grad[0])
                    self.input_buffer[micro_batch_idx] = x
                x = self.layers(x)
            else:
                if self._REQ_GRAD:
                    # get_dcr/get_dr: detach(+clone)/detach each input per its
                    # req_grad flag — presumably defined earlier in this file.
                    if self._CLONE_INPUTS:
                        x = list(get_dcr(x, self.req_grad))
                    else:
                        x = list(get_dr(x, self.req_grad))
                    self.input_buffer[micro_batch_idx] = x
                x = self.layers(*x)
            # Keep the live graph head so backward_from_recomputed can use it.
            self.bwd_graph_head_buffer[micro_batch_idx] = x
            return x
        else:
            with torch.no_grad():
                if isinstance(x, Tensor):
                    x = self.layers(x)
                else:
                    x = self.layers(*x)
            return x

    def recompute(self, micro_batch_idx):
        # Intentionally a no-op: the forward graph was kept alive, so there is
        # nothing to recompute (name kept for interface compatibility).
        pass

    def backward_from_recomputed(self, g, micro_batch_idx):
        """Backprop through the *stored* forward graph using received grads g
        (despite the name — see the HACK note in __init__'s docstring)."""
        x = self.bwd_graph_head_buffer.pop(micro_batch_idx)
        (x, g) = filter_for_backward(x, g)
        torch.autograd.backward(x, g)

    def get_grad(self, micro_batch_idx):
        """Return an iterable of grads of the stashed inputs for this micro batch
        (pops the stash — callable once per micro batch)."""
        x = self.input_buffer.pop(micro_batch_idx)
        if isinstance(x, Tensor):
            return (x.grad,)
        else:
            return [y.grad for y in filter_req_grad_tensors_for_send(x)]
|
class FirstPartitionWithoutRecomputation(PartitionWithoutRecomputation):
    """HACK: lets GPipe use a no-recomputation stage as its *first* partition."""

    # The first stage receives raw data, never activations from a previous
    # stage, so there is nothing to protect by cloning.
    _CLONE_INPUTS = False

    def __init__(self, *args, **kw):
        # Inputs of the first stage never require grad.
        super().__init__(*args, _REQ_GRAD=False, **kw)
|
class GPipePartition(nn.Module):
    """Pairs a recomputing partition with a non-recomputing twin and routes
    every call to whichever matches the current micro batch: the *last* micro
    batch skips recomputation, all others use it.  ``is_last_micro_batch``
    must be kept up to date by the caller before each call.
    """

    RECOMP_PARTITION_CLS = Partition
    NO_RECOMP_PARTITION_CLS = PartitionWithoutRecomputation
    _CLONE_INPUTS = True

    def __init__(self, *args, **kw):
        super().__init__()
        self.is_last_micro_batch = False
        self.recomputation_partition = self.RECOMP_PARTITION_CLS(*args, **kw)
        self.no_recomputation_partition = self.NO_RECOMP_PARTITION_CLS(*args, **kw)
        # Propagate the cloning policy of this wrapper to both twins.
        for partition in (self.recomputation_partition, self.no_recomputation_partition):
            partition._CLONE_INPUTS = self._CLONE_INPUTS

    def _active_partition(self):
        # The last micro batch is handled without recomputation.
        if self.is_last_micro_batch:
            return self.no_recomputation_partition
        return self.recomputation_partition

    def forward(self, *args, **kw):
        return self._active_partition().forward(*args, **kw)

    def recompute(self, micro_batch_idx):
        # Only the recomputing twin ever recomputes; the last micro batch kept
        # its graph alive and needs nothing here.
        if not self.is_last_micro_batch:
            self.recomputation_partition.recompute(micro_batch_idx)

    def backward_from_recomputed(self, g, micro_batch_idx):
        self._active_partition().backward_from_recomputed(g, micro_batch_idx)

    def get_grad(self, micro_batch_idx):
        """Return a list of grads for the stashed inputs of this micro batch."""
        return self._active_partition().get_grad(micro_batch_idx)

    def pop_saved_graph_head(self, micro_batch_idx):
        """HACK/TODO: the last partition's backward is driven by the trainer
        (which may or may not own a loss_fn), so
        ``backward_from_recomputed(None, batch_idx)`` — which normally pops the
        stored graph head and runs loss.backward() — is never called there.
        Pop the recomputation output ourselves and hand it to the trainer.
        """
        return self._active_partition().bwd_graph_head_buffer.pop(micro_batch_idx)

    @property
    def layers(self):
        return self.recomputation_partition.layers
|
class GPipeFirstPartition(GPipePartition):
    """GPipe first stage: skips recomputation on the last micro batch and,
    since its inputs are raw data rather than activations, never clones them."""

    RECOMP_PARTITION_CLS = FirstPartition
    NO_RECOMP_PARTITION_CLS = FirstPartitionWithoutRecomputation
    _CLONE_INPUTS = False

    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
|
class GPipeLastPartition(GPipePartition):
    """GPipe final stage.

    NOTE: to run ``backward_from_recomputed``, just pass ``None`` as the
    grad_tensor.  The output is always unwrapped to a single tensor.
    """

    RECOMP_PARTITION_CLS = Partition
    NO_RECOMP_PARTITION_CLS = LastPartition
    _CLONE_INPUTS = True

    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)

    def forward(self, x: TensorOrTensors, micro_batch_idx):
        out = super().forward(x, micro_batch_idx)
        if isinstance(out, Tensor):
            return out
        # The pipeline tail must emit exactly one tensor.
        assert len(out) == 1
        return out[0]
|
def filter_for_backward(x, g):
    """Pair tensors with their incoming gradients, dropping pairs that cannot
    participate in backward.

    Keeps only (tensor, grad) pairs where the tensor requires grad and the
    grad is not None, warning (via print) about the mismatched cases.
    Returns a ``(tensors, grad_tensors)`` pair for ``torch.autograd.backward``.
    """
    candidates = list(filter_req_grad_tensors(x))
    assert len(candidates) == len(g)
    kept_tensors = []
    kept_grads = []
    for (t, gt) in zip(candidates, g):
        if not t.requires_grad:
            # Received a gradient for a tensor that cannot accept one.
            if gt is not None:
                print(f'-W- calculated and sent a grad tensor for a tensor which does not requires grad, {gt.shape}')
            continue
        if gt is None:
            if _VERBOSE_ON_NONE_GRADIENTS:
                print('-W- filtering NONE grad')
            continue
        kept_tensors.append(t)
        kept_grads.append(gt)
    return (kept_tensors, kept_grads)
|
def req_grad_dict_to_tuple(req_grad: Optional[Dict[Any, bool]]) -> Tuple[bool, ...]:
    """Flatten a per-input requires-grad mapping into a tuple of flags.

    Relies on dict insertion order (guaranteed since Python 3.7) matching the
    positional order of the partition's inputs.

    Args:
        req_grad: mapping from input key to its requires-grad flag, or None
            (PartitionWithoutRecomputation.__init__ defaults req_grad=None;
            previously that crashed here with AttributeError).

    Returns:
        Tuple of the flags in insertion order; empty tuple for None.
    """
    # Fixes: annotation Tuple[bool] denotes a 1-tuple — a variadic tuple is
    # Tuple[bool, ...]; and the keys were never used, so iterate .values().
    if req_grad is None:
        return ()
    return tuple(req_grad.values())
|
def assert_same_size(x, g):
    """Assert that two iterables hold the same number of elements.

    Materializes each iterable exactly once.  The original evaluated
    ``len(list(x))`` both in the condition and in the failure message, so for
    one-shot iterators the message always misreported the sizes as (0, 0);
    computing the lengths up front fixes that and avoids double traversal.

    Raises:
        AssertionError: with message ``str((len_x, len_g))`` on mismatch.
    """
    x_len = len(list(x))
    g_len = len(list(g))
    assert x_len == g_len, str((x_len, g_len))
|
# NOTE: removed trailing non-code residue ("Subsets and Splits" / "No community
# queries yet" — dataset-page boilerplate accidentally appended to the source).