code
stringlengths
114
1.05M
path
stringlengths
3
312
quality_prob
float64
0.5
0.99
learning_prob
float64
0.2
1
filename
stringlengths
3
168
kind
stringclasses
1 value
import torch
from .optimizer import Optimizer


class RMSprop(Optimizer):
    r"""Implements RMSprop algorithm.

    Proposed by G. Hinton in his
    `course <http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf>`_.

    The centered version first appears in `Generating Sequences
    With Recurrent Neural Networks <https://arxiv.org/pdf/1308.0850v5.pdf>`_.

    The implementation here takes the square root of the gradient average before
    adding epsilon (note that TensorFlow interchanges these two operations). The effective
    learning rate is thus :math:`\alpha/(\sqrt{v} + \epsilon)` where :math:`\alpha`
    is the scheduled learning rate and :math:`v` is the weighted moving average
    of the squared gradient.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-2)
        momentum (float, optional): momentum factor (default: 0)
        alpha (float, optional): smoothing constant (default: 0.99)
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        centered (bool, optional) : if ``True``, compute the centered RMSProp,
            the gradient is normalized by an estimation of its variance
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
    """

    def __init__(self, params, lr=1e-2, alpha=0.99, eps=1e-8, weight_decay=0, momentum=0, centered=False):
        # Validate all hyperparameters up front so a bad value fails loudly
        # at construction time rather than producing NaNs mid-training.
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= momentum:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if not 0.0 <= weight_decay:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        if not 0.0 <= alpha:
            raise ValueError("Invalid alpha value: {}".format(alpha))

        # `defaults` seeds every param group that does not override a key.
        defaults = dict(lr=lr, momentum=momentum, alpha=alpha, eps=eps, centered=centered, weight_decay=weight_decay)
        super(RMSprop, self).__init__(params, defaults)

    def __setstate__(self, state):
        """Restore pickled state, backfilling options added after older checkpoints."""
        super(RMSprop, self).__setstate__(state)
        # Checkpoints created before `momentum`/`centered` existed lack these
        # keys; default them so `step()` can read them unconditionally.
        for group in self.param_groups:
            group.setdefault('momentum', 0)
            group.setdefault('centered', False)

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            # Re-enable autograd inside the closure since step() runs under no_grad.
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad
                if grad.is_sparse:
                    raise RuntimeError('RMSprop does not support sparse gradients')
                state = self.state[p]

                # State initialization: lazily create per-parameter buffers on
                # the first step so unused options cost no memory.
                if len(state) == 0:
                    state['step'] = 0
                    state['square_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    if group['momentum'] > 0:
                        state['momentum_buffer'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    if group['centered']:
                        state['grad_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                square_avg = state['square_avg']
                alpha = group['alpha']

                state['step'] += 1

                # L2 penalty: fold weight decay into the gradient (out-of-place
                # so p.grad itself is left untouched).
                if group['weight_decay'] != 0:
                    grad = grad.add(p, alpha=group['weight_decay'])

                # Exponential moving average of squared gradients:
                # square_avg = alpha * square_avg + (1 - alpha) * grad^2
                square_avg.mul_(alpha).addcmul_(grad, grad, value=1 - alpha)

                if group['centered']:
                    # Centered variant: also track the mean gradient and
                    # normalize by an estimate of the gradient variance
                    # (E[g^2] - E[g]^2) instead of the raw second moment.
                    grad_avg = state['grad_avg']
                    grad_avg.mul_(alpha).add_(grad, alpha=1 - alpha)
                    avg = square_avg.addcmul(grad_avg, grad_avg, value=-1).sqrt_().add_(group['eps'])
                else:
                    # sqrt() is out-of-place here so square_avg is preserved;
                    # eps is added AFTER the square root (TensorFlow differs).
                    avg = square_avg.sqrt().add_(group['eps'])

                if group['momentum'] > 0:
                    # Heavy-ball momentum on the preconditioned gradient.
                    buf = state['momentum_buffer']
                    buf.mul_(group['momentum']).addcdiv_(grad, avg)
                    p.add_(buf, alpha=-group['lr'])
                else:
                    # Plain update: p -= lr * grad / avg
                    p.addcdiv_(grad, avg, value=-group['lr'])

        return loss
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/optim/rmsprop.py
0.934197
0.732137
rmsprop.py
pypi
import torch
from .optimizer import Optimizer


class Adagrad(Optimizer):
    """Implements Adagrad algorithm.

    It has been proposed in `Adaptive Subgradient Methods for Online Learning
    and Stochastic Optimization`_.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-2)
        lr_decay (float, optional): learning rate decay (default: 0)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-10)

    .. _Adaptive Subgradient Methods for Online Learning and Stochastic
        Optimization: http://jmlr.org/papers/v12/duchi11a.html
    """

    def __init__(self, params, lr=1e-2, lr_decay=0, weight_decay=0, initial_accumulator_value=0, eps=1e-10):
        # Validate hyperparameters eagerly so misconfiguration fails at
        # construction time instead of corrupting the accumulator later.
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= lr_decay:
            raise ValueError("Invalid lr_decay value: {}".format(lr_decay))
        if not 0.0 <= weight_decay:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        if not 0.0 <= initial_accumulator_value:
            raise ValueError("Invalid initial_accumulator_value value: {}".format(initial_accumulator_value))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))

        defaults = dict(lr=lr, lr_decay=lr_decay, eps=eps, weight_decay=weight_decay,
                        initial_accumulator_value=initial_accumulator_value)
        super(Adagrad, self).__init__(params, defaults)

        # Unlike most optimizers, Adagrad initializes its per-parameter state
        # eagerly: 'sum' accumulates squared gradients over the whole run and
        # may start from a non-zero value (initial_accumulator_value).
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                state['step'] = 0
                state['sum'] = torch.full_like(p, initial_accumulator_value, memory_format=torch.preserve_format)

    def share_memory(self):
        """Move the accumulator buffers into shared memory (for e.g. Hogwild training)."""
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                state['sum'].share_memory_()

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            # Re-enable autograd inside the closure since step() runs under no_grad.
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue

                grad = p.grad
                state = self.state[p]

                state['step'] += 1

                if group['weight_decay'] != 0:
                    # L2 penalty would require adding a dense tensor to a
                    # sparse gradient, which is unsupported.
                    if p.grad.is_sparse:
                        raise RuntimeError("weight_decay option is not compatible with sparse gradients")
                    grad = grad.add(p, alpha=group['weight_decay'])

                # Decayed learning rate: shrinks harmonically with step count.
                clr = group['lr'] / (1 + (state['step'] - 1) * group['lr_decay'])

                if grad.is_sparse:
                    grad = grad.coalesce()  # the update is non-linear so indices must be unique
                    grad_indices = grad._indices()
                    grad_values = grad._values()
                    size = grad.size()

                    def make_sparse(values):
                        # Rebuild a sparse tensor with the gradient's sparsity
                        # pattern but new values; empty pattern -> empty tensor.
                        constructor = grad.new
                        if grad_indices.dim() == 0 or values.dim() == 0:
                            return constructor().resize_as_(grad)
                        return constructor(grad_indices, values, size)
                    # Accumulate squared gradients only at the touched indices,
                    # then scale those entries by 1/(sqrt(sum) + eps).
                    state['sum'].add_(make_sparse(grad_values.pow(2)))
                    std = state['sum'].sparse_mask(grad)
                    std_values = std._values().sqrt_().add_(group['eps'])
                    p.add_(make_sparse(grad_values / std_values), alpha=-clr)
                else:
                    # Dense path: sum += grad^2; p -= clr * grad / (sqrt(sum) + eps)
                    state['sum'].addcmul_(grad, grad, value=1)
                    std = state['sum'].sqrt().add_(group['eps'])
                    p.addcdiv_(grad, std, value=-clr)

        return loss
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/optim/adagrad.py
0.908942
0.536009
adagrad.py
pypi
import types import math from torch._six import inf from functools import wraps import warnings import weakref from collections import Counter from bisect import bisect_right from .optimizer import Optimizer EPOCH_DEPRECATION_WARNING = ( "The epoch parameter in `scheduler.step()` was not necessary and is being " "deprecated where possible. Please use `scheduler.step()` to step the " "scheduler. During the deprecation, if epoch is different from None, the " "closed form is used instead of the new chainable form, where available. " "Please open an issue if you are unable to replicate your use case: " "https://github.com/pytorch/pytorch/issues/new/choose." ) SAVE_STATE_WARNING = "Please also save or load the state of the optimzer when saving or loading the scheduler." class _LRScheduler(object): def __init__(self, optimizer, last_epoch=-1): # Attach optimizer if not isinstance(optimizer, Optimizer): raise TypeError('{} is not an Optimizer'.format( type(optimizer).__name__)) self.optimizer = optimizer # Initialize epoch and base learning rates if last_epoch == -1: for group in optimizer.param_groups: group.setdefault('initial_lr', group['lr']) else: for i, group in enumerate(optimizer.param_groups): if 'initial_lr' not in group: raise KeyError("param 'initial_lr' is not specified " "in param_groups[{}] when resuming an optimizer".format(i)) self.base_lrs = list(map(lambda group: group['initial_lr'], optimizer.param_groups)) self.last_epoch = last_epoch # Following https://github.com/pytorch/pytorch/issues/20124 # We would like to ensure that `lr_scheduler.step()` is called after # `optimizer.step()` def with_counter(method): if getattr(method, '_with_counter', False): # `optimizer.step()` has already been replaced, return. return method # Keep a weak reference to the optimizer instance to prevent # cyclic references. instance_ref = weakref.ref(method.__self__) # Get the unbound method for the same purpose. 
func = method.__func__ cls = instance_ref().__class__ del method @wraps(func) def wrapper(*args, **kwargs): instance = instance_ref() instance._step_count += 1 wrapped = func.__get__(instance, cls) return wrapped(*args, **kwargs) # Note that the returned function here is no longer a bound method, # so attributes like `__func__` and `__self__` no longer exist. wrapper._with_counter = True return wrapper self.optimizer.step = with_counter(self.optimizer.step) self.optimizer._step_count = 0 self._step_count = 0 self.step() def state_dict(self): """Returns the state of the scheduler as a :class:`dict`. It contains an entry for every variable in self.__dict__ which is not the optimizer. """ return {key: value for key, value in self.__dict__.items() if key != 'optimizer'} def load_state_dict(self, state_dict): """Loads the schedulers state. Arguments: state_dict (dict): scheduler state. Should be an object returned from a call to :meth:`state_dict`. """ self.__dict__.update(state_dict) def get_last_lr(self): """ Return last computed learning rate by current scheduler. """ return self._last_lr def get_lr(self): # Compute learning rate using chainable form of the scheduler raise NotImplementedError def step(self, epoch=None): # Raise a warning if old pattern is detected # https://github.com/pytorch/pytorch/issues/20124 if self._step_count == 1: if not hasattr(self.optimizer.step, "_with_counter"): warnings.warn("Seems like `optimizer.step()` has been overridden after learning rate scheduler " "initialization. Please, make sure to call `optimizer.step()` before " "`lr_scheduler.step()`. See more details at " "https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate", UserWarning) # Just check if there were two first lr_scheduler.step() calls before optimizer.step() elif self.optimizer._step_count < 1: warnings.warn("Detected call of `lr_scheduler.step()` before `optimizer.step()`. 
" "In PyTorch 1.1.0 and later, you should call them in the opposite order: " "`optimizer.step()` before `lr_scheduler.step()`. Failure to do this " "will result in PyTorch skipping the first value of the learning rate schedule. " "See more details at " "https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate", UserWarning) self._step_count += 1 class _enable_get_lr_call: def __init__(self, o): self.o = o def __enter__(self): self.o._get_lr_called_within_step = True return self def __exit__(self, type, value, traceback): self.o._get_lr_called_within_step = False with _enable_get_lr_call(self): if epoch is None: self.last_epoch += 1 values = self.get_lr() else: warnings.warn(EPOCH_DEPRECATION_WARNING, UserWarning) self.last_epoch = epoch if hasattr(self, "_get_closed_form_lr"): values = self._get_closed_form_lr() else: values = self.get_lr() for param_group, lr in zip(self.optimizer.param_groups, values): param_group['lr'] = lr self._last_lr = [group['lr'] for group in self.optimizer.param_groups] class LambdaLR(_LRScheduler): """Sets the learning rate of each parameter group to the initial lr times a given function. When last_epoch=-1, sets initial lr as lr. Args: optimizer (Optimizer): Wrapped optimizer. lr_lambda (function or list): A function which computes a multiplicative factor given an integer parameter epoch, or a list of such functions, one for each group in optimizer.param_groups. last_epoch (int): The index of last epoch. Default: -1. Example: >>> # Assuming optimizer has two groups. >>> lambda1 = lambda epoch: epoch // 30 >>> lambda2 = lambda epoch: 0.95 ** epoch >>> scheduler = LambdaLR(optimizer, lr_lambda=[lambda1, lambda2]) >>> for epoch in range(100): >>> train(...) >>> validate(...) 
>>> scheduler.step() """ def __init__(self, optimizer, lr_lambda, last_epoch=-1): self.optimizer = optimizer if not isinstance(lr_lambda, list) and not isinstance(lr_lambda, tuple): self.lr_lambdas = [lr_lambda] * len(optimizer.param_groups) else: if len(lr_lambda) != len(optimizer.param_groups): raise ValueError("Expected {} lr_lambdas, but got {}".format( len(optimizer.param_groups), len(lr_lambda))) self.lr_lambdas = list(lr_lambda) self.last_epoch = last_epoch super(LambdaLR, self).__init__(optimizer, last_epoch) def state_dict(self): """Returns the state of the scheduler as a :class:`dict`. It contains an entry for every variable in self.__dict__ which is not the optimizer. The learning rate lambda functions will only be saved if they are callable objects and not if they are functions or lambdas. """ warnings.warn(SAVE_STATE_WARNING, UserWarning) state_dict = {key: value for key, value in self.__dict__.items() if key not in ('optimizer', 'lr_lambdas')} state_dict['lr_lambdas'] = [None] * len(self.lr_lambdas) for idx, fn in enumerate(self.lr_lambdas): if not isinstance(fn, types.FunctionType): state_dict['lr_lambdas'][idx] = fn.__dict__.copy() return state_dict def load_state_dict(self, state_dict): """Loads the schedulers state. Arguments: state_dict (dict): scheduler state. Should be an object returned from a call to :meth:`state_dict`. 
""" warnings.warn(SAVE_STATE_WARNING, UserWarning) lr_lambdas = state_dict.pop('lr_lambdas') self.__dict__.update(state_dict) # Restore state_dict keys in order to prevent side effects # https://github.com/pytorch/pytorch/issues/32756 state_dict['lr_lambdas'] = lr_lambdas for idx, fn in enumerate(lr_lambdas): if fn is not None: self.lr_lambdas[idx].__dict__.update(fn) def get_lr(self): if not self._get_lr_called_within_step: warnings.warn("To get the last learning rate computed by the scheduler, " "please use `get_last_lr()`.") return [base_lr * lmbda(self.last_epoch) for lmbda, base_lr in zip(self.lr_lambdas, self.base_lrs)] class MultiplicativeLR(_LRScheduler): """Multiply the learning rate of each parameter group by the factor given in the specified function. When last_epoch=-1, sets initial lr as lr. Args: optimizer (Optimizer): Wrapped optimizer. lr_lambda (function or list): A function which computes a multiplicative factor given an integer parameter epoch, or a list of such functions, one for each group in optimizer.param_groups. last_epoch (int): The index of last epoch. Default: -1. Example: >>> lmbda = lambda epoch: 0.95 >>> scheduler = MultiplicativeLR(optimizer, lr_lambda=lmbda) >>> for epoch in range(100): >>> train(...) >>> validate(...) >>> scheduler.step() """ def __init__(self, optimizer, lr_lambda, last_epoch=-1): self.optimizer = optimizer if not isinstance(lr_lambda, list) and not isinstance(lr_lambda, tuple): self.lr_lambdas = [lr_lambda] * len(optimizer.param_groups) else: if len(lr_lambda) != len(optimizer.param_groups): raise ValueError("Expected {} lr_lambdas, but got {}".format( len(optimizer.param_groups), len(lr_lambda))) self.lr_lambdas = list(lr_lambda) self.last_epoch = last_epoch super(MultiplicativeLR, self).__init__(optimizer, last_epoch) def state_dict(self): """Returns the state of the scheduler as a :class:`dict`. It contains an entry for every variable in self.__dict__ which is not the optimizer. 
The learning rate lambda functions will only be saved if they are callable objects and not if they are functions or lambdas. """ state_dict = {key: value for key, value in self.__dict__.items() if key not in ('optimizer', 'lr_lambdas')} state_dict['lr_lambdas'] = [None] * len(self.lr_lambdas) for idx, fn in enumerate(self.lr_lambdas): if not isinstance(fn, types.FunctionType): state_dict['lr_lambdas'][idx] = fn.__dict__.copy() return state_dict def load_state_dict(self, state_dict): """Loads the schedulers state. Arguments: state_dict (dict): scheduler state. Should be an object returned from a call to :meth:`state_dict`. """ lr_lambdas = state_dict.pop('lr_lambdas') self.__dict__.update(state_dict) # Restore state_dict keys in order to prevent side effects # https://github.com/pytorch/pytorch/issues/32756 state_dict['lr_lambdas'] = lr_lambdas for idx, fn in enumerate(lr_lambdas): if fn is not None: self.lr_lambdas[idx].__dict__.update(fn) def get_lr(self): if not self._get_lr_called_within_step: warnings.warn("To get the last learning rate computed by the scheduler, " "please use `get_last_lr()`.", UserWarning) if self.last_epoch > 0: return [group['lr'] * lmbda(self.last_epoch) for lmbda, group in zip(self.lr_lambdas, self.optimizer.param_groups)] else: return list(self.base_lrs) class StepLR(_LRScheduler): """Decays the learning rate of each parameter group by gamma every step_size epochs. Notice that such decay can happen simultaneously with other changes to the learning rate from outside this scheduler. When last_epoch=-1, sets initial lr as lr. Args: optimizer (Optimizer): Wrapped optimizer. step_size (int): Period of learning rate decay. gamma (float): Multiplicative factor of learning rate decay. Default: 0.1. last_epoch (int): The index of last epoch. Default: -1. Example: >>> # Assuming optimizer uses lr = 0.05 for all groups >>> # lr = 0.05 if epoch < 30 >>> # lr = 0.005 if 30 <= epoch < 60 >>> # lr = 0.0005 if 60 <= epoch < 90 >>> # ... 
>>> scheduler = StepLR(optimizer, step_size=30, gamma=0.1) >>> for epoch in range(100): >>> train(...) >>> validate(...) >>> scheduler.step() """ def __init__(self, optimizer, step_size, gamma=0.1, last_epoch=-1): self.step_size = step_size self.gamma = gamma super(StepLR, self).__init__(optimizer, last_epoch) def get_lr(self): if not self._get_lr_called_within_step: warnings.warn("To get the last learning rate computed by the scheduler, " "please use `get_last_lr()`.", UserWarning) if (self.last_epoch == 0) or (self.last_epoch % self.step_size != 0): return [group['lr'] for group in self.optimizer.param_groups] return [group['lr'] * self.gamma for group in self.optimizer.param_groups] def _get_closed_form_lr(self): return [base_lr * self.gamma ** (self.last_epoch // self.step_size) for base_lr in self.base_lrs] class MultiStepLR(_LRScheduler): """Decays the learning rate of each parameter group by gamma once the number of epoch reaches one of the milestones. Notice that such decay can happen simultaneously with other changes to the learning rate from outside this scheduler. When last_epoch=-1, sets initial lr as lr. Args: optimizer (Optimizer): Wrapped optimizer. milestones (list): List of epoch indices. Must be increasing. gamma (float): Multiplicative factor of learning rate decay. Default: 0.1. last_epoch (int): The index of last epoch. Default: -1. Example: >>> # Assuming optimizer uses lr = 0.05 for all groups >>> # lr = 0.05 if epoch < 30 >>> # lr = 0.005 if 30 <= epoch < 80 >>> # lr = 0.0005 if epoch >= 80 >>> scheduler = MultiStepLR(optimizer, milestones=[30,80], gamma=0.1) >>> for epoch in range(100): >>> train(...) >>> validate(...) 
>>> scheduler.step() """ def __init__(self, optimizer, milestones, gamma=0.1, last_epoch=-1): self.milestones = Counter(milestones) self.gamma = gamma super(MultiStepLR, self).__init__(optimizer, last_epoch) def get_lr(self): if not self._get_lr_called_within_step: warnings.warn("To get the last learning rate computed by the scheduler, " "please use `get_last_lr()`.", UserWarning) if self.last_epoch not in self.milestones: return [group['lr'] for group in self.optimizer.param_groups] return [group['lr'] * self.gamma ** self.milestones[self.last_epoch] for group in self.optimizer.param_groups] def _get_closed_form_lr(self): milestones = list(sorted(self.milestones.elements())) return [base_lr * self.gamma ** bisect_right(milestones, self.last_epoch) for base_lr in self.base_lrs] class ExponentialLR(_LRScheduler): """Decays the learning rate of each parameter group by gamma every epoch. When last_epoch=-1, sets initial lr as lr. Args: optimizer (Optimizer): Wrapped optimizer. gamma (float): Multiplicative factor of learning rate decay. last_epoch (int): The index of last epoch. Default: -1. """ def __init__(self, optimizer, gamma, last_epoch=-1): self.gamma = gamma super(ExponentialLR, self).__init__(optimizer, last_epoch) def get_lr(self): if not self._get_lr_called_within_step: warnings.warn("To get the last learning rate computed by the scheduler, " "please use `get_last_lr()`.", UserWarning) if self.last_epoch == 0: return self.base_lrs return [group['lr'] * self.gamma for group in self.optimizer.param_groups] def _get_closed_form_lr(self): return [base_lr * self.gamma ** self.last_epoch for base_lr in self.base_lrs] class CosineAnnealingLR(_LRScheduler): r"""Set the learning rate of each parameter group using a cosine annealing schedule, where :math:`\eta_{max}` is set to the initial lr and :math:`T_{cur}` is the number of epochs since the last restart in SGDR: .. 
math:: \begin{aligned} \eta_t & = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})\left(1 + \cos\left(\frac{T_{cur}}{T_{max}}\pi\right)\right), & T_{cur} \neq (2k+1)T_{max}; \\ \eta_{t+1} & = \eta_{t} + \frac{1}{2}(\eta_{max} - \eta_{min}) \left(1 - \cos\left(\frac{1}{T_{max}}\pi\right)\right), & T_{cur} = (2k+1)T_{max}. \end{aligned} When last_epoch=-1, sets initial lr as lr. Notice that because the schedule is defined recursively, the learning rate can be simultaneously modified outside this scheduler by other operators. If the learning rate is set solely by this scheduler, the learning rate at each step becomes: .. math:: \eta_t = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})\left(1 + \cos\left(\frac{T_{cur}}{T_{max}}\pi\right)\right) It has been proposed in `SGDR: Stochastic Gradient Descent with Warm Restarts`_. Note that this only implements the cosine annealing part of SGDR, and not the restarts. Args: optimizer (Optimizer): Wrapped optimizer. T_max (int): Maximum number of iterations. eta_min (float): Minimum learning rate. Default: 0. last_epoch (int): The index of last epoch. Default: -1. .. 
_SGDR\: Stochastic Gradient Descent with Warm Restarts: https://arxiv.org/abs/1608.03983 """ def __init__(self, optimizer, T_max, eta_min=0, last_epoch=-1): self.T_max = T_max self.eta_min = eta_min super(CosineAnnealingLR, self).__init__(optimizer, last_epoch) def get_lr(self): if not self._get_lr_called_within_step: warnings.warn("To get the last learning rate computed by the scheduler, " "please use `get_last_lr()`.", UserWarning) if self.last_epoch == 0: return self.base_lrs elif (self.last_epoch - 1 - self.T_max) % (2 * self.T_max) == 0: return [group['lr'] + (base_lr - self.eta_min) * (1 - math.cos(math.pi / self.T_max)) / 2 for base_lr, group in zip(self.base_lrs, self.optimizer.param_groups)] return [(1 + math.cos(math.pi * self.last_epoch / self.T_max)) / (1 + math.cos(math.pi * (self.last_epoch - 1) / self.T_max)) * (group['lr'] - self.eta_min) + self.eta_min for group in self.optimizer.param_groups] def _get_closed_form_lr(self): return [self.eta_min + (base_lr - self.eta_min) * (1 + math.cos(math.pi * self.last_epoch / self.T_max)) / 2 for base_lr in self.base_lrs] class ReduceLROnPlateau(object): """Reduce learning rate when a metric has stopped improving. Models often benefit from reducing the learning rate by a factor of 2-10 once learning stagnates. This scheduler reads a metrics quantity and if no improvement is seen for a 'patience' number of epochs, the learning rate is reduced. Args: optimizer (Optimizer): Wrapped optimizer. mode (str): One of `min`, `max`. In `min` mode, lr will be reduced when the quantity monitored has stopped decreasing; in `max` mode it will be reduced when the quantity monitored has stopped increasing. Default: 'min'. factor (float): Factor by which the learning rate will be reduced. new_lr = lr * factor. Default: 0.1. patience (int): Number of epochs with no improvement after which learning rate will be reduced. 
For example, if `patience = 2`, then we will ignore the first 2 epochs with no improvement, and will only decrease the LR after the 3rd epoch if the loss still hasn't improved then. Default: 10. verbose (bool): If ``True``, prints a message to stdout for each update. Default: ``False``. threshold (float): Threshold for measuring the new optimum, to only focus on significant changes. Default: 1e-4. threshold_mode (str): One of `rel`, `abs`. In `rel` mode, dynamic_threshold = best * ( 1 + threshold ) in 'max' mode or best * ( 1 - threshold ) in `min` mode. In `abs` mode, dynamic_threshold = best + threshold in `max` mode or best - threshold in `min` mode. Default: 'rel'. cooldown (int): Number of epochs to wait before resuming normal operation after lr has been reduced. Default: 0. min_lr (float or list): A scalar or a list of scalars. A lower bound on the learning rate of all param groups or each group respectively. Default: 0. eps (float): Minimal decay applied to lr. If the difference between new and old lr is smaller than eps, the update is ignored. Default: 1e-8. Example: >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9) >>> scheduler = ReduceLROnPlateau(optimizer, 'min') >>> for epoch in range(10): >>> train(...) >>> val_loss = validate(...) 
>>> # Note that step should be called after validate() >>> scheduler.step(val_loss) """ def __init__(self, optimizer, mode='min', factor=0.1, patience=10, verbose=False, threshold=1e-4, threshold_mode='rel', cooldown=0, min_lr=0, eps=1e-8): if factor >= 1.0: raise ValueError('Factor should be < 1.0.') self.factor = factor # Attach optimizer if not isinstance(optimizer, Optimizer): raise TypeError('{} is not an Optimizer'.format( type(optimizer).__name__)) self.optimizer = optimizer if isinstance(min_lr, list) or isinstance(min_lr, tuple): if len(min_lr) != len(optimizer.param_groups): raise ValueError("expected {} min_lrs, got {}".format( len(optimizer.param_groups), len(min_lr))) self.min_lrs = list(min_lr) else: self.min_lrs = [min_lr] * len(optimizer.param_groups) self.patience = patience self.verbose = verbose self.cooldown = cooldown self.cooldown_counter = 0 self.mode = mode self.threshold = threshold self.threshold_mode = threshold_mode self.best = None self.num_bad_epochs = None self.mode_worse = None # the worse value for the chosen mode self.eps = eps self.last_epoch = 0 self._init_is_better(mode=mode, threshold=threshold, threshold_mode=threshold_mode) self._reset() def _reset(self): """Resets num_bad_epochs counter and cooldown counter.""" self.best = self.mode_worse self.cooldown_counter = 0 self.num_bad_epochs = 0 def step(self, metrics, epoch=None): # convert `metrics` to float, in case it's a zero-dim Tensor current = float(metrics) if epoch is None: epoch = self.last_epoch + 1 else: warnings.warn(EPOCH_DEPRECATION_WARNING, UserWarning) self.last_epoch = epoch if self.is_better(current, self.best): self.best = current self.num_bad_epochs = 0 else: self.num_bad_epochs += 1 if self.in_cooldown: self.cooldown_counter -= 1 self.num_bad_epochs = 0 # ignore any bad epochs in cooldown if self.num_bad_epochs > self.patience: self._reduce_lr(epoch) self.cooldown_counter = self.cooldown self.num_bad_epochs = 0 self._last_lr = [group['lr'] for group in 
self.optimizer.param_groups] def _reduce_lr(self, epoch): for i, param_group in enumerate(self.optimizer.param_groups): old_lr = float(param_group['lr']) new_lr = max(old_lr * self.factor, self.min_lrs[i]) if old_lr - new_lr > self.eps: param_group['lr'] = new_lr if self.verbose: print('Epoch {:5d}: reducing learning rate' ' of group {} to {:.4e}.'.format(epoch, i, new_lr)) @property def in_cooldown(self): return self.cooldown_counter > 0 def is_better(self, a, best): if self.mode == 'min' and self.threshold_mode == 'rel': rel_epsilon = 1. - self.threshold return a < best * rel_epsilon elif self.mode == 'min' and self.threshold_mode == 'abs': return a < best - self.threshold elif self.mode == 'max' and self.threshold_mode == 'rel': rel_epsilon = self.threshold + 1. return a > best * rel_epsilon else: # mode == 'max' and epsilon_mode == 'abs': return a > best + self.threshold def _init_is_better(self, mode, threshold, threshold_mode): if mode not in {'min', 'max'}: raise ValueError('mode ' + mode + ' is unknown!') if threshold_mode not in {'rel', 'abs'}: raise ValueError('threshold mode ' + threshold_mode + ' is unknown!') if mode == 'min': self.mode_worse = inf else: # mode == 'max': self.mode_worse = -inf self.mode = mode self.threshold = threshold self.threshold_mode = threshold_mode def state_dict(self): return {key: value for key, value in self.__dict__.items() if key != 'optimizer'} def load_state_dict(self, state_dict): self.__dict__.update(state_dict) self._init_is_better(mode=self.mode, threshold=self.threshold, threshold_mode=self.threshold_mode) class CyclicLR(_LRScheduler): r"""Sets the learning rate of each parameter group according to cyclical learning rate policy (CLR). The policy cycles the learning rate between two boundaries with a constant frequency, as detailed in the paper `Cyclical Learning Rates for Training Neural Networks`_. The distance between the two boundaries can be scaled on a per-iteration or per-cycle basis. 
Cyclical learning rate policy changes the learning rate after every batch. `step` should be called after a batch has been used for training. This class has three built-in policies, as put forth in the paper: * "triangular": A basic triangular cycle without amplitude scaling. * "triangular2": A basic triangular cycle that scales initial amplitude by half each cycle. * "exp_range": A cycle that scales initial amplitude by :math:`\text{gamma}^{\text{cycle iterations}}` at each cycle iteration. This implementation was adapted from the github repo: `bckenstler/CLR`_ Args: optimizer (Optimizer): Wrapped optimizer. base_lr (float or list): Initial learning rate which is the lower boundary in the cycle for each parameter group. max_lr (float or list): Upper learning rate boundaries in the cycle for each parameter group. Functionally, it defines the cycle amplitude (max_lr - base_lr). The lr at any cycle is the sum of base_lr and some scaling of the amplitude; therefore max_lr may not actually be reached depending on scaling function. step_size_up (int): Number of training iterations in the increasing half of a cycle. Default: 2000 step_size_down (int): Number of training iterations in the decreasing half of a cycle. If step_size_down is None, it is set to step_size_up. Default: None mode (str): One of {triangular, triangular2, exp_range}. Values correspond to policies detailed above. If scale_fn is not None, this argument is ignored. Default: 'triangular' gamma (float): Constant in 'exp_range' scaling function: gamma**(cycle iterations) Default: 1.0 scale_fn (function): Custom scaling policy defined by a single argument lambda function, where 0 <= scale_fn(x) <= 1 for all x >= 0. If specified, then 'mode' is ignored. Default: None scale_mode (str): {'cycle', 'iterations'}. Defines whether scale_fn is evaluated on cycle number or cycle iterations (training iterations since start of cycle). 
Default: 'cycle' cycle_momentum (bool): If ``True``, momentum is cycled inversely to learning rate between 'base_momentum' and 'max_momentum'. Default: True base_momentum (float or list): Lower momentum boundaries in the cycle for each parameter group. Note that momentum is cycled inversely to learning rate; at the peak of a cycle, momentum is 'base_momentum' and learning rate is 'max_lr'. Default: 0.8 max_momentum (float or list): Upper momentum boundaries in the cycle for each parameter group. Functionally, it defines the cycle amplitude (max_momentum - base_momentum). The momentum at any cycle is the difference of max_momentum and some scaling of the amplitude; therefore base_momentum may not actually be reached depending on scaling function. Note that momentum is cycled inversely to learning rate; at the start of a cycle, momentum is 'max_momentum' and learning rate is 'base_lr' Default: 0.9 last_epoch (int): The index of the last batch. This parameter is used when resuming a training job. Since `step()` should be invoked after each batch instead of after each epoch, this number represents the total number of *batches* computed, not the total number of epochs computed. When last_epoch=-1, the schedule is started from the beginning. Default: -1 Example: >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9) >>> scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr=0.01, max_lr=0.1) >>> data_loader = torch.utils.data.DataLoader(...) >>> for epoch in range(10): >>> for batch in data_loader: >>> train_batch(...) >>> scheduler.step() .. _Cyclical Learning Rates for Training Neural Networks: https://arxiv.org/abs/1506.01186 .. 
_bckenstler/CLR: https://github.com/bckenstler/CLR """ def __init__(self, optimizer, base_lr, max_lr, step_size_up=2000, step_size_down=None, mode='triangular', gamma=1., scale_fn=None, scale_mode='cycle', cycle_momentum=True, base_momentum=0.8, max_momentum=0.9, last_epoch=-1): # Attach optimizer if not isinstance(optimizer, Optimizer): raise TypeError('{} is not an Optimizer'.format( type(optimizer).__name__)) self.optimizer = optimizer base_lrs = self._format_param('base_lr', optimizer, base_lr) if last_epoch == -1: for lr, group in zip(base_lrs, optimizer.param_groups): group['lr'] = lr self.max_lrs = self._format_param('max_lr', optimizer, max_lr) step_size_up = float(step_size_up) step_size_down = float(step_size_down) if step_size_down is not None else step_size_up self.total_size = step_size_up + step_size_down self.step_ratio = step_size_up / self.total_size if mode not in ['triangular', 'triangular2', 'exp_range'] \ and scale_fn is None: raise ValueError('mode is invalid and scale_fn is None') self.mode = mode self.gamma = gamma if scale_fn is None: if self.mode == 'triangular': self.scale_fn = self._triangular_scale_fn self.scale_mode = 'cycle' elif self.mode == 'triangular2': self.scale_fn = self._triangular2_scale_fn self.scale_mode = 'cycle' elif self.mode == 'exp_range': self.scale_fn = self._exp_range_scale_fn self.scale_mode = 'iterations' else: self.scale_fn = scale_fn self.scale_mode = scale_mode self.cycle_momentum = cycle_momentum if cycle_momentum: if 'momentum' not in optimizer.defaults: raise ValueError('optimizer must support momentum with `cycle_momentum` option enabled') base_momentums = self._format_param('base_momentum', optimizer, base_momentum) if last_epoch == -1: for momentum, group in zip(base_momentums, optimizer.param_groups): group['momentum'] = momentum self.base_momentums = list(map(lambda group: group['momentum'], optimizer.param_groups)) self.max_momentums = self._format_param('max_momentum', optimizer, max_momentum) 
super(CyclicLR, self).__init__(optimizer, last_epoch) self.base_lrs = base_lrs def _format_param(self, name, optimizer, param): """Return correctly formatted lr/momentum for each param group.""" if isinstance(param, (list, tuple)): if len(param) != len(optimizer.param_groups): raise ValueError("expected {} values for {}, got {}".format( len(optimizer.param_groups), name, len(param))) return param else: return [param] * len(optimizer.param_groups) def _triangular_scale_fn(self, x): return 1. def _triangular2_scale_fn(self, x): return 1 / (2. ** (x - 1)) def _exp_range_scale_fn(self, x): return self.gamma**(x) def get_lr(self): """Calculates the learning rate at batch index. This function treats `self.last_epoch` as the last batch index. If `self.cycle_momentum` is ``True``, this function has a side effect of updating the optimizer's momentum. """ if not self._get_lr_called_within_step: warnings.warn("To get the last learning rate computed by the scheduler, " "please use `get_last_lr()`.", UserWarning) cycle = math.floor(1 + self.last_epoch / self.total_size) x = 1. 
+ self.last_epoch / self.total_size - cycle if x <= self.step_ratio: scale_factor = x / self.step_ratio else: scale_factor = (x - 1) / (self.step_ratio - 1) lrs = [] for base_lr, max_lr in zip(self.base_lrs, self.max_lrs): base_height = (max_lr - base_lr) * scale_factor if self.scale_mode == 'cycle': lr = base_lr + base_height * self.scale_fn(cycle) else: lr = base_lr + base_height * self.scale_fn(self.last_epoch) lrs.append(lr) if self.cycle_momentum: momentums = [] for base_momentum, max_momentum in zip(self.base_momentums, self.max_momentums): base_height = (max_momentum - base_momentum) * scale_factor if self.scale_mode == 'cycle': momentum = max_momentum - base_height * self.scale_fn(cycle) else: momentum = max_momentum - base_height * self.scale_fn(self.last_epoch) momentums.append(momentum) for param_group, momentum in zip(self.optimizer.param_groups, momentums): param_group['momentum'] = momentum return lrs class CosineAnnealingWarmRestarts(_LRScheduler): r"""Set the learning rate of each parameter group using a cosine annealing schedule, where :math:`\eta_{max}` is set to the initial lr, :math:`T_{cur}` is the number of epochs since the last restart and :math:`T_{i}` is the number of epochs between two warm restarts in SGDR: .. math:: \eta_t = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})\left(1 + \cos\left(\frac{T_{cur}}{T_{i}}\pi\right)\right) When :math:`T_{cur}=T_{i}`, set :math:`\eta_t = \eta_{min}`. When :math:`T_{cur}=0` after restart, set :math:`\eta_t=\eta_{max}`. It has been proposed in `SGDR: Stochastic Gradient Descent with Warm Restarts`_. Args: optimizer (Optimizer): Wrapped optimizer. T_0 (int): Number of iterations for the first restart. T_mult (int, optional): A factor increases :math:`T_{i}` after a restart. Default: 1. eta_min (float, optional): Minimum learning rate. Default: 0. last_epoch (int, optional): The index of last epoch. Default: -1. .. 
_SGDR\: Stochastic Gradient Descent with Warm Restarts: https://arxiv.org/abs/1608.03983 """ def __init__(self, optimizer, T_0, T_mult=1, eta_min=0, last_epoch=-1): if T_0 <= 0 or not isinstance(T_0, int): raise ValueError("Expected positive integer T_0, but got {}".format(T_0)) if T_mult < 1 or not isinstance(T_mult, int): raise ValueError("Expected integer T_mult >= 1, but got {}".format(T_mult)) self.T_0 = T_0 self.T_i = T_0 self.T_mult = T_mult self.eta_min = eta_min super(CosineAnnealingWarmRestarts, self).__init__(optimizer, last_epoch) self.T_cur = self.last_epoch def get_lr(self): if not self._get_lr_called_within_step: warnings.warn("To get the last learning rate computed by the scheduler, " "please use `get_last_lr()`.", UserWarning) return [self.eta_min + (base_lr - self.eta_min) * (1 + math.cos(math.pi * self.T_cur / self.T_i)) / 2 for base_lr in self.base_lrs] def step(self, epoch=None): """Step could be called after every batch update Example: >>> scheduler = CosineAnnealingWarmRestarts(optimizer, T_0, T_mult) >>> iters = len(dataloader) >>> for epoch in range(20): >>> for i, sample in enumerate(dataloader): >>> inputs, labels = sample['inputs'], sample['labels'] >>> optimizer.zero_grad() >>> outputs = net(inputs) >>> loss = criterion(outputs, labels) >>> loss.backward() >>> optimizer.step() >>> scheduler.step(epoch + i / iters) This function can be called in an interleaved way. 
Example: >>> scheduler = CosineAnnealingWarmRestarts(optimizer, T_0, T_mult) >>> for epoch in range(20): >>> scheduler.step() >>> scheduler.step(26) >>> scheduler.step() # scheduler.step(27), instead of scheduler(20) """ if epoch is None and self.last_epoch < 0: epoch = 0 if epoch is None: epoch = self.last_epoch + 1 self.T_cur = self.T_cur + 1 if self.T_cur >= self.T_i: self.T_cur = self.T_cur - self.T_i self.T_i = self.T_i * self.T_mult else: if epoch < 0: raise ValueError("Expected non-negative epoch, but got {}".format(epoch)) if epoch >= self.T_0: if self.T_mult == 1: self.T_cur = epoch % self.T_0 else: n = int(math.log((epoch / self.T_0 * (self.T_mult - 1) + 1), self.T_mult)) self.T_cur = epoch - self.T_0 * (self.T_mult ** n - 1) / (self.T_mult - 1) self.T_i = self.T_0 * self.T_mult ** (n) else: self.T_i = self.T_0 self.T_cur = epoch self.last_epoch = math.floor(epoch) class _enable_get_lr_call: def __init__(self, o): self.o = o def __enter__(self): self.o._get_lr_called_within_step = True return self def __exit__(self, type, value, traceback): self.o._get_lr_called_within_step = False return self with _enable_get_lr_call(self): for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()): param_group['lr'] = lr self._last_lr = [group['lr'] for group in self.optimizer.param_groups] class OneCycleLR(_LRScheduler): r"""Sets the learning rate of each parameter group according to the 1cycle learning rate policy. The 1cycle policy anneals the learning rate from an initial learning rate to some maximum learning rate and then from that maximum learning rate to some minimum learning rate much lower than the initial learning rate. This policy was initially described in the paper `Super-Convergence: Very Fast Training of Neural Networks Using Large Learning Rates`_. The 1cycle learning rate policy changes the learning rate after every batch. `step` should be called after a batch has been used for training. This scheduler is not chainable. 
Note also that the total number of steps in the cycle can be determined in one of two ways (listed in order of precedence): #. A value for total_steps is explicitly provided. #. A number of epochs (epochs) and a number of steps per epoch (steps_per_epoch) are provided. In this case, the number of total steps is inferred by total_steps = epochs * steps_per_epoch You must either provide a value for total_steps or provide a value for both epochs and steps_per_epoch. Args: optimizer (Optimizer): Wrapped optimizer. max_lr (float or list): Upper learning rate boundaries in the cycle for each parameter group. total_steps (int): The total number of steps in the cycle. Note that if a value is not provided here, then it must be inferred by providing a value for epochs and steps_per_epoch. Default: None epochs (int): The number of epochs to train for. This is used along with steps_per_epoch in order to infer the total number of steps in the cycle if a value for total_steps is not provided. Default: None steps_per_epoch (int): The number of steps per epoch to train for. This is used along with epochs in order to infer the total number of steps in the cycle if a value for total_steps is not provided. Default: None pct_start (float): The percentage of the cycle (in number of steps) spent increasing the learning rate. Default: 0.3 anneal_strategy (str): {'cos', 'linear'} Specifies the annealing strategy: "cos" for cosine annealing, "linear" for linear annealing. Default: 'cos' cycle_momentum (bool): If ``True``, momentum is cycled inversely to learning rate between 'base_momentum' and 'max_momentum'. Default: True base_momentum (float or list): Lower momentum boundaries in the cycle for each parameter group. Note that momentum is cycled inversely to learning rate; at the peak of a cycle, momentum is 'base_momentum' and learning rate is 'max_lr'. Default: 0.85 max_momentum (float or list): Upper momentum boundaries in the cycle for each parameter group. 
Functionally, it defines the cycle amplitude (max_momentum - base_momentum). Note that momentum is cycled inversely to learning rate; at the start of a cycle, momentum is 'max_momentum' and learning rate is 'base_lr' Default: 0.95 div_factor (float): Determines the initial learning rate via initial_lr = max_lr/div_factor Default: 25 final_div_factor (float): Determines the minimum learning rate via min_lr = initial_lr/final_div_factor Default: 1e4 last_epoch (int): The index of the last batch. This parameter is used when resuming a training job. Since `step()` should be invoked after each batch instead of after each epoch, this number represents the total number of *batches* computed, not the total number of epochs computed. When last_epoch=-1, the schedule is started from the beginning. Default: -1 Example: >>> data_loader = torch.utils.data.DataLoader(...) >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9) >>> scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=len(data_loader), epochs=10) >>> for epoch in range(10): >>> for batch in data_loader: >>> train_batch(...) >>> scheduler.step() .. 
_Super-Convergence\: Very Fast Training of Neural Networks Using Large Learning Rates: https://arxiv.org/abs/1708.07120 """ def __init__(self, optimizer, max_lr, total_steps=None, epochs=None, steps_per_epoch=None, pct_start=0.3, anneal_strategy='cos', cycle_momentum=True, base_momentum=0.85, max_momentum=0.95, div_factor=25., final_div_factor=1e4, last_epoch=-1): # Validate optimizer if not isinstance(optimizer, Optimizer): raise TypeError('{} is not an Optimizer'.format( type(optimizer).__name__)) self.optimizer = optimizer # Validate total_steps if total_steps is None and epochs is None and steps_per_epoch is None: raise ValueError("You must define either total_steps OR (epochs AND steps_per_epoch)") elif total_steps is not None: if total_steps <= 0 or not isinstance(total_steps, int): raise ValueError("Expected non-negative integer total_steps, but got {}".format(total_steps)) self.total_steps = total_steps else: if epochs <= 0 or not isinstance(epochs, int): raise ValueError("Expected non-negative integer epochs, but got {}".format(epochs)) if steps_per_epoch <= 0 or not isinstance(steps_per_epoch, int): raise ValueError("Expected non-negative integer steps_per_epoch, but got {}".format(steps_per_epoch)) self.total_steps = epochs * steps_per_epoch self.step_size_up = float(pct_start * self.total_steps) - 1 self.step_size_down = float(self.total_steps - self.step_size_up) - 1 # Validate pct_start if pct_start < 0 or pct_start > 1 or not isinstance(pct_start, float): raise ValueError("Expected float between 0 and 1 pct_start, but got {}".format(pct_start)) # Validate anneal_strategy if anneal_strategy not in ['cos', 'linear']: raise ValueError("anneal_strategy must by one of 'cos' or 'linear', instead got {}".format(anneal_strategy)) elif anneal_strategy == 'cos': self.anneal_func = self._annealing_cos elif anneal_strategy == 'linear': self.anneal_func = self._annealing_linear # Initialize learning rate variables max_lrs = self._format_param('max_lr', 
self.optimizer, max_lr) if last_epoch == -1: for idx, group in enumerate(self.optimizer.param_groups): group['initial_lr'] = max_lrs[idx] / div_factor group['max_lr'] = max_lrs[idx] group['min_lr'] = group['initial_lr'] / final_div_factor # Initialize momentum variables self.cycle_momentum = cycle_momentum if self.cycle_momentum: if 'momentum' not in self.optimizer.defaults and 'betas' not in self.optimizer.defaults: raise ValueError('optimizer must support momentum with `cycle_momentum` option enabled') self.use_beta1 = 'betas' in self.optimizer.defaults max_momentums = self._format_param('max_momentum', optimizer, max_momentum) base_momentums = self._format_param('base_momentum', optimizer, base_momentum) if last_epoch == -1: for m_momentum, b_momentum, group in zip(max_momentums, base_momentums, optimizer.param_groups): if self.use_beta1: _, beta2 = group['betas'] group['betas'] = (m_momentum, beta2) else: group['momentum'] = m_momentum group['max_momentum'] = m_momentum group['base_momentum'] = b_momentum super(OneCycleLR, self).__init__(optimizer, last_epoch) def _format_param(self, name, optimizer, param): """Return correctly formatted lr/momentum for each param group.""" if isinstance(param, (list, tuple)): if len(param) != len(optimizer.param_groups): raise ValueError("expected {} values for {}, got {}".format( len(optimizer.param_groups), name, len(param))) return param else: return [param] * len(optimizer.param_groups) def _annealing_cos(self, start, end, pct): "Cosine anneal from `start` to `end` as pct goes from 0.0 to 1.0." cos_out = math.cos(math.pi * pct) + 1 return end + (start - end) / 2.0 * cos_out def _annealing_linear(self, start, end, pct): "Linearly anneal from `start` to `end` as pct goes from 0.0 to 1.0." 
return (end - start) * pct + start def get_lr(self): if not self._get_lr_called_within_step: warnings.warn("To get the last learning rate computed by the scheduler, " "please use `get_last_lr()`.", UserWarning) lrs = [] step_num = self.last_epoch if step_num > self.total_steps: raise ValueError("Tried to step {} times. The specified number of total steps is {}" .format(step_num + 1, self.total_steps)) for group in self.optimizer.param_groups: if step_num <= self.step_size_up: computed_lr = self.anneal_func(group['initial_lr'], group['max_lr'], step_num / self.step_size_up) if self.cycle_momentum: computed_momentum = self.anneal_func(group['max_momentum'], group['base_momentum'], step_num / self.step_size_up) else: down_step_num = step_num - self.step_size_up computed_lr = self.anneal_func(group['max_lr'], group['min_lr'], down_step_num / self.step_size_down) if self.cycle_momentum: computed_momentum = self.anneal_func(group['base_momentum'], group['max_momentum'], down_step_num / self.step_size_down) lrs.append(computed_lr) if self.cycle_momentum: if self.use_beta1: _, beta2 = group['betas'] group['betas'] = (computed_momentum, beta2) else: group['momentum'] = computed_momentum return lrs
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/optim/lr_scheduler.py
0.878471
0.343218
lr_scheduler.py
pypi
import math
import torch
from .optimizer import Optimizer


class Adam(Optimizer):
    r"""Implements Adam algorithm.

    It has been proposed in `Adam: A Method for Stochastic Optimization`_.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_
            (default: False)

    .. _Adam\: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
    .. _On the Convergence of Adam and Beyond:
        https://openreview.net/forum?id=ryQu7f-RZ
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=0, amsgrad=False):
        # Validate hyperparameters eagerly so bad values fail at construction
        # time instead of surfacing as NaNs mid-training.  The `not 0.0 <= x`
        # form also rejects NaN, which fails every comparison.
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        if not 0.0 <= weight_decay:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay, amsgrad=amsgrad)
        super(Adam, self).__init__(params, defaults)

    def __setstate__(self, state):
        """Restore pickled state, backfilling options missing from old checkpoints."""
        super(Adam, self).__setstate__(state)
        for group in self.param_groups:
            # Checkpoints written before the amsgrad option existed lack the
            # key; default it so step() can read it unconditionally.
            group.setdefault('amsgrad', False)

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            # The closure may run a forward/backward pass, which needs
            # autograd re-enabled inside the surrounding @torch.no_grad().
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                amsgrad = group['amsgrad']

                # Per-parameter optimizer state (lazily initialized below).
                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    if amsgrad:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                if amsgrad:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                beta1, beta2 = group['betas']

                state['step'] += 1
                # Bias corrections compensate for the zero-initialization of
                # the moving averages (Sec. 2 of the Adam paper).
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']

                if group['weight_decay'] != 0:
                    # Classic (non-decoupled) L2 penalty: folded into the
                    # gradient out-of-place so p.grad itself is untouched.
                    grad = grad.add(p, alpha=group['weight_decay'])

                # Decay the first and second moment running average coefficient
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                if amsgrad:
                    # Maintains the maximum of all 2nd moment running avg. till now
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    # Use the max. for normalizing running avg. of gradient
                    denom = (max_exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
                else:
                    denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])

                step_size = group['lr'] / bias_correction1

                # p <- p - step_size * exp_avg / denom
                p.addcdiv_(exp_avg, denom, value=-step_size)

        return loss
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/optim/adam.py
0.896217
0.583411
adam.py
pypi
from collections import defaultdict
from torch._six import container_abcs

import torch
from copy import deepcopy
from itertools import chain


class _RequiredParameter(object):
    """Singleton class representing a required parameter for an Optimizer."""
    def __repr__(self):
        return "<required parameter>"

# Sentinel default: subclasses use `lr=required` to force callers to supply
# a value either per-group or as a keyword argument.
required = _RequiredParameter()


class Optimizer(object):
    r"""Base class for all optimizers.

    .. warning::
        Parameters need to be specified as collections that have a
        deterministic ordering that is consistent between runs. Examples of
        objects that don't satisfy those properties are sets and iterators
        over values of dictionaries.

    Arguments:
        params (iterable): an iterable of :class:`torch.Tensor` s or
            :class:`dict` s. Specifies what Tensors should be optimized.
        defaults: (dict): a dict containing default values of optimization
            options (used when a parameter group doesn't specify them).
    """

    def __init__(self, params, defaults):
        torch._C._log_api_usage_once("python.optimizer")
        self.defaults = defaults

        if isinstance(params, torch.Tensor):
            raise TypeError("params argument given to the optimizer should be "
                            "an iterable of Tensors or dicts, but got " +
                            torch.typename(params))

        # Per-parameter state, keyed by the parameter Tensor itself; each
        # value is a dict private to the subclass (momentum buffers, etc.).
        self.state = defaultdict(dict)
        self.param_groups = []

        param_groups = list(params)
        if len(param_groups) == 0:
            raise ValueError("optimizer got an empty parameter list")
        if not isinstance(param_groups[0], dict):
            # A plain list of Tensors becomes a single implicit group.
            param_groups = [{'params': param_groups}]

        for param_group in param_groups:
            self.add_param_group(param_group)

    def __getstate__(self):
        # Note: per-instance attributes added by subclasses are NOT pickled.
        return {
            'defaults': self.defaults,
            'state': self.state,
            'param_groups': self.param_groups,
        }

    def __setstate__(self, state):
        self.__dict__.update(state)

    def __repr__(self):
        format_string = self.__class__.__name__ + ' ('
        for i, group in enumerate(self.param_groups):
            format_string += '\n'
            format_string += 'Parameter Group {0}\n'.format(i)
            # Sorted keys give a deterministic, diff-friendly repr.
            for key in sorted(group.keys()):
                if key != 'params':
                    format_string += '    {0}: {1}\n'.format(key, group[key])
        format_string += ')'
        return format_string

    def state_dict(self):
        r"""Returns the state of the optimizer as a :class:`dict`.

        It contains two entries:

        * state - a dict holding current optimization state. Its content
            differs between optimizer classes.
        * param_groups - a dict containing all parameter groups
        """
        # Save ids instead of Tensors
        def pack_group(group):
            packed = {k: v for k, v in group.items() if k != 'params'}
            packed['params'] = [id(p) for p in group['params']]
            return packed
        param_groups = [pack_group(g) for g in self.param_groups]
        # Remap state to use ids as keys
        packed_state = {(id(k) if isinstance(k, torch.Tensor) else k): v
                        for k, v in self.state.items()}
        return {
            'state': packed_state,
            'param_groups': param_groups,
        }

    def load_state_dict(self, state_dict):
        r"""Loads the optimizer state.

        Arguments:
            state_dict (dict): optimizer state. Should be an object returned
                from a call to :meth:`state_dict`.
        """
        # deepcopy, to be consistent with module API
        state_dict = deepcopy(state_dict)
        # Validate the state_dict
        groups = self.param_groups
        saved_groups = state_dict['param_groups']

        if len(groups) != len(saved_groups):
            raise ValueError("loaded state dict has a different number of "
                             "parameter groups")
        param_lens = (len(g['params']) for g in groups)
        saved_lens = (len(g['params']) for g in saved_groups)
        if any(p_len != s_len for p_len, s_len in zip(param_lens, saved_lens)):
            raise ValueError("loaded state dict contains a parameter group "
                             "that doesn't match the size of optimizer's group")

        # Update the state
        # Saved ids are matched positionally to this optimizer's live Tensors
        # (group order and in-group order must agree with save time).
        id_map = {old_id: p for old_id, p in
                  zip(chain(*(g['params'] for g in saved_groups)),
                      chain(*(g['params'] for g in groups)))}

        def cast(param, value):
            r"""Make a deep copy of value, casting all tensors to device of param."""
            if isinstance(value, torch.Tensor):
                # Floating-point types are a bit special here. They are the only ones
                # that are assumed to always match the type of params.
                if param.is_floating_point():
                    value = value.to(param.dtype)
                value = value.to(param.device)
                return value
            elif isinstance(value, dict):
                return {k: cast(param, v) for k, v in value.items()}
            elif isinstance(value, container_abcs.Iterable):
                return type(value)(cast(param, v) for v in value)
            else:
                return value

        # Copy state assigned to params (and cast tensors to appropriate types).
        # State that is not assigned to params is copied as is (needed for
        # backward compatibility).
        state = defaultdict(dict)
        for k, v in state_dict['state'].items():
            if k in id_map:
                param = id_map[k]
                state[param] = cast(param, v)
            else:
                state[k] = v

        # Update parameter groups, setting their 'params' value
        def update_group(group, new_group):
            # Keep the live Tensors; take every other option from the checkpoint.
            new_group['params'] = group['params']
            return new_group
        param_groups = [
            update_group(g, ng) for g, ng in zip(groups, saved_groups)]
        self.__setstate__({'state': state, 'param_groups': param_groups})

    def zero_grad(self):
        r"""Clears the gradients of all optimized :class:`torch.Tensor` s."""
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is not None:
                    # Detach first so zeroing cannot be recorded by autograd.
                    p.grad.detach_()
                    p.grad.zero_()

    def step(self, closure):
        r"""Performs a single optimization step (parameter update).

        Arguments:
            closure (callable): A closure that reevaluates the model and
                returns the loss. Optional for most optimizers.

        .. note::
            Unless otherwise specified, this function should not modify the
            ``.grad`` field of the parameters.
        """
        raise NotImplementedError

    def add_param_group(self, param_group):
        r"""Add a param group to the :class:`Optimizer` s `param_groups`.

        This can be useful when fine tuning a pre-trained network as frozen layers can be made
        trainable and added to the :class:`Optimizer` as training progresses.

        Arguments:
            param_group (dict): Specifies what Tensors should be optimized along with group
            specific optimization options.
        """
        assert isinstance(param_group, dict), "param group must be a dict"

        params = param_group['params']
        if isinstance(params, torch.Tensor):
            param_group['params'] = [params]
        elif isinstance(params, set):
            # Sets have nondeterministic iteration order across runs, which
            # would break checkpoint load's positional id matching.
            raise TypeError('optimizer parameters need to be organized in ordered collections, but '
                            'the ordering of tensors in sets will change between runs. Please use a list instead.')
        else:
            param_group['params'] = list(params)

        for param in param_group['params']:
            if not isinstance(param, torch.Tensor):
                raise TypeError("optimizer can only optimize Tensors, "
                                "but one of the params is " + torch.typename(param))
            if not param.is_leaf:
                raise ValueError("can't optimize a non-leaf Tensor")

        # Fill in missing options from the optimizer-wide defaults; options
        # marked `required` must have been given explicitly for this group.
        for name, default in self.defaults.items():
            if default is required and name not in param_group:
                raise ValueError("parameter group didn't specify a value of required optimization parameter " +
                                 name)
            else:
                param_group.setdefault(name, default)

        # A parameter may belong to at most one group.
        param_set = set()
        for group in self.param_groups:
            param_set.update(set(group['params']))

        if not param_set.isdisjoint(set(param_group['params'])):
            raise ValueError("some parameters appear in more than one parameter group")

        self.param_groups.append(param_group)
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/optim/optimizer.py
0.954584
0.355691
optimizer.py
pypi
import torch
from .optimizer import Optimizer


class Adadelta(Optimizer):
    """Implements the Adadelta optimization algorithm.

    Proposed in `ADADELTA: An Adaptive Learning Rate Method`__. The method
    keeps two running accumulators per parameter -- a decaying average of
    squared gradients and one of squared updates -- and scales each raw
    gradient by the ratio of their smoothed root-mean-squares.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts
            defining parameter groups
        lr (float, optional): coefficient that scale delta before it is
            applied to the parameters (default: 1.0)
        rho (float, optional): coefficient used for computing a running
            average of squared gradients (default: 0.9)
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-6)
        weight_decay (float, optional): weight decay (L2 penalty)
            (default: 0)

    __ https://arxiv.org/abs/1212.5701
    """

    def __init__(self, params, lr=1.0, rho=0.9, eps=1e-6, weight_decay=0):
        # Reject bad hyperparameters up front; the `not a <= b` form also
        # rejects NaN, which fails every comparison.
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= rho <= 1.0:
            raise ValueError("Invalid rho value: {}".format(rho))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= weight_decay:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        super(Adadelta, self).__init__(
            params,
            {'lr': lr, 'rho': rho, 'eps': eps, 'weight_decay': weight_decay})

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            # The closure may run forward/backward, so re-enable autograd
            # inside the surrounding @torch.no_grad().
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            rho, eps = group['rho'], group['eps']
            lr, decay = group['lr'], group['weight_decay']

            for param in group['params']:
                if param.grad is None:
                    continue
                grad = param.grad
                if grad.is_sparse:
                    raise RuntimeError('Adadelta does not support sparse gradients')

                state = self.state[param]
                if not state:
                    # Lazily create the per-parameter accumulators on first use.
                    state['step'] = 0
                    state['square_avg'] = torch.zeros_like(
                        param, memory_format=torch.preserve_format)
                    state['acc_delta'] = torch.zeros_like(
                        param, memory_format=torch.preserve_format)

                sq_avg, acc_delta = state['square_avg'], state['acc_delta']
                state['step'] += 1

                if decay != 0:
                    # Out-of-place add leaves param.grad itself untouched.
                    grad = grad.add(param, alpha=decay)

                # E[g^2] <- rho * E[g^2] + (1 - rho) * g^2
                sq_avg.mul_(rho).addcmul_(grad, grad, value=1 - rho)
                # delta = g * RMS[dx] / RMS[g]
                rms_grad = sq_avg.add(eps).sqrt_()
                delta = acc_delta.add(eps).sqrt_().div_(rms_grad).mul_(grad)
                param.add_(delta, alpha=-lr)
                # E[dx^2] <- rho * E[dx^2] + (1 - rho) * delta^2
                acc_delta.mul_(rho).addcmul_(delta, delta, value=1 - rho)

        return loss
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/optim/adadelta.py
0.937813
0.512449
adadelta.py
pypi
import torch
from .optimizer import Optimizer, required


class SGD(Optimizer):
    r"""Implements stochastic gradient descent (optionally with momentum).

    Nesterov momentum is based on the formula from
    `On the importance of initialization and momentum in deep learning`__.

    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float): learning rate
        momentum (float, optional): momentum factor (default: 0)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        dampening (float, optional): dampening for momentum (default: 0)
        nesterov (bool, optional): enables Nesterov momentum (default: False)

    Example:
        >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()

    __ http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf

    .. note::
        The implementation of SGD with Momentum/Nesterov subtly differs from
        Sutskever et. al. and implementations in some other frameworks.

        Considering the specific case of Momentum, the update can be written as

        .. math::
            \begin{aligned}
                v_{t+1} & = \mu * v_{t} + g_{t+1}, \\
                p_{t+1} & = p_{t} - \text{lr} * v_{t+1},
            \end{aligned}

        where :math:`p`, :math:`g`, :math:`v` and :math:`\mu` denote the
        parameters, gradient, velocity, and momentum respectively.

        This is in contrast to Sutskever et. al. and
        other frameworks which employ an update of the form

        .. math::
            \begin{aligned}
                v_{t+1} & = \mu * v_{t} + \text{lr} * g_{t+1}, \\
                p_{t+1} & = p_{t} - v_{t+1}.
            \end{aligned}

        The Nesterov version is analogously modified.
    """

    def __init__(self, params, lr=required, momentum=0, dampening=0,
                 weight_decay=0, nesterov=False):
        # `lr` has no numeric default; `required` is a sentinel from .optimizer.
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if momentum < 0.0:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if weight_decay < 0.0:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))

        defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
                        weight_decay=weight_decay, nesterov=nesterov)
        # Nesterov's formula below assumes a momentum buffer and no dampening.
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError("Nesterov momentum requires a momentum and zero dampening")
        super(SGD, self).__init__(params, defaults)

    def __setstate__(self, state):
        # Backward compatibility: checkpoints saved before `nesterov` existed
        # lack the key, so restore it with the default.
        super(SGD, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('nesterov', False)

    @torch.no_grad()  # parameter updates must not be tracked by autograd
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            # Re-enable grad so the closure can run backward().
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']

            for p in group['params']:
                if p.grad is None:
                    continue
                d_p = p.grad
                if weight_decay != 0:
                    # L2 penalty folded into the gradient (out-of-place).
                    d_p = d_p.add(p, alpha=weight_decay)
                if momentum != 0:
                    param_state = self.state[p]
                    if 'momentum_buffer' not in param_state:
                        # First step for this param: seed the velocity with the
                        # current gradient (note: no dampening on the first step).
                        buf = param_state['momentum_buffer'] = torch.clone(d_p).detach()
                    else:
                        buf = param_state['momentum_buffer']
                        # v <- mu * v + (1 - dampening) * g
                        buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
                    if nesterov:
                        # Nesterov: step along g + mu * v.
                        d_p = d_p.add(buf, alpha=momentum)
                    else:
                        d_p = buf

                p.add_(d_p, alpha=-group['lr'])

        return loss
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/optim/sgd.py
0.949634
0.70078
sgd.py
pypi
import math
import torch
from .optimizer import Optimizer


class AdamW(Optimizer):
    r"""Implements AdamW algorithm.

    The original Adam algorithm was proposed in `Adam: A Method for Stochastic Optimization`_.
    The AdamW variant was proposed in `Decoupled Weight Decay Regularization`_.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay coefficient (default: 1e-2)
        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_
            (default: False)

    .. _Adam\: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
    .. _Decoupled Weight Decay Regularization:
        https://arxiv.org/abs/1711.05101
    .. _On the Convergence of Adam and Beyond:
        https://openreview.net/forum?id=ryQu7f-RZ
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=1e-2, amsgrad=False):
        # Validate hyperparameters before building any state.
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        if not 0.0 <= weight_decay:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay, amsgrad=amsgrad)
        super(AdamW, self).__init__(params, defaults)

    def __setstate__(self, state):
        # Backward compatibility: old checkpoints may predate `amsgrad`.
        super(AdamW, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('amsgrad', False)

    @torch.no_grad()  # parameter updates must not be tracked by autograd
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue

                # Perform stepweight decay: AdamW applies weight decay directly
                # to the parameter (decoupled from the gradient), unlike Adam.
                p.mul_(1 - group['lr'] * group['weight_decay'])

                # Perform optimization step
                grad = p.grad
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                amsgrad = group['amsgrad']

                state = self.state[p]

                # State initialization (lazy: first time this param is stepped).
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    if amsgrad:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                if amsgrad:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                beta1, beta2 = group['betas']

                state['step'] += 1
                # Bias corrections counteract the zero-initialization of the
                # moment estimates (shrink toward zero at early steps).
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']

                # Decay the first and second moment running average coefficient
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                if amsgrad:
                    # Maintains the maximum of all 2nd moment running avg. till now
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    # Use the max. for normalizing running avg. of gradient
                    denom = (max_exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
                else:
                    denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])

                step_size = group['lr'] / bias_correction1

                p.addcdiv_(exp_avg, denom, value=-step_size)

        return loss
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/optim/adamw.py
0.923303
0.623936
adamw.py
pypi
import math
import torch
from .optimizer import Optimizer


class ASGD(Optimizer):
    """Implements Averaged Stochastic Gradient Descent.

    It has been proposed in `Acceleration of stochastic approximation by
    averaging`_.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-2)
        lambd (float, optional): decay term (default: 1e-4)
        alpha (float, optional): power for eta update (default: 0.75)
        t0 (float, optional): point at which to start averaging (default: 1e6)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)

    .. _Acceleration of stochastic approximation by averaging:
        http://dl.acm.org/citation.cfm?id=131098
    """

    def __init__(self, params, lr=1e-2, lambd=1e-4, alpha=0.75, t0=1e6, weight_decay=0):
        # Validate hyperparameters before building any state.
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= weight_decay:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))

        defaults = dict(lr=lr, lambd=lambd, alpha=alpha, t0=t0,
                        weight_decay=weight_decay)
        super(ASGD, self).__init__(params, defaults)

    @torch.no_grad()  # parameter updates must not be tracked by autograd
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad
                if grad.is_sparse:
                    raise RuntimeError('ASGD does not support sparse gradients')
                state = self.state[p]

                # State initialization (lazy: first time this param is stepped).
                if len(state) == 0:
                    state['step'] = 0
                    # eta: per-parameter-group decayed learning rate.
                    state['eta'] = group['lr']
                    # mu: averaging coefficient (1 until step passes t0).
                    state['mu'] = 1
                    # ax: the running average of the iterates.
                    state['ax'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                state['step'] += 1

                if group['weight_decay'] != 0:
                    # L2 penalty folded into the gradient (out-of-place).
                    grad = grad.add(p, alpha=group['weight_decay'])

                # decay term
                p.mul_(1 - group['lambd'] * state['eta'])

                # update parameter
                p.add_(grad, alpha=-state['eta'])

                # averaging: ax <- ax + mu * (p - ax); with mu == 1 this is a
                # plain copy (no averaging has started yet).
                if state['mu'] != 1:
                    state['ax'].add_(p.sub(state['ax']).mul(state['mu']))
                else:
                    state['ax'].copy_(p)

                # update eta and mu per the Polyak-Juditsky schedule.
                state['eta'] = (group['lr'] /
                                math.pow((1 + group['lambd'] * group['lr'] * state['step']), group['alpha']))
                state['mu'] = 1 / max(1, state['step'] - group['t0'])

        return loss
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/optim/asgd.py
0.870212
0.416025
asgd.py
pypi
import torch
from .optimizer import Optimizer


class Adamax(Optimizer):
    """Implements Adamax algorithm (a variant of Adam based on infinity norm).

    It has been proposed in `Adam: A Method for Stochastic Optimization`__.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 2e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)

    __ https://arxiv.org/abs/1412.6980
    """

    def __init__(self, params, lr=2e-3, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=0):
        # Validate hyperparameters before building any state.
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        if not 0.0 <= weight_decay:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))

        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        super(Adamax, self).__init__(params, defaults)

    @torch.no_grad()  # parameter updates must not be tracked by autograd
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad
                if grad.is_sparse:
                    raise RuntimeError('Adamax does not support sparse gradients')
                state = self.state[p]

                # State initialization (lazy: first time this param is stepped).
                if len(state) == 0:
                    state['step'] = 0
                    # First-moment (mean) estimate of the gradient.
                    state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    # Exponentially weighted infinity norm of the gradient.
                    state['exp_inf'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                exp_avg, exp_inf = state['exp_avg'], state['exp_inf']
                beta1, beta2 = group['betas']
                eps = group['eps']

                state['step'] += 1

                if group['weight_decay'] != 0:
                    # L2 penalty folded into the gradient (out-of-place).
                    grad = grad.add(p, alpha=group['weight_decay'])

                # Update biased first moment estimate.
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                # Update the exponentially weighted infinity norm:
                # u <- max(beta2 * u, |g| + eps), computed as an element-wise
                # max over a stacked pair written back into exp_inf in place.
                norm_buf = torch.cat([
                    exp_inf.mul_(beta2).unsqueeze(0),
                    grad.abs().add_(eps).unsqueeze_(0)
                ], 0)
                torch.max(norm_buf, 0, keepdim=False, out=(exp_inf, exp_inf.new().long()))

                # Only the first moment needs bias correction; the inf-norm
                # estimate is not bias-corrected in the Adamax algorithm.
                bias_correction = 1 - beta1 ** state['step']
                clr = group['lr'] / bias_correction

                p.addcdiv_(exp_avg, exp_inf, value=-clr)

        return loss
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/optim/adamax.py
0.920437
0.566618
adamax.py
pypi
import math
import torch
from .optimizer import Optimizer


class SparseAdam(Optimizer):
    r"""Implements lazy version of Adam algorithm suitable for sparse tensors.

    In this variant, only moments that show up in the gradient get updated, and
    only those portions of the gradient get applied to the parameters.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)

    .. _Adam\: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8):
        # Note: strictly positive (not >=) bounds for lr and eps here,
        # unlike the dense Adam/AdamW implementations.
        if not 0.0 < lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 < eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps)
        super(SparseAdam, self).__init__(params, defaults)

    @torch.no_grad()  # parameter updates must not be tracked by autograd
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad
                if not grad.is_sparse:
                    raise RuntimeError('SparseAdam does not support dense gradients, please consider Adam instead')

                state = self.state[p]

                # State initialization (lazy: first time this param is stepped).
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                state['step'] += 1

                grad = grad.coalesce()  # the update is non-linear so indices must be unique
                grad_indices = grad._indices()
                grad_values = grad._values()
                size = grad.size()

                def make_sparse(values):
                    # Build a sparse tensor with the gradient's sparsity
                    # pattern but the given values.
                    constructor = grad.new
                    if grad_indices.dim() == 0 or values.dim() == 0:
                        return constructor().resize_as_(grad)
                    return constructor(grad_indices, values, size)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']

                # Decay the first and second moment running average coefficient
                #      old <- b * old + (1 - b) * new
                # <==> old += (1 - b) * (new - old)
                # Only the entries present in the (sparse) gradient are touched.
                old_exp_avg_values = exp_avg.sparse_mask(grad)._values()
                exp_avg_update_values = grad_values.sub(old_exp_avg_values).mul_(1 - beta1)
                exp_avg.add_(make_sparse(exp_avg_update_values))
                old_exp_avg_sq_values = exp_avg_sq.sparse_mask(grad)._values()
                exp_avg_sq_update_values = grad_values.pow(2).sub_(old_exp_avg_sq_values).mul_(1 - beta2)
                exp_avg_sq.add_(make_sparse(exp_avg_sq_update_values))

                # Dense addition again is intended, avoiding another sparse_mask:
                # update_values + old_values reconstructs the new moment values
                # at exactly the gradient's indices.
                numer = exp_avg_update_values.add_(old_exp_avg_values)
                exp_avg_sq_update_values.add_(old_exp_avg_sq_values)
                denom = exp_avg_sq_update_values.sqrt_().add_(group['eps'])
                # The update buffers were mutated in place above; drop the
                # names to make any further (aliased) use an error.
                del exp_avg_update_values, exp_avg_sq_update_values

                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1

                p.add_(make_sparse(-step_size * numer.div_(denom)))

        return loss
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/optim/sparse_adam.py
0.882706
0.608158
sparse_adam.py
pypi
import math
from numbers import Number

from torch._six import inf, nan
import torch
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.utils import broadcast_all


class Cauchy(Distribution):
    r"""
    Samples from a Cauchy (Lorentz) distribution. The distribution of the
    ratio of independent normally distributed random variables with means `0`
    follows a Cauchy distribution.

    Example::

        >>> m = Cauchy(torch.tensor([0.0]), torch.tensor([1.0]))
        >>> m.sample()  # sample from a Cauchy distribution with loc=0 and scale=1
        tensor([ 2.3214])

    Args:
        loc (float or Tensor): mode or median of the distribution.
        scale (float or Tensor): half width at half maximum.
    """
    arg_constraints = {'loc': constraints.real, 'scale': constraints.positive}
    support = constraints.real
    has_rsample = True

    def __init__(self, loc, scale, validate_args=None):
        self.loc, self.scale = broadcast_all(loc, scale)
        # Scalar arguments produce an empty batch shape; tensor arguments
        # batch over the broadcasted parameter shape.
        scalar_params = isinstance(loc, Number) and isinstance(scale, Number)
        batch_shape = torch.Size() if scalar_params else self.loc.size()
        super(Cauchy, self).__init__(batch_shape, validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        # Produce a view of this distribution over a larger batch shape
        # without re-validating arguments.
        inst = self._get_checked_instance(Cauchy, _instance)
        batch_shape = torch.Size(batch_shape)
        inst.scale = self.scale.expand(batch_shape)
        inst.loc = self.loc.expand(batch_shape)
        super(Cauchy, inst).__init__(batch_shape, validate_args=False)
        inst._validate_args = self._validate_args
        return inst

    @property
    def mean(self):
        # The mean of a Cauchy distribution is undefined.
        return torch.full(self._extended_shape(), nan, dtype=self.loc.dtype, device=self.loc.device)

    @property
    def variance(self):
        # The variance of a Cauchy distribution is infinite.
        return torch.full(self._extended_shape(), inf, dtype=self.loc.dtype, device=self.loc.device)

    def rsample(self, sample_shape=torch.Size()):
        # Draw standard-Cauchy noise, then shift/scale (reparameterized).
        shape = self._extended_shape(sample_shape)
        noise = self.loc.new(shape).cauchy_()
        return self.loc + noise * self.scale

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        # log pdf = -log(pi) - log(scale) - log(1 + z^2), z = (x - loc)/scale
        z = (value - self.loc) / self.scale
        return -math.log(math.pi) - self.scale.log() - (1 + z ** 2).log()

    def cdf(self, value):
        if self._validate_args:
            self._validate_sample(value)
        z = (value - self.loc) / self.scale
        return torch.atan(z) / math.pi + 0.5

    def icdf(self, value):
        if self._validate_args:
            self._validate_sample(value)
        return torch.tan(math.pi * (value - 0.5)) * self.scale + self.loc

    def entropy(self):
        # Differential entropy: log(4 * pi * scale).
        return math.log(4 * math.pi) + self.scale.log()
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/distributions/cauchy.py
0.948454
0.525308
cauchy.py
pypi
from numbers import Number
import math
import torch
from torch.distributions import constraints
from torch.distributions.uniform import Uniform
from torch.distributions.transformed_distribution import TransformedDistribution
from torch.distributions.transforms import AffineTransform, ExpTransform
from torch.distributions.utils import broadcast_all

# Euler-Mascheroni constant (gamma), used by the Gumbel moments.
euler_constant = 0.57721566490153286060


class Gumbel(TransformedDistribution):
    r"""
    Samples from a Gumbel Distribution.

    Examples::

        >>> m = Gumbel(torch.tensor([1.0]), torch.tensor([2.0]))
        >>> m.sample()  # sample from Gumbel distribution with loc=1, scale=2
        tensor([ 1.0124])

    Args:
        loc (float or Tensor): Location parameter of the distribution
        scale (float or Tensor): Scale parameter of the distribution
    """
    arg_constraints = {'loc': constraints.real, 'scale': constraints.positive}
    support = constraints.real

    def __init__(self, loc, scale, validate_args=None):
        self.loc, self.scale = broadcast_all(loc, scale)
        finfo = torch.finfo(self.loc.dtype)
        # Base uniform lives strictly inside (0, 1) so the log transforms
        # below never hit 0 or 1 exactly.
        if isinstance(loc, Number) and isinstance(scale, Number):
            base = Uniform(finfo.tiny, 1 - finfo.eps)
        else:
            low = torch.full_like(self.loc, finfo.tiny)
            high = torch.full_like(self.loc, 1 - finfo.eps)
            base = Uniform(low, high)
        # U -> -log(U) -> log(-log(U)) -> loc - scale * log(-log(U)),
        # i.e. inverse-CDF sampling of the Gumbel distribution.
        chain = [
            ExpTransform().inv,
            AffineTransform(loc=0, scale=-torch.ones_like(self.scale)),
            ExpTransform().inv,
            AffineTransform(loc=loc, scale=-self.scale),
        ]
        super(Gumbel, self).__init__(base, chain, validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        inst = self._get_checked_instance(Gumbel, _instance)
        inst.loc = self.loc.expand(batch_shape)
        inst.scale = self.scale.expand(batch_shape)
        return super(Gumbel, self).expand(batch_shape, _instance=inst)

    # Explicitly defining the log probability function for Gumbel due to precision issues
    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        z = (self.loc - value) / self.scale
        return z - z.exp() - self.scale.log()

    @property
    def mean(self):
        return self.loc + self.scale * euler_constant

    @property
    def stddev(self):
        return (math.pi / math.sqrt(6)) * self.scale

    @property
    def variance(self):
        return self.stddev.pow(2)

    def entropy(self):
        return self.scale.log() + (1 + euler_constant)
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/distributions/gumbel.py
0.937797
0.468487
gumbel.py
pypi
from numbers import Number
import torch
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.utils import broadcast_all, probs_to_logits, lazy_property, logits_to_probs


def _clamp_by_zero(x):
    # works like clamp(x, min=0) but has grad at 0 of 0.5 instead of 0
    return (x.clamp(min=0) + x - x.clamp(max=0)) / 2


class Binomial(Distribution):
    r"""
    Creates a Binomial distribution parameterized by :attr:`total_count` and
    either :attr:`probs` or :attr:`logits` (but not both). :attr:`total_count` must be
    broadcastable with :attr:`probs`/:attr:`logits`.

    Example::

        >>> m = Binomial(100, torch.tensor([0 , .2, .8, 1]))
        >>> x = m.sample()
        tensor([   0.,   22.,   71.,  100.])

        >>> m = Binomial(torch.tensor([[5.], [10.]]), torch.tensor([0.5, 0.8]))
        >>> x = m.sample()
        tensor([[ 4.,  5.],
                [ 7.,  6.]])

    Args:
        total_count (int or Tensor): number of Bernoulli trials
        probs (Tensor): Event probabilities
        logits (Tensor): Event log-odds
    """
    arg_constraints = {'total_count': constraints.nonnegative_integer,
                       'probs': constraints.unit_interval,
                       'logits': constraints.real}
    has_enumerate_support = True

    def __init__(self, total_count=1, probs=None, logits=None, validate_args=None):
        if (probs is None) == (logits is None):
            raise ValueError("Either `probs` or `logits` must be specified, but not both.")
        if probs is not None:
            self.total_count, self.probs = broadcast_all(total_count, probs)
            # BUGFIX: cast against `self.probs` here, not `self.logits`.
            # Reading `self.logits` in this branch would eagerly trigger the
            # `logits` lazy_property (wasted work), and the materialized
            # attribute would then also be expanded/copied by `expand()`.
            self.total_count = self.total_count.type_as(self.probs)
            is_scalar = isinstance(self.probs, Number)
        else:
            self.total_count, self.logits = broadcast_all(total_count, logits)
            self.total_count = self.total_count.type_as(self.logits)
            is_scalar = isinstance(self.logits, Number)

        # `_param` aliases whichever parameterization the caller supplied;
        # it is used for dtype/device/shape queries and `_new`.
        self._param = self.probs if probs is not None else self.logits
        if is_scalar:
            batch_shape = torch.Size()
        else:
            batch_shape = self._param.size()
        super(Binomial, self).__init__(batch_shape, validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        """Expand parameters to `batch_shape` without re-validating args."""
        new = self._get_checked_instance(Binomial, _instance)
        batch_shape = torch.Size(batch_shape)
        new.total_count = self.total_count.expand(batch_shape)
        # Only copy whichever parameterization has actually been materialized.
        if 'probs' in self.__dict__:
            new.probs = self.probs.expand(batch_shape)
            new._param = new.probs
        if 'logits' in self.__dict__:
            new.logits = self.logits.expand(batch_shape)
            new._param = new.logits
        super(Binomial, new).__init__(batch_shape, validate_args=False)
        new._validate_args = self._validate_args
        return new

    def _new(self, *args, **kwargs):
        # Allocate a tensor matching the parameter's dtype/device.
        return self._param.new(*args, **kwargs)

    @constraints.dependent_property
    def support(self):
        return constraints.integer_interval(0, self.total_count)

    @property
    def mean(self):
        return self.total_count * self.probs

    @property
    def variance(self):
        return self.total_count * self.probs * (1 - self.probs)

    @lazy_property
    def logits(self):
        return probs_to_logits(self.probs, is_binary=True)

    @lazy_property
    def probs(self):
        return logits_to_probs(self.logits, is_binary=True)

    @property
    def param_shape(self):
        return self._param.size()

    def sample(self, sample_shape=torch.Size()):
        """Sample by summing `total_count` Bernoulli draws per element."""
        with torch.no_grad():
            max_count = max(int(self.total_count.max()), 1)
            shape = self._extended_shape(sample_shape) + (max_count,)
            bernoullis = torch.bernoulli(self.probs.unsqueeze(-1).expand(shape))
            if self.total_count.min() != max_count:
                # Inhomogeneous counts: zero out draws beyond each element's
                # own total_count before summing.
                arange = torch.arange(max_count, dtype=self._param.dtype, device=self._param.device)
                mask = arange >= self.total_count.unsqueeze(-1)
                if torch._C._get_tracing_state():
                    # [JIT WORKAROUND] lack of support for .masked_fill_()
                    bernoullis[mask.expand(shape)] = 0.
                else:
                    bernoullis.masked_fill_(mask, 0.)
            return bernoullis.sum(dim=-1)

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        log_factorial_n = torch.lgamma(self.total_count + 1)
        log_factorial_k = torch.lgamma(value + 1)
        log_factorial_nmk = torch.lgamma(self.total_count - value + 1)
        # k * log(p) + (n - k) * log(1 - p) = k * (log(p) - log(1 - p)) + n * log(1 - p)
        #     (case logit < 0)              = k * logit - n * log1p(e^logit)
        #     (case logit > 0)              = k * logit - n * (log(p) - log(1 - p)) + n * log(p)
        #                                   = k * logit - n * logit - n * log1p(e^-logit)
        #     (merge two cases)             = k * logit - n * max(logit, 0) - n * log1p(e^-|logit|)
        normalize_term = (self.total_count * _clamp_by_zero(self.logits)
                          + self.total_count * torch.log1p(torch.exp(-torch.abs(self.logits)))
                          - log_factorial_n)
        return value * self.logits - log_factorial_k - log_factorial_nmk - normalize_term

    def enumerate_support(self, expand=True):
        """Return all values in {0, ..., total_count} (homogeneous count only)."""
        total_count = int(self.total_count.max())
        if not self.total_count.min() == total_count:
            raise NotImplementedError("Inhomogeneous total count not supported by `enumerate_support`.")
        values = torch.arange(1 + total_count, dtype=self._param.dtype, device=self._param.device)
        values = values.view((-1,) + (1,) * len(self._batch_shape))
        if expand:
            values = values.expand((-1,) + self._batch_shape)
        return values
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/distributions/binomial.py
0.909793
0.354433
binomial.py
pypi
import torch
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.transforms import Transform
from torch.distributions.utils import _sum_rightmost


class TransformedDistribution(Distribution):
    r"""
    Extension of the Distribution class, which applies a sequence of Transforms
    to a base distribution.  Let f be the composition of transforms applied::

        X ~ BaseDistribution
        Y = f(X) ~ TransformedDistribution(BaseDistribution, f)
        log p(Y) = log p(X) + log |det (dX/dY)|

    Note that the ``.event_shape`` of a :class:`TransformedDistribution` is the
    maximum shape of its base distribution and its transforms, since transforms
    can introduce correlations among events.

    An example for the usage of :class:`TransformedDistribution` would be::

        # Building a Logistic Distribution
        # X ~ Uniform(0, 1)
        # f = a + b * logit(X)
        # Y ~ f(X) ~ Logistic(a, b)
        base_distribution = Uniform(0, 1)
        transforms = [SigmoidTransform().inv, AffineTransform(loc=a, scale=b)]
        logistic = TransformedDistribution(base_distribution, transforms)

    For more examples, please look at the implementations of
    :class:`~torch.distributions.gumbel.Gumbel`,
    :class:`~torch.distributions.half_cauchy.HalfCauchy`,
    :class:`~torch.distributions.half_normal.HalfNormal`,
    :class:`~torch.distributions.log_normal.LogNormal`,
    :class:`~torch.distributions.pareto.Pareto`,
    :class:`~torch.distributions.weibull.Weibull`,
    :class:`~torch.distributions.relaxed_bernoulli.RelaxedBernoulli` and
    :class:`~torch.distributions.relaxed_categorical.RelaxedOneHotCategorical`
    """
    arg_constraints = {}

    def __init__(self, base_distribution, transforms, validate_args=None):
        self.base_dist = base_distribution
        # Normalize `transforms` into a list of Transform objects.
        if isinstance(transforms, Transform):
            self.transforms = [transforms, ]
        elif isinstance(transforms, list):
            if not all(isinstance(t, Transform) for t in transforms):
                raise ValueError("transforms must be a Transform or a list of Transforms")
            self.transforms = transforms
        else:
            raise ValueError("transforms must be a Transform or list, but was {}".format(transforms))
        # The event dimensionality is the max over the base distribution and
        # all transforms; trailing dims of the combined shape become the event
        # shape and the rest stay as batch dims.
        shape = self.base_dist.batch_shape + self.base_dist.event_shape
        event_dim = max([len(self.base_dist.event_shape)] + [t.event_dim for t in self.transforms])
        batch_shape = shape[:len(shape) - event_dim]
        event_shape = shape[len(shape) - event_dim:]
        super(TransformedDistribution, self).__init__(batch_shape, event_shape, validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        # Expand the base distribution; transforms are shared (not copied).
        new = self._get_checked_instance(TransformedDistribution, _instance)
        batch_shape = torch.Size(batch_shape)
        base_dist_batch_shape = batch_shape + self.base_dist.batch_shape[len(self.batch_shape):]
        new.base_dist = self.base_dist.expand(base_dist_batch_shape)
        new.transforms = self.transforms
        super(TransformedDistribution, new).__init__(batch_shape, self.event_shape, validate_args=False)
        new._validate_args = self._validate_args
        return new

    @constraints.dependent_property
    def support(self):
        # The support after transformation is the last transform's codomain.
        return self.transforms[-1].codomain if self.transforms else self.base_dist.support

    @property
    def has_rsample(self):
        return self.base_dist.has_rsample

    def sample(self, sample_shape=torch.Size()):
        """
        Generates a sample_shape shaped sample or sample_shape shaped batch of
        samples if the distribution parameters are batched. Samples first from
        base distribution and applies `transform()` for every transform in the
        list.
        """
        with torch.no_grad():
            x = self.base_dist.sample(sample_shape)
            for transform in self.transforms:
                x = transform(x)
            return x

    def rsample(self, sample_shape=torch.Size()):
        """
        Generates a sample_shape shaped reparameterized sample or sample_shape
        shaped batch of reparameterized samples if the distribution parameters
        are batched. Samples first from base distribution and applies
        `transform()` for every transform in the list.
        """
        x = self.base_dist.rsample(sample_shape)
        for transform in self.transforms:
            x = transform(x)
        return x

    def log_prob(self, value):
        """
        Scores the sample by inverting the transform(s) and computing the score
        using the score of the base distribution and the log abs det jacobian.
        """
        event_dim = len(self.event_shape)
        log_prob = 0.0
        y = value
        # Walk the transform chain backwards, accumulating the (negated)
        # log|det J| contribution of each transform at the event level.
        for transform in reversed(self.transforms):
            x = transform.inv(y)
            log_prob = log_prob - _sum_rightmost(transform.log_abs_det_jacobian(x, y),
                                                 event_dim - transform.event_dim)
            y = x

        log_prob = log_prob + _sum_rightmost(self.base_dist.log_prob(y),
                                             event_dim - len(self.base_dist.event_shape))
        return log_prob

    def _monotonize_cdf(self, value):
        """
        This conditionally flips ``value -> 1-value`` to ensure :meth:`cdf` is
        monotone increasing.
        """
        # The combined sign of all transforms tells whether the chain is
        # monotone increasing (+1) or decreasing (-1, needs the flip).
        sign = 1
        for transform in self.transforms:
            sign = sign * transform.sign
        if isinstance(sign, int) and sign == 1:
            return value
        return sign * (value - 0.5) + 0.5

    def cdf(self, value):
        """
        Computes the cumulative distribution function by inverting the
        transform(s) and computing the score of the base distribution.
        """
        for transform in self.transforms[::-1]:
            value = transform.inv(value)
        if self._validate_args:
            self.base_dist._validate_sample(value)
        value = self.base_dist.cdf(value)
        value = self._monotonize_cdf(value)
        return value

    def icdf(self, value):
        """
        Computes the inverse cumulative distribution function using
        transform(s) and computing the score of the base distribution.
        """
        value = self._monotonize_cdf(value)
        if self._validate_args:
            self.base_dist._validate_sample(value)
        value = self.base_dist.icdf(value)
        for transform in self.transforms:
            value = transform(value)
        return value
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/distributions/transformed_distribution.py
0.939345
0.706849
transformed_distribution.py
pypi
import torch
import warnings
from torch.distributions import constraints
from torch.distributions.utils import lazy_property


class Distribution(object):
    r"""
    Distribution is the abstract base class for probability distributions.
    """

    # Class-level defaults; concrete subclasses override these as appropriate.
    has_rsample = False
    has_enumerate_support = False
    _validate_args = False          # global default, flipped by set_default_validate_args
    support = None
    arg_constraints = {}

    @staticmethod
    def set_default_validate_args(value):
        # Globally enables/disables argument validation for every distribution
        # that does not pass an explicit `validate_args` to __init__.
        if value not in [True, False]:
            raise ValueError
        Distribution._validate_args = value

    def __init__(self, batch_shape=torch.Size(), event_shape=torch.Size(), validate_args=None):
        self._batch_shape = batch_shape
        self._event_shape = event_shape
        if validate_args is not None:
            # Per-instance override of the class-level default.
            self._validate_args = validate_args
        if self._validate_args:
            # Check each declared constraint against the stored parameter value.
            for param, constraint in self.arg_constraints.items():
                if constraints.is_dependent(constraint):
                    continue  # skip constraints that cannot be checked
                if param not in self.__dict__ and isinstance(getattr(type(self), param), lazy_property):
                    continue  # skip checking lazily-constructed args
                if not constraint.check(getattr(self, param)).all():
                    raise ValueError("The parameter {} has invalid values".format(param))
        super(Distribution, self).__init__()

    def expand(self, batch_shape, _instance=None):
        """
        Returns a new distribution instance (or populates an existing instance
        provided by a derived class) with batch dimensions expanded to
        `batch_shape`. This method calls :class:`~torch.Tensor.expand` on
        the distribution's parameters. As such, this does not allocate new
        memory for the expanded distribution instance. Additionally,
        this does not repeat any args checking or parameter broadcasting in
        `__init__.py`, when an instance is first created.

        Args:
            batch_shape (torch.Size): the desired expanded size.
            _instance: new instance provided by subclasses that
                need to override `.expand`.

        Returns:
            New distribution instance with batch dimensions expanded to
            `batch_size`.
        """
        raise NotImplementedError

    @property
    def batch_shape(self):
        """
        Returns the shape over which parameters are batched.
        """
        return self._batch_shape

    @property
    def event_shape(self):
        """
        Returns the shape of a single sample (without batching).
        """
        return self._event_shape

    @property
    def arg_constraints(self):
        """
        Returns a dictionary from argument names to
        :class:`~torch.distributions.constraints.Constraint` objects that
        should be satisfied by each argument of this distribution. Args that
        are not tensors need not appear in this dict.
        """
        raise NotImplementedError

    @property
    def support(self):
        """
        Returns a :class:`~torch.distributions.constraints.Constraint` object
        representing this distribution's support.
        """
        raise NotImplementedError

    @property
    def mean(self):
        """
        Returns the mean of the distribution.
        """
        raise NotImplementedError

    @property
    def variance(self):
        """
        Returns the variance of the distribution.
        """
        raise NotImplementedError

    @property
    def stddev(self):
        """
        Returns the standard deviation of the distribution.
        """
        # Derived from variance; subclasses may override with a cheaper form.
        return self.variance.sqrt()

    def sample(self, sample_shape=torch.Size()):
        """
        Generates a sample_shape shaped sample or sample_shape shaped batch of
        samples if the distribution parameters are batched.
        """
        # Default implementation: a non-differentiable wrapper around rsample.
        with torch.no_grad():
            return self.rsample(sample_shape)

    def rsample(self, sample_shape=torch.Size()):
        """
        Generates a sample_shape shaped reparameterized sample or sample_shape
        shaped batch of reparameterized samples if the distribution parameters
        are batched.
        """
        raise NotImplementedError

    def sample_n(self, n):
        """
        Generates n samples or n batches of samples if the distribution
        parameters are batched.
        """
        # Deprecated alias for sample((n,)).
        warnings.warn('sample_n will be deprecated. Use .sample((n,)) instead', UserWarning)
        return self.sample(torch.Size((n,)))

    def log_prob(self, value):
        """
        Returns the log of the probability density/mass function evaluated at
        `value`.

        Args:
            value (Tensor):
        """
        raise NotImplementedError

    def cdf(self, value):
        """
        Returns the cumulative density/mass function evaluated at
        `value`.

        Args:
            value (Tensor):
        """
        raise NotImplementedError

    def icdf(self, value):
        """
        Returns the inverse cumulative density/mass function evaluated at
        `value`.

        Args:
            value (Tensor):
        """
        raise NotImplementedError

    def enumerate_support(self, expand=True):
        """
        Returns tensor containing all values supported by a discrete
        distribution. The result will enumerate over dimension 0, so the shape
        of the result will be `(cardinality,) + batch_shape + event_shape`
        (where `event_shape = ()` for univariate distributions).

        Note that this enumerates over all batched tensors in lock-step
        `[[0, 0], [1, 1], ...]`. With `expand=False`, enumeration happens
        along dim 0, but with the remaining batch dimensions being
        singleton dimensions, `[[0], [1], ..`.

        To iterate over the full Cartesian product use
        `itertools.product(m.enumerate_support())`.

        Args:
            expand (bool): whether to expand the support over the
                batch dims to match the distribution's `batch_shape`.

        Returns:
            Tensor iterating over dimension 0.
        """
        raise NotImplementedError

    def entropy(self):
        """
        Returns entropy of distribution, batched over batch_shape.

        Returns:
            Tensor of shape batch_shape.
        """
        raise NotImplementedError

    def perplexity(self):
        """
        Returns perplexity of distribution, batched over batch_shape.

        Returns:
            Tensor of shape batch_shape.
        """
        # Perplexity is exp(entropy).
        return torch.exp(self.entropy())

    def _extended_shape(self, sample_shape=torch.Size()):
        """
        Returns the size of the sample returned by the distribution, given
        a `sample_shape`. Note, that the batch and event shapes of a distribution
        instance are fixed at the time of construction. If this is empty, the
        returned shape is upcast to (1,).

        Args:
            sample_shape (torch.Size): the size of the sample to be drawn.
        """
        if not isinstance(sample_shape, torch.Size):
            sample_shape = torch.Size(sample_shape)
        # Full sample shape is sample dims, then batch dims, then event dims.
        return sample_shape + self._batch_shape + self._event_shape

    def _validate_sample(self, value):
        """
        Argument validation for distribution methods such as `log_prob`,
        `cdf` and `icdf`. The rightmost dimensions of a value to be
        scored via these methods must agree with the distribution's batch
        and event shapes.

        Args:
            value (Tensor): the tensor whose log probability is to be
                computed by the `log_prob` method.
        Raises
            ValueError: when the rightmost dimensions of `value` do not match the
                distribution's batch and event shapes.
        """
        if not isinstance(value, torch.Tensor):
            raise ValueError('The value argument to log_prob must be a Tensor')

        # The trailing dims of `value` must equal event_shape exactly.
        event_dim_start = len(value.size()) - len(self._event_shape)
        if value.size()[event_dim_start:] != self._event_shape:
            raise ValueError('The right-most size of value must match event_shape: {} vs {}.'.
                             format(value.size(), self._event_shape))

        # The remaining dims must be broadcastable with batch_shape+event_shape.
        actual_shape = value.size()
        expected_shape = self._batch_shape + self._event_shape
        for i, j in zip(reversed(actual_shape), reversed(expected_shape)):
            if i != 1 and j != 1 and i != j:
                raise ValueError('Value is not broadcastable with batch_shape+event_shape: {} vs {}.'.
                                 format(actual_shape, expected_shape))

        # Finally, every element must lie inside the declared support.
        if not self.support.check(value).all():
            raise ValueError('The value argument must be within the support')

    def _get_checked_instance(self, cls, _instance=None):
        # Helper for .expand(): either validates a subclass-provided instance
        # or allocates an uninitialized one via __new__ (skipping __init__).
        if _instance is None and type(self).__init__ != cls.__init__:
            raise NotImplementedError("Subclass {} of {} that defines a custom __init__ method "
                                      "must also define a custom .expand() method.".
                                      format(self.__class__.__name__, cls.__name__))
        return self.__new__(type(self)) if _instance is None else _instance

    def __repr__(self):
        # Show each stored (non-lazy) parameter: the value if scalar, else its size.
        param_names = [k for k, _ in self.arg_constraints.items() if k in self.__dict__]
        args_string = ', '.join(['{}: {}'.format(p, self.__dict__[p]
                                if self.__dict__[p].numel() == 1
                                else self.__dict__[p].size()) for p in param_names])
        return self.__class__.__name__ + '(' + args_string + ')'
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/distributions/distribution.py
0.943484
0.509581
distribution.py
pypi
from numbers import Number

import torch
from torch.distributions import constraints
from torch.distributions.exp_family import ExponentialFamily
from torch.distributions.utils import broadcast_all, probs_to_logits, logits_to_probs, lazy_property
from torch.nn.functional import binary_cross_entropy_with_logits


class Bernoulli(ExponentialFamily):
    r"""
    Creates a Bernoulli distribution parameterized by :attr:`probs`
    or :attr:`logits` (but not both).

    Samples are binary (0 or 1). They take the value `1` with probability `p`
    and `0` with probability `1 - p`.

    Example::

        >>> m = Bernoulli(torch.tensor([0.3]))
        >>> m.sample()  # 30% chance 1; 70% chance 0
        tensor([ 0.])

    Args:
        probs (Number, Tensor): the probability of sampling `1`
        logits (Number, Tensor): the log-odds of sampling `1`
    """
    arg_constraints = {'probs': constraints.unit_interval,
                       'logits': constraints.real}
    support = constraints.boolean
    has_enumerate_support = True
    _mean_carrier_measure = 0  # carrier measure k(x) is 0 for the Bernoulli

    def __init__(self, probs=None, logits=None, validate_args=None):
        # Exactly one of probs/logits is stored eagerly; the other is derived
        # on demand by the lazy_property pair below.
        if (probs is None) == (logits is None):
            raise ValueError("Either `probs` or `logits` must be specified, but not both.")
        if probs is not None:
            is_scalar = isinstance(probs, Number)
            self.probs, = broadcast_all(probs)
        else:
            is_scalar = isinstance(logits, Number)
            self.logits, = broadcast_all(logits)
        # _param aliases whichever parameterization was supplied.
        self._param = self.probs if probs is not None else self.logits
        if is_scalar:
            batch_shape = torch.Size()
        else:
            batch_shape = self._param.size()
        super(Bernoulli, self).__init__(batch_shape, validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(Bernoulli, _instance)
        batch_shape = torch.Size(batch_shape)
        # Only expand the parameterization(s) that have actually been
        # materialized; `in self.__dict__` avoids triggering the lazy ones.
        if 'probs' in self.__dict__:
            new.probs = self.probs.expand(batch_shape)
            new._param = new.probs
        if 'logits' in self.__dict__:
            new.logits = self.logits.expand(batch_shape)
            new._param = new.logits
        super(Bernoulli, new).__init__(batch_shape, validate_args=False)
        new._validate_args = self._validate_args
        return new

    def _new(self, *args, **kwargs):
        # Allocate a tensor with the same dtype/device as the stored parameter.
        return self._param.new(*args, **kwargs)

    @property
    def mean(self):
        return self.probs

    @property
    def variance(self):
        # Var[X] = p(1-p) for a Bernoulli.
        return self.probs * (1 - self.probs)

    @lazy_property
    def logits(self):
        # Derived lazily when the distribution was constructed from probs.
        return probs_to_logits(self.probs, is_binary=True)

    @lazy_property
    def probs(self):
        # Derived lazily when the distribution was constructed from logits.
        return logits_to_probs(self.logits, is_binary=True)

    @property
    def param_shape(self):
        return self._param.size()

    def sample(self, sample_shape=torch.Size()):
        shape = self._extended_shape(sample_shape)
        with torch.no_grad():
            return torch.bernoulli(self.probs.expand(shape))

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        logits, value = broadcast_all(self.logits, value)
        # log p(value) = -BCE(logits, value), computed stably in logit space.
        return -binary_cross_entropy_with_logits(logits, value, reduction='none')

    def entropy(self):
        # H = -p log p - (1-p) log(1-p) == BCE(logits, probs).
        return binary_cross_entropy_with_logits(self.logits, self.probs, reduction='none')

    def enumerate_support(self, expand=True):
        # The support is {0, 1}, enumerated along a new leading dim.
        values = torch.arange(2, dtype=self._param.dtype, device=self._param.device)
        values = values.view((-1,) + (1,) * len(self._batch_shape))
        if expand:
            values = values.expand((-1,) + self._batch_shape)
        return values

    @property
    def _natural_params(self):
        # Natural parameter of the Bernoulli is the log-odds.
        return (torch.log(self.probs / (1 - self.probs)), )

    def _log_normalizer(self, x):
        # F(theta) = log(1 + e^theta).
        return torch.log(1 + torch.exp(x))
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/distributions/bernoulli.py
0.946745
0.522324
bernoulli.py
pypi
import torch from torch.distributions.distribution import Distribution class ExponentialFamily(Distribution): r""" ExponentialFamily is the abstract base class for probability distributions belonging to an exponential family, whose probability mass/density function has the form is defined below .. math:: p_{F}(x; \theta) = \exp(\langle t(x), \theta\rangle - F(\theta) + k(x)) where :math:`\theta` denotes the natural parameters, :math:`t(x)` denotes the sufficient statistic, :math:`F(\theta)` is the log normalizer function for a given family and :math:`k(x)` is the carrier measure. Note: This class is an intermediary between the `Distribution` class and distributions which belong to an exponential family mainly to check the correctness of the `.entropy()` and analytic KL divergence methods. We use this class to compute the entropy and KL divergence using the AD framework and Bregman divergences (courtesy of: Frank Nielsen and Richard Nock, Entropies and Cross-entropies of Exponential Families). """ @property def _natural_params(self): """ Abstract method for natural parameters. Returns a tuple of Tensors based on the distribution """ raise NotImplementedError def _log_normalizer(self, *natural_params): """ Abstract method for log normalizer function. Returns a log normalizer based on the distribution and input """ raise NotImplementedError @property def _mean_carrier_measure(self): """ Abstract method for expected carrier measure, which is required for computing entropy. """ raise NotImplementedError def entropy(self): """ Method to compute the entropy using Bregman divergence of the log normalizer. """ result = -self._mean_carrier_measure nparams = [p.detach().requires_grad_() for p in self._natural_params] lg_normal = self._log_normalizer(*nparams) gradients = torch.autograd.grad(lg_normal.sum(), nparams, create_graph=True) result += lg_normal for np, g in zip(nparams, gradients): result -= np * g return result
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/distributions/exp_family.py
0.918311
0.894005
exp_family.py
pypi
from numbers import Number import torch from torch.distributions import constraints from torch.distributions.distribution import Distribution from torch.distributions.utils import broadcast_all class Uniform(Distribution): r""" Generates uniformly distributed random samples from the half-open interval ``[low, high)``. Example:: >>> m = Uniform(torch.tensor([0.0]), torch.tensor([5.0])) >>> m.sample() # uniformly distributed in the range [0.0, 5.0) tensor([ 2.3418]) Args: low (float or Tensor): lower range (inclusive). high (float or Tensor): upper range (exclusive). """ # TODO allow (loc,scale) parameterization to allow independent constraints. arg_constraints = {'low': constraints.dependent, 'high': constraints.dependent} has_rsample = True @property def mean(self): return (self.high + self.low) / 2 @property def stddev(self): return (self.high - self.low) / 12**0.5 @property def variance(self): return (self.high - self.low).pow(2) / 12 def __init__(self, low, high, validate_args=None): self.low, self.high = broadcast_all(low, high) if isinstance(low, Number) and isinstance(high, Number): batch_shape = torch.Size() else: batch_shape = self.low.size() super(Uniform, self).__init__(batch_shape, validate_args=validate_args) if self._validate_args and not torch.lt(self.low, self.high).all(): raise ValueError("Uniform is not defined when low>= high") def expand(self, batch_shape, _instance=None): new = self._get_checked_instance(Uniform, _instance) batch_shape = torch.Size(batch_shape) new.low = self.low.expand(batch_shape) new.high = self.high.expand(batch_shape) super(Uniform, new).__init__(batch_shape, validate_args=False) new._validate_args = self._validate_args return new @constraints.dependent_property def support(self): return constraints.interval(self.low, self.high) def rsample(self, sample_shape=torch.Size()): shape = self._extended_shape(sample_shape) rand = torch.rand(shape, dtype=self.low.dtype, device=self.low.device) return self.low + rand * (self.high - 
self.low) def log_prob(self, value): if self._validate_args: self._validate_sample(value) lb = self.low.le(value).type_as(self.low) ub = self.high.gt(value).type_as(self.low) return torch.log(lb.mul(ub)) - torch.log(self.high - self.low) def cdf(self, value): if self._validate_args: self._validate_sample(value) result = (value - self.low) / (self.high - self.low) return result.clamp(min=0, max=1) def icdf(self, value): if self._validate_args: self._validate_sample(value) result = value * (self.high - self.low) + self.low return result def entropy(self): return torch.log(self.high - self.low)
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/distributions/uniform.py
0.900893
0.562237
uniform.py
pypi
import math

import torch
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.utils import _standard_normal, lazy_property


def _batch_mv(bmat, bvec):
    r"""
    Performs a batched matrix-vector product, with compatible but different batch shapes.

    This function takes as input `bmat`, containing :math:`n \times n` matrices, and
    `bvec`, containing length :math:`n` vectors.

    Both `bmat` and `bvec` may have any number of leading dimensions, which correspond
    to a batch shape. They are not necessarily assumed to have the same batch shape,
    just ones which can be broadcasted.
    """
    # Promote the vector to a column matrix so matmul broadcasts, then drop the column dim.
    return torch.matmul(bmat, bvec.unsqueeze(-1)).squeeze(-1)


def _batch_mahalanobis(bL, bx):
    r"""
    Computes the squared Mahalanobis distance :math:`\mathbf{x}^\top\mathbf{M}^{-1}\mathbf{x}`
    for a factored :math:`\mathbf{M} = \mathbf{L}\mathbf{L}^\top`.

    Accepts batches for both bL and bx. They are not necessarily assumed to have the same batch
    shape, but `bL` one should be able to broadcasted to `bx` one.
    """
    n = bx.size(-1)
    bx_batch_shape = bx.shape[:-1]

    # Assume that bL.shape = (i, 1, n, n), bx.shape = (..., i, j, n),
    # we are going to make bx have shape (..., 1, j, i, 1, n) to apply batched tri.solve
    bx_batch_dims = len(bx_batch_shape)
    bL_batch_dims = bL.dim() - 2
    outer_batch_dims = bx_batch_dims - bL_batch_dims
    old_batch_dims = outer_batch_dims + bL_batch_dims
    new_batch_dims = outer_batch_dims + 2 * bL_batch_dims
    # Reshape bx with the shape (..., 1, i, j, 1, n)
    # NOTE(review): each bx batch dim is factored as (sx // sL, sL); this assumes sL divides sx,
    # which holds when bL's batch shape broadcasts against bx's — confirm for new callers.
    bx_new_shape = bx.shape[:outer_batch_dims]
    for (sL, sx) in zip(bL.shape[:-2], bx.shape[outer_batch_dims:-1]):
        bx_new_shape += (sx // sL, sL)
    bx_new_shape += (n,)
    bx = bx.reshape(bx_new_shape)
    # Permute bx to make it have shape (..., 1, j, i, 1, n)
    permute_dims = (list(range(outer_batch_dims)) +
                    list(range(outer_batch_dims, new_batch_dims, 2)) +
                    list(range(outer_batch_dims + 1, new_batch_dims, 2)) +
                    [new_batch_dims])
    bx = bx.permute(permute_dims)

    # Flatten both operands so a single batched triangular solve handles everything.
    flat_L = bL.reshape(-1, n, n)  # shape = b x n x n
    flat_x = bx.reshape(-1, flat_L.size(0), n)  # shape = c x b x n
    flat_x_swap = flat_x.permute(1, 2, 0)  # shape = b x n x c
    # Solve L z = x; the squared norm of z is the Mahalanobis distance.
    M_swap = torch.triangular_solve(flat_x_swap, flat_L, upper=False)[0].pow(2).sum(-2)  # shape = b x c
    M = M_swap.t()  # shape = c x b

    # Now we revert the above reshape and permute operators.
    permuted_M = M.reshape(bx.shape[:-1])  # shape = (..., 1, j, i, 1)
    permute_inv_dims = list(range(outer_batch_dims))
    for i in range(bL_batch_dims):
        permute_inv_dims += [outer_batch_dims + i, old_batch_dims + i]
    reshaped_M = permuted_M.permute(permute_inv_dims)  # shape = (..., 1, i, j, 1)
    return reshaped_M.reshape(bx_batch_shape)


def _precision_to_scale_tril(P):
    # Converts a precision matrix to the lower Cholesky factor of the covariance.
    # Ref: https://nbviewer.jupyter.org/gist/fehiepsi/5ef8e09e61604f10607380467eb82006#Precision-to-scale_tril
    Lf = torch.cholesky(torch.flip(P, (-2, -1)))
    L_inv = torch.transpose(torch.flip(Lf, (-2, -1)), -2, -1)
    L = torch.triangular_solve(torch.eye(P.shape[-1], dtype=P.dtype, device=P.device),
                               L_inv, upper=False)[0]
    return L


class MultivariateNormal(Distribution):
    r"""
    Creates a multivariate normal (also called Gaussian) distribution
    parameterized by a mean vector and a covariance matrix.

    The multivariate normal distribution can be parameterized either
    in terms of a positive definite covariance matrix :math:`\mathbf{\Sigma}`
    or a positive definite precision matrix :math:`\mathbf{\Sigma}^{-1}`
    or a lower-triangular matrix :math:`\mathbf{L}` with positive-valued
    diagonal entries, such that
    :math:`\mathbf{\Sigma} = \mathbf{L}\mathbf{L}^\top`. This triangular matrix
    can be obtained via e.g. Cholesky decomposition of the covariance.

    Example:

        >>> m = MultivariateNormal(torch.zeros(2), torch.eye(2))
        >>> m.sample()  # normally distributed with mean=`[0,0]` and covariance_matrix=`I`
        tensor([-0.2102, -0.5429])

    Args:
        loc (Tensor): mean of the distribution
        covariance_matrix (Tensor): positive-definite covariance matrix
        precision_matrix (Tensor): positive-definite precision matrix
        scale_tril (Tensor): lower-triangular factor of covariance, with positive-valued diagonal

    Note:
        Only one of :attr:`covariance_matrix` or :attr:`precision_matrix` or
        :attr:`scale_tril` can be specified.

        Using :attr:`scale_tril` will be more efficient: all computations internally
        are based on :attr:`scale_tril`. If :attr:`covariance_matrix` or
        :attr:`precision_matrix` is passed instead, it is only used to compute
        the corresponding lower triangular matrices using a Cholesky decomposition.
    """
    arg_constraints = {'loc': constraints.real_vector,
                       'covariance_matrix': constraints.positive_definite,
                       'precision_matrix': constraints.positive_definite,
                       'scale_tril': constraints.lower_cholesky}
    support = constraints.real
    has_rsample = True

    def __init__(self, loc, covariance_matrix=None, precision_matrix=None, scale_tril=None, validate_args=None):
        if loc.dim() < 1:
            raise ValueError("loc must be at least one-dimensional.")
        # Exactly one parameterization may be supplied.
        if (covariance_matrix is not None) + (scale_tril is not None) + (precision_matrix is not None) != 1:
            raise ValueError("Exactly one of covariance_matrix or precision_matrix or scale_tril may be specified.")

        loc_ = loc.unsqueeze(-1)  # temporarily add dim on right
        # Broadcast loc against the supplied matrix parameter's batch dims.
        if scale_tril is not None:
            if scale_tril.dim() < 2:
                raise ValueError("scale_tril matrix must be at least two-dimensional, "
                                 "with optional leading batch dimensions")
            self.scale_tril, loc_ = torch.broadcast_tensors(scale_tril, loc_)
        elif covariance_matrix is not None:
            if covariance_matrix.dim() < 2:
                raise ValueError("covariance_matrix must be at least two-dimensional, "
                                 "with optional leading batch dimensions")
            self.covariance_matrix, loc_ = torch.broadcast_tensors(covariance_matrix, loc_)
        else:
            if precision_matrix.dim() < 2:
                raise ValueError("precision_matrix must be at least two-dimensional, "
                                 "with optional leading batch dimensions")
            self.precision_matrix, loc_ = torch.broadcast_tensors(precision_matrix, loc_)
        self.loc = loc_[..., 0]  # drop rightmost dim

        batch_shape, event_shape = self.loc.shape[:-1], self.loc.shape[-1:]
        super(MultivariateNormal, self).__init__(batch_shape, event_shape, validate_args=validate_args)

        # All internal computations use the (un-broadcast) lower Cholesky factor.
        if scale_tril is not None:
            self._unbroadcasted_scale_tril = scale_tril
        elif covariance_matrix is not None:
            self._unbroadcasted_scale_tril = torch.cholesky(covariance_matrix)
        else:  # precision_matrix is not None
            self._unbroadcasted_scale_tril = _precision_to_scale_tril(precision_matrix)

    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(MultivariateNormal, _instance)
        batch_shape = torch.Size(batch_shape)
        loc_shape = batch_shape + self.event_shape
        cov_shape = batch_shape + self.event_shape + self.event_shape
        new.loc = self.loc.expand(loc_shape)
        new._unbroadcasted_scale_tril = self._unbroadcasted_scale_tril
        # Only expand whichever parameterizations were actually materialized.
        if 'covariance_matrix' in self.__dict__:
            new.covariance_matrix = self.covariance_matrix.expand(cov_shape)
        if 'scale_tril' in self.__dict__:
            new.scale_tril = self.scale_tril.expand(cov_shape)
        if 'precision_matrix' in self.__dict__:
            new.precision_matrix = self.precision_matrix.expand(cov_shape)
        super(MultivariateNormal, new).__init__(batch_shape,
                                                self.event_shape,
                                                validate_args=False)
        new._validate_args = self._validate_args
        return new

    @lazy_property
    def scale_tril(self):
        return self._unbroadcasted_scale_tril.expand(
            self._batch_shape + self._event_shape + self._event_shape)

    @lazy_property
    def covariance_matrix(self):
        # Sigma = L L^T, derived from the stored Cholesky factor.
        return (torch.matmul(self._unbroadcasted_scale_tril,
                             self._unbroadcasted_scale_tril.transpose(-1, -2))
                .expand(self._batch_shape + self._event_shape + self._event_shape))

    @lazy_property
    def precision_matrix(self):
        identity = torch.eye(self.loc.size(-1), device=self.loc.device, dtype=self.loc.dtype)
        # TODO: use cholesky_inverse when its batching is supported
        return torch.cholesky_solve(identity, self._unbroadcasted_scale_tril).expand(
            self._batch_shape + self._event_shape + self._event_shape)

    @property
    def mean(self):
        return self.loc

    @property
    def variance(self):
        # Diagonal of Sigma: row-wise squared norms of L.
        return self._unbroadcasted_scale_tril.pow(2).sum(-1).expand(
            self._batch_shape + self._event_shape)

    def rsample(self, sample_shape=torch.Size()):
        # Reparameterized draw: loc + L @ eps with eps ~ N(0, I).
        shape = self._extended_shape(sample_shape)
        eps = _standard_normal(shape, dtype=self.loc.dtype, device=self.loc.device)
        return self.loc + _batch_mv(self._unbroadcasted_scale_tril, eps)

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        diff = value - self.loc
        M = _batch_mahalanobis(self._unbroadcasted_scale_tril, diff)
        # log|Sigma|/2 equals the log-sum of L's diagonal.
        half_log_det = self._unbroadcasted_scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1)
        return -0.5 * (self._event_shape[0] * math.log(2 * math.pi) + M) - half_log_det

    def entropy(self):
        half_log_det = self._unbroadcasted_scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1)
        H = 0.5 * self._event_shape[0] * (1.0 + math.log(2 * math.pi)) + half_log_det
        if len(self._batch_shape) == 0:
            return H
        else:
            return H.expand(self._batch_shape)
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/distributions/multivariate_normal.py
0.938513
0.76708
multivariate_normal.py
pypi
from numbers import Number

import torch
from torch.distributions import constraints
from torch.distributions.dirichlet import Dirichlet
from torch.distributions.exp_family import ExponentialFamily
from torch.distributions.utils import broadcast_all


class Beta(ExponentialFamily):
    r"""
    Beta distribution parameterized by :attr:`concentration1` and :attr:`concentration0`.

    Implemented by delegating to a two-component :class:`Dirichlet`: a Beta
    sample is the first coordinate of a Dirichlet([c1, c0]) sample.

    Example::

        >>> m = Beta(torch.tensor([0.5]), torch.tensor([0.5]))
        >>> m.sample()  # Beta distributed with concentration concentration1 and concentration0
        tensor([ 0.1046])

    Args:
        concentration1 (float or Tensor): 1st concentration parameter of the distribution
            (often referred to as alpha)
        concentration0 (float or Tensor): 2nd concentration parameter of the distribution
            (often referred to as beta)
    """
    arg_constraints = {'concentration1': constraints.positive,
                       'concentration0': constraints.positive}
    support = constraints.unit_interval
    has_rsample = True

    def __init__(self, concentration1, concentration0, validate_args=None):
        if isinstance(concentration1, Number) and isinstance(concentration0, Number):
            # Scalar parameters: pack both into a single length-2 tensor.
            packed = torch.tensor([float(concentration1), float(concentration0)])
        else:
            concentration1, concentration0 = broadcast_all(concentration1, concentration0)
            packed = torch.stack([concentration1, concentration0], -1)
        self._dirichlet = Dirichlet(packed)
        super(Beta, self).__init__(self._dirichlet._batch_shape, validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        inst = self._get_checked_instance(Beta, _instance)
        shape = torch.Size(batch_shape)
        inst._dirichlet = self._dirichlet.expand(shape)
        super(Beta, inst).__init__(shape, validate_args=False)
        inst._validate_args = self._validate_args
        return inst

    @property
    def mean(self):
        c1, c0 = self.concentration1, self.concentration0
        return c1 / (c1 + c0)

    @property
    def variance(self):
        c1, c0 = self.concentration1, self.concentration0
        total = c1 + c0
        # Var = c1*c0 / ((c1+c0)^2 * (c1+c0+1))
        return c1 * c0 / (total.pow(2) * (total + 1))

    def rsample(self, sample_shape=()):
        # First Dirichlet coordinate is Beta-distributed.
        draw = self._dirichlet.rsample(sample_shape)
        return draw.select(-1, 0)

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        # Score (x, 1-x) under the underlying two-component Dirichlet.
        paired = torch.stack([value, 1.0 - value], -1)
        return self._dirichlet.log_prob(paired)

    def entropy(self):
        return self._dirichlet.entropy()

    @property
    def concentration1(self):
        c1 = self._dirichlet.concentration[..., 0]
        return torch.tensor([c1]) if isinstance(c1, Number) else c1

    @property
    def concentration0(self):
        c0 = self._dirichlet.concentration[..., 1]
        return torch.tensor([c0]) if isinstance(c0, Number) else c0

    @property
    def _natural_params(self):
        return (self.concentration1, self.concentration0)

    def _log_normalizer(self, x, y):
        # log Beta function: log G(x) + log G(y) - log G(x+y).
        return (x.lgamma() + y.lgamma()) - (x + y).lgamma()
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/distributions/beta.py
0.964888
0.562297
beta.py
pypi
from numbers import Number

import torch
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.gamma import Gamma
from torch.distributions.utils import broadcast_all

# torch._six.nan is plain IEEE NaN; spell it directly to avoid the private import.
nan = float('nan')


class FisherSnedecor(Distribution):
    r"""
    Creates a Fisher-Snedecor (F) distribution parameterized by :attr:`df1` and :attr:`df2`.

    Example::

        >>> m = FisherSnedecor(torch.tensor([1.0]), torch.tensor([2.0]))
        >>> m.sample()  # Fisher-Snedecor-distributed with df1=1 and df2=2
        tensor([ 0.2453])

    Args:
        df1 (float or Tensor): degrees of freedom parameter 1
        df2 (float or Tensor): degrees of freedom parameter 2
    """
    arg_constraints = {'df1': constraints.positive, 'df2': constraints.positive}
    support = constraints.positive
    has_rsample = True

    def __init__(self, df1, df2, validate_args=None):
        self.df1, self.df2 = broadcast_all(df1, df2)
        # Sampling uses the ratio of two scaled Gamma variates (see rsample).
        self._gamma1 = Gamma(self.df1 * 0.5, self.df1)
        self._gamma2 = Gamma(self.df2 * 0.5, self.df2)
        shape = torch.Size() if isinstance(df1, Number) and isinstance(df2, Number) else self.df1.size()
        super(FisherSnedecor, self).__init__(shape, validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        inst = self._get_checked_instance(FisherSnedecor, _instance)
        shape = torch.Size(batch_shape)
        inst.df1 = self.df1.expand(shape)
        inst.df2 = self.df2.expand(shape)
        inst._gamma1 = self._gamma1.expand(shape)
        inst._gamma2 = self._gamma2.expand(shape)
        super(FisherSnedecor, inst).__init__(shape, validate_args=False)
        inst._validate_args = self._validate_args
        return inst

    @property
    def mean(self):
        # Mean is df2/(df2-2), undefined (NaN) for df2 <= 2.
        d2 = self.df2.clone(memory_format=torch.contiguous_format)
        d2[d2 <= 2] = nan
        return d2 / (d2 - 2)

    @property
    def variance(self):
        # Variance is undefined (NaN) for df2 <= 4.
        d2 = self.df2.clone(memory_format=torch.contiguous_format)
        d2[d2 <= 4] = nan
        numer = 2 * d2.pow(2) * (self.df1 + d2 - 2)
        denom = self.df1 * (d2 - 2).pow(2) * (d2 - 4)
        return numer / denom

    def rsample(self, sample_shape=torch.Size(())):
        shape = self._extended_shape(sample_shape)
        #   X1 ~ Gamma(df1 / 2, 1 / df1),  X2 ~ Gamma(df2 / 2, 1 / df2)
        #   Y = df2 * df1 * X1 / (df1 * df2 * X2) = X1 / X2 ~ F(df1, df2)
        g1 = self._gamma1.rsample(sample_shape).view(shape)
        g2 = self._gamma2.rsample(sample_shape).view(shape)
        # Clamp away from zero to keep the ratio finite and strictly positive.
        tiny = torch.finfo(g2.dtype).tiny
        g2.clamp_(min=tiny)
        ratio = g1 / g2
        ratio.clamp_(min=tiny)
        return ratio

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        half_df1 = self.df1 * 0.5
        half_df2 = self.df2 * 0.5
        ratio = self.df1 / self.df2
        # log of the normalizing Beta-function term.
        log_norm = (half_df1 + half_df2).lgamma() - half_df1.lgamma() - half_df2.lgamma()
        log_num = half_df1 * ratio.log() + (half_df1 - 1) * torch.log(value)
        log_den = (half_df1 + half_df2) * torch.log1p(ratio * value)
        return log_norm + log_num - log_den
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/distributions/fishersnedecor.py
0.949646
0.639328
fishersnedecor.py
pypi
from numbers import Number
import math
import torch
from torch.distributions import constraints
from torch.distributions.exp_family import ExponentialFamily
from torch.distributions.utils import broadcast_all, probs_to_logits, logits_to_probs, lazy_property, clamp_probs
from torch.nn.functional import binary_cross_entropy_with_logits


class ContinuousBernoulli(ExponentialFamily):
    r"""
    Creates a continuous Bernoulli distribution parameterized by :attr:`probs`
    or :attr:`logits` (but not both).

    The distribution is supported in [0, 1] and parameterized by 'probs' (in
    (0,1)) or 'logits' (real-valued). Note that, unlike the Bernoulli, 'probs'
    does not correspond to a probability and 'logits' does not correspond
    to log-odds, but the same names are used due to the similarity with the
    Bernoulli. See [1] for more details.

    Example::

        >>> m = ContinuousBernoulli(torch.tensor([0.3]))
        >>> m.sample()
        tensor([ 0.2538])

    Args:
        probs (Number, Tensor): (0,1) valued parameters
        logits (Number, Tensor): real valued parameters whose sigmoid matches 'probs'

    [1] The continuous Bernoulli: fixing a pervasive error in variational
    autoencoders, Loaiza-Ganem G and Cunningham JP, NeurIPS 2019.
    https://arxiv.org/abs/1907.06845
    """
    arg_constraints = {'probs': constraints.unit_interval,
                       'logits': constraints.real}
    support = constraints.unit_interval
    _mean_carrier_measure = 0
    has_rsample = True

    def __init__(self, probs=None, logits=None, lims=(0.499, 0.501), validate_args=None):
        # ``lims`` bounds the window around probs == 0.5 where the closed-form
        # normalizer is numerically unstable; inside it a Taylor expansion is used.
        if (probs is None) == (logits is None):
            raise ValueError("Either `probs` or `logits` must be specified, but not both.")
        if probs is not None:
            is_scalar = isinstance(probs, Number)
            self.probs, = broadcast_all(probs)
            # validate 'probs' here if necessary as it is later clamped for numerical stability
            # close to 0 and 1, later on; otherwise the clamped 'probs' would always pass
            if validate_args is not None:
                if not self.arg_constraints['probs'].check(getattr(self, 'probs')).all():
                    raise ValueError("The parameter {} has invalid values".format('probs'))
            self.probs = clamp_probs(self.probs)
        else:
            is_scalar = isinstance(logits, Number)
            self.logits, = broadcast_all(logits)
        self._param = self.probs if probs is not None else self.logits
        if is_scalar:
            batch_shape = torch.Size()
        else:
            batch_shape = self._param.size()
        self._lims = lims
        super(ContinuousBernoulli, self).__init__(batch_shape, validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        # Broadcast the parameters to ``batch_shape``; validation is skipped
        # because the source instance was already validated.
        new = self._get_checked_instance(ContinuousBernoulli, _instance)
        new._lims = self._lims
        batch_shape = torch.Size(batch_shape)
        if 'probs' in self.__dict__:
            new.probs = self.probs.expand(batch_shape)
            new._param = new.probs
        if 'logits' in self.__dict__:
            new.logits = self.logits.expand(batch_shape)
            new._param = new.logits
        super(ContinuousBernoulli, new).__init__(batch_shape, validate_args=False)
        new._validate_args = self._validate_args
        return new

    def _new(self, *args, **kwargs):
        # Allocate a tensor with the same dtype/device as the parameter.
        return self._param.new(*args, **kwargs)

    def _outside_unstable_region(self):
        # True where 'probs' is outside (lims[0], lims[1]], i.e. where the
        # closed-form expressions are numerically safe.
        return torch.max(torch.le(self.probs, self._lims[0]),
                         torch.gt(self.probs, self._lims[1]))

    def _cut_probs(self):
        # Replace unstable 'probs' (near 0.5) by the constant lims[0] so the
        # closed-form branch never sees a near-singular argument; the Taylor
        # branch is selected for those entries anyway.
        return torch.where(self._outside_unstable_region(),
                           self.probs,
                           self._lims[0] * torch.ones_like(self.probs))

    def _cont_bern_log_norm(self):
        '''computes the log normalizing constant as a function of the 'probs' parameter'''
        cut_probs = self._cut_probs()
        # Split at 0.5 so each log argument stays strictly positive.
        cut_probs_below_half = torch.where(torch.le(cut_probs, 0.5),
                                           cut_probs,
                                           torch.zeros_like(cut_probs))
        cut_probs_above_half = torch.where(torch.ge(cut_probs, 0.5),
                                           cut_probs,
                                           torch.ones_like(cut_probs))
        log_norm = torch.log(torch.abs(torch.log1p(-cut_probs) - torch.log(cut_probs))) - torch.where(
            torch.le(cut_probs, 0.5),
            torch.log1p(-2.0 * cut_probs_below_half),
            torch.log(2.0 * cut_probs_above_half - 1.0))
        # Second-order Taylor expansion of the log normalizer around probs = 0.5.
        x = torch.pow(self.probs - 0.5, 2)
        taylor = math.log(2.0) + (4.0 / 3.0 + 104.0 / 45.0 * x) * x
        return torch.where(self._outside_unstable_region(), log_norm, taylor)

    @property
    def mean(self):
        cut_probs = self._cut_probs()
        mus = cut_probs / (2.0 * cut_probs - 1.0) + 1.0 / (torch.log1p(-cut_probs) - torch.log(cut_probs))
        # Taylor expansion of the mean around probs = 0.5.
        x = self.probs - 0.5
        taylor = 0.5 + (1.0 / 3.0 + 16.0 / 45.0 * torch.pow(x, 2)) * x
        return torch.where(self._outside_unstable_region(), mus, taylor)

    @property
    def stddev(self):
        return torch.sqrt(self.variance)

    @property
    def variance(self):
        cut_probs = self._cut_probs()
        vars = cut_probs * (cut_probs - 1.0) / torch.pow(1.0 - 2.0 * cut_probs, 2) + 1.0 / torch.pow(
            torch.log1p(-cut_probs) - torch.log(cut_probs), 2)
        # Taylor expansion of the variance around probs = 0.5.
        x = torch.pow(self.probs - 0.5, 2)
        taylor = 1.0 / 12.0 - (1.0 / 15.0 - 128. / 945.0 * x) * x
        return torch.where(self._outside_unstable_region(), vars, taylor)

    @lazy_property
    def logits(self):
        # Derived lazily when only 'probs' was given.
        return probs_to_logits(self.probs, is_binary=True)

    @lazy_property
    def probs(self):
        # Derived lazily when only 'logits' was given; clamped away from {0, 1}.
        return clamp_probs(logits_to_probs(self.logits, is_binary=True))

    @property
    def param_shape(self):
        return self._param.size()

    def sample(self, sample_shape=torch.Size()):
        # Inverse-CDF sampling without gradient tracking.
        shape = self._extended_shape(sample_shape)
        u = torch.rand(shape, dtype=self.probs.dtype, device=self.probs.device)
        with torch.no_grad():
            return self.icdf(u)

    def rsample(self, sample_shape=torch.Size()):
        # Reparameterized inverse-CDF sampling (gradients flow through icdf).
        shape = self._extended_shape(sample_shape)
        u = torch.rand(shape, dtype=self.probs.dtype, device=self.probs.device)
        return self.icdf(u)

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        logits, value = broadcast_all(self.logits, value)
        # Unnormalized Bernoulli log-density plus the continuous-Bernoulli
        # log normalizing constant.
        return -binary_cross_entropy_with_logits(logits, value, reduction='none') + self._cont_bern_log_norm()

    def cdf(self, value):
        if self._validate_args:
            self._validate_sample(value)
        cut_probs = self._cut_probs()
        cdfs = (torch.pow(cut_probs, value) * torch.pow(1.0 - cut_probs, 1.0 - value)
                + cut_probs - 1.0) / (2.0 * cut_probs - 1.0)
        # At probs == 0.5 the CDF degenerates to the identity on [0, 1].
        unbounded_cdfs = torch.where(self._outside_unstable_region(), cdfs, value)
        return torch.where(
            torch.le(value, 0.0),
            torch.zeros_like(value),
            torch.where(torch.ge(value, 1.0), torch.ones_like(value), unbounded_cdfs))

    def icdf(self, value):
        cut_probs = self._cut_probs()
        return torch.where(
            self._outside_unstable_region(),
            (torch.log1p(-cut_probs + value * (2.0 * cut_probs - 1.0))
             - torch.log1p(-cut_probs)) / (torch.log(cut_probs) - torch.log1p(-cut_probs)),
            value)

    def entropy(self):
        log_probs0 = torch.log1p(-self.probs)
        log_probs1 = torch.log(self.probs)
        return self.mean * (log_probs0 - log_probs1) - self._cont_bern_log_norm() - log_probs0

    @property
    def _natural_params(self):
        # Natural parameter of the exponential family is the logit.
        return (self.logits, )

    def _log_normalizer(self, x):
        """computes the log normalizing constant as a function of the natural parameter"""
        # Same unstable-region trick as _cont_bern_log_norm, expressed in the
        # natural parameter (logit) space, centered at 0 (= probs of 0.5).
        out_unst_reg = torch.max(torch.le(x, self._lims[0] - 0.5),
                                 torch.gt(x, self._lims[1] - 0.5))
        cut_nat_params = torch.where(out_unst_reg,
                                     x,
                                     (self._lims[0] - 0.5) * torch.ones_like(x))
        log_norm = torch.log(torch.abs(torch.exp(cut_nat_params) - 1.0)) - torch.log(torch.abs(cut_nat_params))
        taylor = 0.5 * x + torch.pow(x, 2) / 24.0 - torch.pow(x, 4) / 2880.0
        return torch.where(out_unst_reg, log_norm, taylor)
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/distributions/continuous_bernoulli.py
0.939824
0.492554
continuous_bernoulli.py
pypi
from numbers import Number

import torch
from torch.distributions import constraints
from torch.distributions.exp_family import ExponentialFamily
from torch.distributions.utils import broadcast_all


class Poisson(ExponentialFamily):
    r"""
    Poisson distribution parameterized by its :attr:`rate`.

    Samples are nonnegative integers with probability mass function

    .. math::
      \mathrm{rate}^k \frac{e^{-\mathrm{rate}}}{k!}

    Example::

        >>> m = Poisson(torch.tensor([4]))
        >>> m.sample()
        tensor([ 3.])

    Args:
        rate (Number, Tensor): the rate parameter
    """
    arg_constraints = {'rate': constraints.positive}
    support = constraints.nonnegative_integer

    def __init__(self, rate, validate_args=None):
        # Broadcast so scalar and tensor rates are handled uniformly.
        self.rate, = broadcast_all(rate)
        batch_shape = torch.Size() if isinstance(rate, Number) else self.rate.size()
        super(Poisson, self).__init__(batch_shape, validate_args=validate_args)

    @property
    def mean(self):
        # The Poisson mean equals the rate.
        return self.rate

    @property
    def variance(self):
        # The Poisson variance also equals the rate.
        return self.rate

    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(Poisson, _instance)
        shape = torch.Size(batch_shape)
        new.rate = self.rate.expand(shape)
        super(Poisson, new).__init__(shape, validate_args=False)
        new._validate_args = self._validate_args
        return new

    def sample(self, sample_shape=torch.Size()):
        shape = self._extended_shape(sample_shape)
        with torch.no_grad():
            return torch.poisson(self.rate.expand(shape))

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        rate, value = broadcast_all(self.rate, value)
        # log pmf: k * log(rate) - rate - log(k!)
        return value * torch.log(rate) - rate - torch.lgamma(value + 1)

    @property
    def _natural_params(self):
        # Natural parameter of the Poisson exponential family is log(rate).
        return (torch.log(self.rate), )

    def _log_normalizer(self, x):
        # Log normalizer A(eta) = exp(eta) = rate.
        return torch.exp(x)
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/distributions/poisson.py
0.952153
0.537406
poisson.py
pypi
import torch
from torch.distributions import constraints
from torch.distributions.categorical import Categorical
from torch.distributions.utils import clamp_probs, broadcast_all
from torch.distributions.distribution import Distribution
from torch.distributions.transformed_distribution import TransformedDistribution
from torch.distributions.transforms import ExpTransform


class ExpRelaxedCategorical(Distribution):
    r"""
    Creates a ExpRelaxedCategorical parameterized by
    :attr:`temperature`, and either :attr:`probs` or :attr:`logits` (but not both).
    Returns the log of a point in the simplex. Based on the interface to
    :class:`OneHotCategorical`.

    Implementation based on [1].

    See also: :func:`torch.distributions.OneHotCategorical`

    Args:
        temperature (Tensor): relaxation temperature
        probs (Tensor): event probabilities
        logits (Tensor): the log probability of each event.

    [1] The Concrete Distribution: A Continuous Relaxation of Discrete Random Variables
    (Maddison et al, 2017)

    [2] Categorical Reparametrization with Gumbel-Softmax
    (Jang et al, 2017)
    """
    arg_constraints = {'probs': constraints.simplex,
                       'logits': constraints.real}
    support = constraints.real
    has_rsample = True

    def __init__(self, temperature, probs=None, logits=None, validate_args=None):
        # Parameter handling (and probs/logits mutual exclusion) is delegated
        # to the underlying Categorical.
        self._categorical = Categorical(probs, logits)
        self.temperature = temperature
        batch_shape = self._categorical.batch_shape
        event_shape = self._categorical.param_shape[-1:]
        super(ExpRelaxedCategorical, self).__init__(batch_shape, event_shape, validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(ExpRelaxedCategorical, _instance)
        batch_shape = torch.Size(batch_shape)
        new.temperature = self.temperature
        new._categorical = self._categorical.expand(batch_shape)
        super(ExpRelaxedCategorical, new).__init__(batch_shape, self.event_shape, validate_args=False)
        new._validate_args = self._validate_args
        return new

    def _new(self, *args, **kwargs):
        return self._categorical._new(*args, **kwargs)

    @property
    def param_shape(self):
        return self._categorical.param_shape

    @property
    def logits(self):
        return self._categorical.logits

    @property
    def probs(self):
        return self._categorical.probs

    def rsample(self, sample_shape=torch.Size()):
        # Gumbel-softmax reparameterization in log space:
        # add Gumbel noise to the logits, divide by the temperature, and
        # log-normalize so exp(result) lies on the simplex.
        shape = self._extended_shape(sample_shape)
        # clamp_probs keeps the uniforms strictly inside (0, 1) so both logs
        # below are finite.
        uniforms = clamp_probs(torch.rand(shape, dtype=self.logits.dtype,
                                          device=self.logits.device))
        gumbels = -((-(uniforms.log())).log())
        scores = (self.logits + gumbels) / self.temperature
        return scores - scores.logsumexp(dim=-1, keepdim=True)

    def log_prob(self, value):
        # Density of the Concrete distribution in log space ([1], eq. 6),
        # evaluated at log-simplex point ``value``.
        K = self._categorical._num_events
        if self._validate_args:
            self._validate_sample(value)
        logits, value = broadcast_all(self.logits, value)
        log_scale = (torch.full_like(self.temperature, float(K)).lgamma() -
                     self.temperature.log().mul(-(K - 1)))
        score = logits - value.mul(self.temperature)
        score = (score - score.logsumexp(dim=-1, keepdim=True)).sum(-1)
        return score + log_scale


class RelaxedOneHotCategorical(TransformedDistribution):
    r"""
    Creates a RelaxedOneHotCategorical distribution parametrized by
    :attr:`temperature`, and either :attr:`probs` or :attr:`logits`.
    This is a relaxed version of the :class:`OneHotCategorical` distribution, so
    its samples are on simplex, and are reparametrizable.

    Example::

        >>> m = RelaxedOneHotCategorical(torch.tensor([2.2]),
                                         torch.tensor([0.1, 0.2, 0.3, 0.4]))
        >>> m.sample()
        tensor([ 0.1294,  0.2324,  0.3859,  0.2523])

    Args:
        temperature (Tensor): relaxation temperature
        probs (Tensor): event probabilities
        logits (Tensor): the log probability of each event.
    """
    arg_constraints = {'probs': constraints.simplex,
                       'logits': constraints.real}
    support = constraints.simplex
    has_rsample = True

    def __init__(self, temperature, probs=None, logits=None, validate_args=None):
        # Exponentiate ExpRelaxedCategorical samples to land on the simplex.
        base_dist = ExpRelaxedCategorical(temperature, probs, logits)
        super(RelaxedOneHotCategorical, self).__init__(base_dist,
                                                       ExpTransform(),
                                                       validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(RelaxedOneHotCategorical, _instance)
        return super(RelaxedOneHotCategorical, self).expand(batch_shape, _instance=new)

    @property
    def temperature(self):
        return self.base_dist.temperature

    @property
    def logits(self):
        return self.base_dist.logits

    @property
    def probs(self):
        return self.base_dist.probs
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/distributions/relaxed_categorical.py
0.959068
0.703637
relaxed_categorical.py
pypi
import math
import numbers
import weakref

import torch
import torch.nn.functional as F
from torch.distributions import constraints
from torch.distributions.utils import (_sum_rightmost, broadcast_all,
                                       lazy_property)
from torch.nn.functional import pad
from torch.nn.functional import softplus

__all__ = [
    'AbsTransform',
    'AffineTransform',
    'CatTransform',
    'ComposeTransform',
    'ExpTransform',
    'LowerCholeskyTransform',
    'PowerTransform',
    'SigmoidTransform',
    'TanhTransform',
    'SoftmaxTransform',
    'StackTransform',
    'StickBreakingTransform',
    'Transform',
    'identity_transform',
]


class Transform(object):
    """
    Abstract class for invertable transformations with computable log
    det jacobians. They are primarily used in
    :class:`torch.distributions.TransformedDistribution`.

    Caching is useful for transforms whose inverses are either expensive or
    numerically unstable. Note that care must be taken with memoized values
    since the autograd graph may be reversed. For example while the following
    works with or without caching::

        y = t(x)
        t.log_abs_det_jacobian(x, y).backward()  # x will receive gradients.

    However the following will error when caching due to dependency reversal::

        y = t(x)
        z = t.inv(y)
        grad(z.sum(), [y])  # error because z is x

    Derived classes should implement one or both of :meth:`_call` or
    :meth:`_inverse`. Derived classes that set `bijective=True` should also
    implement :meth:`log_abs_det_jacobian`.

    Args:
        cache_size (int): Size of cache. If zero, no caching is done. If one,
            the latest single value is cached. Only 0 and 1 are supported.

    Attributes:
        domain (:class:`~torch.distributions.constraints.Constraint`):
            The constraint representing valid inputs to this transform.
        codomain (:class:`~torch.distributions.constraints.Constraint`):
            The constraint representing valid outputs to this transform
            which are inputs to the inverse transform.
        bijective (bool): Whether this transform is bijective. A transform
            ``t`` is bijective iff ``t.inv(t(x)) == x`` and
            ``t(t.inv(y)) == y`` for every ``x`` in the domain and ``y`` in
            the codomain. Transforms that are not bijective should at least
            maintain the weaker pseudoinverse properties
            ``t(t.inv(t(x)) == t(x)`` and ``t.inv(t(t.inv(y))) == t.inv(y)``.
        sign (int or Tensor): For bijective univariate transforms, this
            should be +1 or -1 depending on whether transform is monotone
            increasing or decreasing.
        event_dim (int): Number of dimensions that are correlated together in
            the transform ``event_shape``. This should be 0 for pointwise
            transforms, 1 for transforms that act jointly on vectors, 2 for
            transforms that act jointly on matrices, etc.
    """
    bijective = False
    event_dim = 0

    def __init__(self, cache_size=0):
        self._cache_size = cache_size
        self._inv = None
        if cache_size == 0:
            pass  # default behavior
        elif cache_size == 1:
            # Single-entry cache: the latest (x, y) pair, compared by identity.
            self._cached_x_y = None, None
        else:
            raise ValueError('cache_size must be 0 or 1')
        super(Transform, self).__init__()

    @property
    def inv(self):
        """
        Returns the inverse :class:`Transform` of this transform.
        This should satisfy ``t.inv.inv is t``.
        """
        # The inverse is held via a weakref so that the pair of transforms
        # does not form an uncollectable reference cycle.
        inv = None
        if self._inv is not None:
            inv = self._inv()
        if inv is None:
            inv = _InverseTransform(self)
            self._inv = weakref.ref(inv)
        return inv

    @property
    def sign(self):
        """
        Returns the sign of the determinant of the Jacobian, if applicable.
        In general this only makes sense for bijective transforms.
        """
        raise NotImplementedError

    def __eq__(self, other):
        return self is other

    def __ne__(self, other):
        # Necessary for Python2
        return not self.__eq__(other)

    def __call__(self, x):
        """
        Computes the transform `x => y`.
        """
        if self._cache_size == 0:
            return self._call(x)
        # Identity-based cache hit check (``is``, not ``==``).
        x_old, y_old = self._cached_x_y
        if x is x_old:
            return y_old
        y = self._call(x)
        self._cached_x_y = x, y
        return y

    def _inv_call(self, y):
        """
        Inverts the transform `y => x`.
        """
        if self._cache_size == 0:
            return self._inverse(y)
        x_old, y_old = self._cached_x_y
        if y is y_old:
            return x_old
        x = self._inverse(y)
        self._cached_x_y = x, y
        return x

    def _call(self, x):
        """
        Abstract method to compute forward transformation.
        """
        raise NotImplementedError

    def _inverse(self, y):
        """
        Abstract method to compute inverse transformation.
        """
        raise NotImplementedError

    def log_abs_det_jacobian(self, x, y):
        """
        Computes the log det jacobian `log |dy/dx|` given input and output.
        """
        raise NotImplementedError

    def __repr__(self):
        return self.__class__.__name__ + '()'


class _InverseTransform(Transform):
    """
    Inverts a single :class:`Transform`.
    This class is private; please instead use the ``Transform.inv`` property.
    """
    def __init__(self, transform):
        super(_InverseTransform, self).__init__()
        # Unlike Transform._inv, this is a strong reference: the inverse keeps
        # its forward transform alive.
        self._inv = transform

    @constraints.dependent_property
    def domain(self):
        # Domain/codomain are swapped relative to the forward transform.
        return self._inv.codomain

    @constraints.dependent_property
    def codomain(self):
        return self._inv.domain

    @property
    def bijective(self):
        return self._inv.bijective

    @property
    def sign(self):
        return self._inv.sign

    @property
    def event_dim(self):
        return self._inv.event_dim

    @property
    def inv(self):
        return self._inv

    def __eq__(self, other):
        if not isinstance(other, _InverseTransform):
            return False
        return self._inv == other._inv

    def __call__(self, x):
        # Delegate to the forward transform's (possibly cached) inverse call.
        return self._inv._inv_call(x)

    def log_abs_det_jacobian(self, x, y):
        # log|dx/dy| of the inverse is the negated forward jacobian with
        # the arguments swapped.
        return -self._inv.log_abs_det_jacobian(y, x)


class ComposeTransform(Transform):
    """
    Composes multiple transforms in a chain.
    The transforms being composed are responsible for caching.

    Args:
        parts (list of :class:`Transform`): A list of transforms to compose.
    """
    def __init__(self, parts):
        super(ComposeTransform, self).__init__()
        self.parts = parts

    def __eq__(self, other):
        if not isinstance(other, ComposeTransform):
            return False
        return self.parts == other.parts

    @constraints.dependent_property
    def domain(self):
        if not self.parts:
            return constraints.real
        return self.parts[0].domain

    @constraints.dependent_property
    def codomain(self):
        if not self.parts:
            return constraints.real
        return self.parts[-1].codomain

    @lazy_property
    def bijective(self):
        # A chain is bijective only if every link is.
        return all(p.bijective for p in self.parts)

    @lazy_property
    def sign(self):
        # Signs multiply along the chain.
        sign = 1
        for p in self.parts:
            sign = sign * p.sign
        return sign

    @lazy_property
    def event_dim(self):
        # The chain correlates as many dims as its most-correlating part.
        return max(p.event_dim for p in self.parts) if self.parts else 0

    @property
    def inv(self):
        inv = None
        if self._inv is not None:
            inv = self._inv()
        if inv is None:
            # Inverse of a composition is the reversed composition of inverses.
            inv = ComposeTransform([p.inv for p in reversed(self.parts)])
            self._inv = weakref.ref(inv)
            inv._inv = weakref.ref(self)
        return inv

    def __call__(self, x):
        for part in self.parts:
            x = part(x)
        return x

    def log_abs_det_jacobian(self, x, y):
        if not self.parts:
            return torch.zeros_like(x)
        result = 0
        # Re-run the forward pass through all but the last part to recover the
        # intermediate values each jacobian term needs; the provided ``y`` is
        # used for the final part.
        for part in self.parts[:-1]:
            y_tmp = part(x)
            result = result + _sum_rightmost(part.log_abs_det_jacobian(x, y_tmp),
                                             self.event_dim - part.event_dim)
            x = y_tmp
        part = self.parts[-1]
        result = result + _sum_rightmost(part.log_abs_det_jacobian(x, y),
                                         self.event_dim - part.event_dim)
        return result

    def __repr__(self):
        fmt_string = self.__class__.__name__ + '(\n    '
        fmt_string += ',\n    '.join([p.__repr__() for p in self.parts])
        fmt_string += '\n)'
        return fmt_string


# The empty composition acts as the identity transform.
identity_transform = ComposeTransform([])


class ExpTransform(Transform):
    r"""
    Transform via the mapping :math:`y = \exp(x)`.
    """
    domain = constraints.real
    codomain = constraints.positive
    bijective = True
    sign = +1

    def __eq__(self, other):
        return isinstance(other, ExpTransform)

    def _call(self, x):
        return x.exp()

    def _inverse(self, y):
        return y.log()

    def log_abs_det_jacobian(self, x, y):
        # d/dx exp(x) = exp(x), so log|dy/dx| = x.
        return x


class PowerTransform(Transform):
    r"""
    Transform via the mapping :math:`y = x^{\text{exponent}}`.
    """
    domain = constraints.positive
    codomain = constraints.positive
    bijective = True
    sign = +1

    def __init__(self, exponent, cache_size=0):
        super(PowerTransform, self).__init__(cache_size=cache_size)
        self.exponent, = broadcast_all(exponent)

    def __eq__(self, other):
        if not isinstance(other, PowerTransform):
            return False
        return self.exponent.eq(other.exponent).all().item()

    def _call(self, x):
        return x.pow(self.exponent)

    def _inverse(self, y):
        return y.pow(1 / self.exponent)

    def log_abs_det_jacobian(self, x, y):
        # |dy/dx| = |exponent * x^(exponent-1)| = |exponent * y / x|.
        return (self.exponent * y / x).abs().log()


def _clipped_sigmoid(x):
    # Clamp sigmoid outputs strictly inside (0, 1) so downstream logs stay finite.
    finfo = torch.finfo(x.dtype)
    return torch.clamp(torch.sigmoid(x), min=finfo.tiny, max=1. - finfo.eps)


class SigmoidTransform(Transform):
    r"""
    Transform via the mapping :math:`y = \frac{1}{1 + \exp(-x)}` and :math:`x = \text{logit}(y)`.
    """
    domain = constraints.real
    codomain = constraints.unit_interval
    bijective = True
    sign = +1

    def __eq__(self, other):
        return isinstance(other, SigmoidTransform)

    def _call(self, x):
        return _clipped_sigmoid(x)

    def _inverse(self, y):
        finfo = torch.finfo(y.dtype)
        y = y.clamp(min=finfo.tiny, max=1. - finfo.eps)
        return y.log() - (-y).log1p()

    def log_abs_det_jacobian(self, x, y):
        # log sigmoid'(x) = -softplus(-x) - softplus(x).
        return -F.softplus(-x) - F.softplus(x)


class TanhTransform(Transform):
    r"""
    Transform via the mapping :math:`y = \tanh(x)`.

    It is equivalent to
    ```
    ComposeTransform([AffineTransform(0., 2.), SigmoidTransform(), AffineTransform(-1., 2.)])
    ```
    However this might not be numerically stable, thus it is recommended to use `TanhTransform`
    instead.

    Note that one should use `cache_size=1` when it comes to `NaN/Inf` values.

    """
    domain = constraints.real
    codomain = constraints.interval(-1.0, 1.0)
    bijective = True
    sign = +1

    @staticmethod
    def atanh(x):
        return 0.5 * (x.log1p() - (-x).log1p())

    def __eq__(self, other):
        return isinstance(other, TanhTransform)

    def _call(self, x):
        return x.tanh()

    def _inverse(self, y):
        # We do not clamp to the boundary here as it may degrade the performance of certain algorithms.
        # one should use `cache_size=1` instead
        return self.atanh(y)

    def log_abs_det_jacobian(self, x, y):
        # We use a formula that is more numerically stable, see details in the following link
        # https://github.com/tensorflow/probability/blob/master/tensorflow_probability/python/bijectors/tanh.py#L69-L80
        return 2. * (math.log(2.) - x - softplus(-2. * x))


class AbsTransform(Transform):
    r"""
    Transform via the mapping :math:`y = |x|`.
    """
    domain = constraints.real
    codomain = constraints.positive

    def __eq__(self, other):
        return isinstance(other, AbsTransform)

    def _call(self, x):
        return x.abs()

    def _inverse(self, y):
        # Non-bijective: pick the nonnegative preimage.
        return y


class AffineTransform(Transform):
    r"""
    Transform via the pointwise affine mapping :math:`y = \text{loc} + \text{scale} \times x`.

    Args:
        loc (Tensor or float): Location parameter.
        scale (Tensor or float): Scale parameter.
        event_dim (int): Optional size of `event_shape`. This should be zero
            for univariate random variables, 1 for distributions over vectors,
            2 for distributions over matrices, etc.
    """
    domain = constraints.real
    codomain = constraints.real
    bijective = True

    def __init__(self, loc, scale, event_dim=0, cache_size=0):
        super(AffineTransform, self).__init__(cache_size=cache_size)
        self.loc = loc
        self.scale = scale
        self.event_dim = event_dim

    def __eq__(self, other):
        if not isinstance(other, AffineTransform):
            return False

        # loc and scale may each be a python number or a tensor; compare
        # accordingly.
        if isinstance(self.loc, numbers.Number) and isinstance(other.loc, numbers.Number):
            if self.loc != other.loc:
                return False
        else:
            if not (self.loc == other.loc).all().item():
                return False

        if isinstance(self.scale, numbers.Number) and isinstance(other.scale, numbers.Number):
            if self.scale != other.scale:
                return False
        else:
            if not (self.scale == other.scale).all().item():
                return False

        return True

    @property
    def sign(self):
        if isinstance(self.scale, numbers.Number):
            return 1 if self.scale > 0 else -1 if self.scale < 0 else 0
        return self.scale.sign()

    def _call(self, x):
        return self.loc + self.scale * x

    def _inverse(self, y):
        return (y - self.loc) / self.scale

    def log_abs_det_jacobian(self, x, y):
        shape = x.shape
        scale = self.scale
        if isinstance(scale, numbers.Number):
            result = torch.full_like(x, math.log(abs(scale)))
        else:
            result = torch.abs(scale).log()
        if self.event_dim:
            # Sum the per-coordinate terms over the event dimensions.
            result_size = result.size()[:-self.event_dim] + (-1,)
            result = result.view(result_size).sum(-1)
            shape = shape[:-self.event_dim]
        return result.expand(shape)


class SoftmaxTransform(Transform):
    r"""
    Transform from unconstrained space to the simplex via :math:`y = \exp(x)` then
    normalizing.

    This is not bijective and cannot be used for HMC. However this acts mostly
    coordinate-wise (except for the final normalization), and thus is
    appropriate for coordinate-wise optimization algorithms.
    """
    domain = constraints.real
    codomain = constraints.simplex
    event_dim = 1

    def __eq__(self, other):
        return isinstance(other, SoftmaxTransform)

    def _call(self, x):
        logprobs = x
        # Subtract the max before exponentiating for numerical stability.
        probs = (logprobs - logprobs.max(-1, True)[0]).exp()
        return probs / probs.sum(-1, True)

    def _inverse(self, y):
        probs = y
        return probs.log()


class StickBreakingTransform(Transform):
    """
    Transform from unconstrained space to the simplex of one additional
    dimension via a stick-breaking process.

    This transform arises as an iterated sigmoid transform in a stick-breaking
    construction of the `Dirichlet` distribution: the first logit is
    transformed via sigmoid to the first probability and the probability of
    everything else, and then the process recurses.

    This is bijective and appropriate for use in HMC; however it mixes
    coordinates together and is less appropriate for optimization.
    """
    domain = constraints.real
    codomain = constraints.simplex
    bijective = True
    event_dim = 1

    def __eq__(self, other):
        return isinstance(other, StickBreakingTransform)

    def _call(self, x):
        # offset.log() centers each sigmoid so that x == 0 maps to the uniform simplex point.
        offset = x.shape[-1] + 1 - x.new_ones(x.shape[-1]).cumsum(-1)
        z = _clipped_sigmoid(x - offset.log())
        z_cumprod = (1 - z).cumprod(-1)
        y = pad(z, (0, 1), value=1) * pad(z_cumprod, (1, 0), value=1)
        return y

    def _inverse(self, y):
        y_crop = y[..., :-1]
        offset = y.shape[-1] - y.new_ones(y_crop.shape[-1]).cumsum(-1)
        sf = 1 - y_crop.cumsum(-1)
        # we clamp to make sure that sf is positive which sometimes does not
        # happen when y[-1] ~ 0 or y[:-1].sum() ~ 1
        sf = torch.clamp(sf, min=torch.finfo(y.dtype).tiny)
        x = y_crop.log() - sf.log() + offset.log()
        return x

    def log_abs_det_jacobian(self, x, y):
        offset = x.shape[-1] + 1 - x.new_ones(x.shape[-1]).cumsum(-1)
        x = x - offset.log()
        # use the identity 1 - sigmoid(x) = exp(-x) * sigmoid(x)
        detJ = (-x + F.logsigmoid(x) + y[..., :-1].log()).sum(-1)
        return detJ


class LowerCholeskyTransform(Transform):
    """
    Transform from unconstrained matrices to lower-triangular matrices with
    nonnegative diagonal entries.

    This is useful for parameterizing positive definite matrices in terms of
    their Cholesky factorization.
    """
    domain = constraints.real
    codomain = constraints.lower_cholesky
    event_dim = 2

    def __eq__(self, other):
        return isinstance(other, LowerCholeskyTransform)

    def _call(self, x):
        # Keep the strictly-lower triangle; exponentiate the diagonal to
        # enforce positivity.
        return x.tril(-1) + x.diagonal(dim1=-2, dim2=-1).exp().diag_embed()

    def _inverse(self, y):
        return y.tril(-1) + y.diagonal(dim1=-2, dim2=-1).log().diag_embed()


class CatTransform(Transform):
    """
    Transform functor that applies a sequence of transforms `tseq`
    component-wise to each submatrix at `dim`, of length `lengths[dim]`,
    in a way compatible with :func:`torch.cat`.

    Example::
       x0 = torch.cat([torch.range(1, 10), torch.range(1, 10)], dim=0)
       x = torch.cat([x0, x0], dim=0)
       t0 = CatTransform([ExpTransform(), identity_transform], dim=0, lengths=[10, 10])
       t = CatTransform([t0, t0], dim=0, lengths=[20, 20])
       y = t(x)
    """
    def __init__(self, tseq, dim=0, lengths=None):
        assert all(isinstance(t, Transform) for t in tseq)
        super(CatTransform, self).__init__()
        self.transforms = list(tseq)
        if lengths is None:
            # Default: each transform owns a single slice of width 1.
            lengths = [1] * len(self.transforms)
        self.lengths = list(lengths)
        assert len(self.lengths) == len(self.transforms)
        self.dim = dim

    @lazy_property
    def length(self):
        # Total extent along ``dim`` covered by all slices.
        return sum(self.lengths)

    def _call(self, x):
        assert -x.dim() <= self.dim < x.dim()
        assert x.size(self.dim) == self.length
        yslices = []
        start = 0
        for trans, length in zip(self.transforms, self.lengths):
            xslice = x.narrow(self.dim, start, length)
            yslices.append(trans(xslice))
            start = start + length  # avoid += for jit compat
        return torch.cat(yslices, dim=self.dim)

    def _inverse(self, y):
        assert -y.dim() <= self.dim < y.dim()
        assert y.size(self.dim) == self.length
        xslices = []
        start = 0
        for trans, length in zip(self.transforms, self.lengths):
            yslice = y.narrow(self.dim, start, length)
            xslices.append(trans.inv(yslice))
            start = start + length  # avoid += for jit compat
        return torch.cat(xslices, dim=self.dim)

    def log_abs_det_jacobian(self, x, y):
        assert -x.dim() <= self.dim < x.dim()
        assert x.size(self.dim) == self.length
        assert -y.dim() <= self.dim < y.dim()
        assert y.size(self.dim) == self.length
        logdetjacs = []
        start = 0
        for trans, length in zip(self.transforms, self.lengths):
            xslice = x.narrow(self.dim, start, length)
            yslice = y.narrow(self.dim, start, length)
            logdetjacs.append(trans.log_abs_det_jacobian(xslice, yslice))
            start = start + length  # avoid += for jit compat
        return torch.cat(logdetjacs, dim=self.dim)

    @property
    def bijective(self):
        return all(t.bijective for t in self.transforms)

    @constraints.dependent_property
    def domain(self):
        return constraints.cat([t.domain for t in self.transforms],
                               self.dim, self.lengths)

    @constraints.dependent_property
    def codomain(self):
        return constraints.cat([t.codomain for t in self.transforms],
                               self.dim, self.lengths)


class StackTransform(Transform):
    """
    Transform functor that applies a sequence of transforms `tseq`
    component-wise to each submatrix at `dim`
    in a way compatible with :func:`torch.stack`.

    Example::
       x = torch.stack([torch.range(1, 10), torch.range(1, 10)], dim=1)
       t = StackTransform([ExpTransform(), identity_transform], dim=1)
       y = t(x)
    """
    def __init__(self, tseq, dim=0):
        assert all(isinstance(t, Transform) for t in tseq)
        super(StackTransform, self).__init__()
        self.transforms = list(tseq)
        self.dim = dim

    def _slice(self, z):
        # One slice per transform along ``dim``.
        return [z.select(self.dim, i) for i in range(z.size(self.dim))]

    def _call(self, x):
        assert -x.dim() <= self.dim < x.dim()
        assert x.size(self.dim) == len(self.transforms)
        yslices = []
        for xslice, trans in zip(self._slice(x), self.transforms):
            yslices.append(trans(xslice))
        return torch.stack(yslices, dim=self.dim)

    def _inverse(self, y):
        assert -y.dim() <= self.dim < y.dim()
        assert y.size(self.dim) == len(self.transforms)
        xslices = []
        for yslice, trans in zip(self._slice(y), self.transforms):
            xslices.append(trans.inv(yslice))
        return torch.stack(xslices, dim=self.dim)

    def log_abs_det_jacobian(self, x, y):
        assert -x.dim() <= self.dim < x.dim()
        assert x.size(self.dim) == len(self.transforms)
        assert -y.dim() <= self.dim < y.dim()
        assert y.size(self.dim) == len(self.transforms)
        logdetjacs = []
        yslices = self._slice(y)
        xslices = self._slice(x)
        for xslice, yslice, trans in zip(xslices, yslices, self.transforms):
            logdetjacs.append(trans.log_abs_det_jacobian(xslice, yslice))
        return torch.stack(logdetjacs, dim=self.dim)

    @property
    def bijective(self):
        return all(t.bijective for t in self.transforms)

    @constraints.dependent_property
    def domain(self):
        return constraints.stack([t.domain for t in self.transforms], self.dim)

    @constraints.dependent_property
    def codomain(self):
        return constraints.stack([t.codomain for t in self.transforms], self.dim)
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/distributions/transforms.py
0.926495
0.651881
transforms.py
pypi
import math

import torch
from torch._six import inf
from torch.distributions import constraints
from torch.distributions.transforms import AbsTransform
from torch.distributions.cauchy import Cauchy
from torch.distributions.transformed_distribution import TransformedDistribution


class HalfCauchy(TransformedDistribution):
    r"""
    Creates a half-Cauchy distribution parameterized by `scale` where::

        X ~ Cauchy(0, scale)
        Y = |X| ~ HalfCauchy(scale)

    Example::

        >>> m = HalfCauchy(torch.tensor([1.0]))
        >>> m.sample()  # half-cauchy distributed with scale=1
        tensor([ 2.3214])

    Args:
        scale (float or Tensor): scale of the full Cauchy distribution
    """
    arg_constraints = {'scale': constraints.positive}
    support = constraints.positive
    has_rsample = True

    def __init__(self, scale, validate_args=None):
        # Realized as |X| for X ~ Cauchy(0, scale) via an AbsTransform.
        base_dist = Cauchy(0, scale)
        super(HalfCauchy, self).__init__(base_dist, AbsTransform(),
                                         validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(HalfCauchy, _instance)
        return super(HalfCauchy, self).expand(batch_shape, _instance=new)

    @property
    def scale(self):
        return self.base_dist.scale

    @property
    def mean(self):
        # NOTE(review): delegates to the base Cauchy, whose mean is undefined;
        # the half-Cauchy mean is likewise undefined.
        return self.base_dist.mean

    @property
    def variance(self):
        return self.base_dist.variance

    def log_prob(self, value):
        # Promote plain numbers to a tensor matching scale's dtype/device so
        # the masked assignment below is well defined.
        value = torch.as_tensor(value, dtype=self.base_dist.scale.dtype,
                                device=self.base_dist.scale.device)
        # Density is twice the symmetric base density on the support...
        log_prob = self.base_dist.log_prob(value) + math.log(2)
        # ...and zero (log-prob -inf) for negative values.
        log_prob[value.expand(log_prob.shape) < 0] = -inf
        return log_prob

    def cdf(self, value):
        # P(|X| <= v) = 2 * F(v) - 1 for a zero-centered symmetric base.
        return 2 * self.base_dist.cdf(value) - 1

    def icdf(self, prob):
        return self.base_dist.icdf((prob + 1) / 2)

    def entropy(self):
        # Halving the support doubles the density, reducing entropy by log(2).
        return self.base_dist.entropy() - math.log(2)
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/distributions/half_cauchy.py
0.931766
0.480174
half_cauchy.py
pypi
from numbers import Number

import torch
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.utils import broadcast_all


class Laplace(Distribution):
    r"""
    Creates a Laplace distribution parameterized by :attr:`loc` and :attr:`scale`.

    Example::

        >>> m = Laplace(torch.tensor([0.0]), torch.tensor([1.0]))
        >>> m.sample()  # Laplace distributed with loc=0, scale=1
        tensor([ 0.1046])

    Args:
        loc (float or Tensor): mean of the distribution
        scale (float or Tensor): scale of the distribution
    """
    arg_constraints = {'loc': constraints.real, 'scale': constraints.positive}
    support = constraints.real
    has_rsample = True

    @property
    def mean(self):
        return self.loc

    @property
    def variance(self):
        # Var = 2 * b^2 for scale b.
        return 2 * self.scale.pow(2)

    @property
    def stddev(self):
        return (2 ** 0.5) * self.scale

    def __init__(self, loc, scale, validate_args=None):
        self.loc, self.scale = broadcast_all(loc, scale)
        # Two plain numbers give a scalar (empty) batch shape.
        if isinstance(loc, Number) and isinstance(scale, Number):
            batch_shape = torch.Size()
        else:
            batch_shape = self.loc.size()
        super(Laplace, self).__init__(batch_shape, validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(Laplace, _instance)
        batch_shape = torch.Size(batch_shape)
        new.loc = self.loc.expand(batch_shape)
        new.scale = self.scale.expand(batch_shape)
        super(Laplace, new).__init__(batch_shape, validate_args=False)
        new._validate_args = self._validate_args
        return new

    def rsample(self, sample_shape=torch.Size()):
        # Inverse-CDF sampling: for u ~ Uniform(-1, 1),
        # x = loc - scale * sign(u) * log1p(-|u|) is Laplace(loc, scale).
        shape = self._extended_shape(sample_shape)
        finfo = torch.finfo(self.loc.dtype)
        if torch._C._get_tracing_state():
            # [JIT WORKAROUND] lack of support for .uniform_()
            u = torch.rand(shape, dtype=self.loc.dtype, device=self.loc.device) * 2 - 1
            # NOTE(review): clamp(min=tiny) bounds |u| away from zero —
            # presumably for numerical/gradient stability; confirm intent.
            return self.loc - self.scale * u.sign() * torch.log1p(-u.abs().clamp(min=finfo.tiny))
        # uniform_(eps - 1, 1) keeps |u| < 1 so log1p(-|u|) stays finite.
        u = self.loc.new(shape).uniform_(finfo.eps - 1, 1)
        # TODO: If we ever implement tensor.nextafter, below is what we want ideally.
        # u = self.loc.new(shape).uniform_(self.loc.nextafter(-.5, 0), .5)
        return self.loc - self.scale * u.sign() * torch.log1p(-u.abs())

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        # log p(x) = -log(2b) - |x - mu| / b
        return -torch.log(2 * self.scale) - torch.abs(value - self.loc) / self.scale

    def cdf(self, value):
        if self._validate_args:
            self._validate_sample(value)
        # Branch-free piecewise CDF using sign() and expm1 for accuracy
        # near the location parameter.
        return 0.5 - 0.5 * (value - self.loc).sign() * torch.expm1(-(value - self.loc).abs() / self.scale)

    def icdf(self, value):
        if self._validate_args:
            self._validate_sample(value)
        term = value - 0.5
        return self.loc - self.scale * (term).sign() * torch.log1p(-2 * term.abs())

    def entropy(self):
        return 1 + torch.log(2 * self.scale)
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/distributions/laplace.py
0.898288
0.455744
laplace.py
pypi
import torch
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.utils import _sum_rightmost


class Independent(Distribution):
    r"""
    Reinterprets some of the batch dims of a distribution as event dims.

    This is mainly useful for changing the shape of the result of
    :meth:`log_prob`. For example to create a diagonal Normal distribution with
    the same shape as a Multivariate Normal distribution (so they are
    interchangeable), you can::

        >>> loc = torch.zeros(3)
        >>> scale = torch.ones(3)
        >>> mvn = MultivariateNormal(loc, scale_tril=torch.diag(scale))
        >>> [mvn.batch_shape, mvn.event_shape]
        [torch.Size(()), torch.Size((3,))]
        >>> normal = Normal(loc, scale)
        >>> [normal.batch_shape, normal.event_shape]
        [torch.Size((3,)), torch.Size(())]
        >>> diagn = Independent(normal, 1)
        >>> [diagn.batch_shape, diagn.event_shape]
        [torch.Size(()), torch.Size((3,))]

    Args:
        base_distribution (torch.distributions.distribution.Distribution): a
            base distribution
        reinterpreted_batch_ndims (int): the number of batch dims to
            reinterpret as event dims
    """
    arg_constraints = {}

    def __init__(self, base_distribution, reinterpreted_batch_ndims, validate_args=None):
        if reinterpreted_batch_ndims > len(base_distribution.batch_shape):
            raise ValueError("Expected reinterpreted_batch_ndims <= len(base_distribution.batch_shape), "
                             "actual {} vs {}".format(reinterpreted_batch_ndims,
                                                      len(base_distribution.batch_shape)))
        # Move the rightmost `reinterpreted_batch_ndims` batch dims into the
        # event shape; the base distribution's own event dims stay event dims.
        shape = base_distribution.batch_shape + base_distribution.event_shape
        event_dim = reinterpreted_batch_ndims + len(base_distribution.event_shape)
        batch_shape = shape[:len(shape) - event_dim]
        event_shape = shape[len(shape) - event_dim:]
        self.base_dist = base_distribution
        self.reinterpreted_batch_ndims = reinterpreted_batch_ndims
        super(Independent, self).__init__(batch_shape, event_shape, validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(Independent, _instance)
        batch_shape = torch.Size(batch_shape)
        # The base distribution must also carry the reinterpreted dims.
        new.base_dist = self.base_dist.expand(batch_shape +
                                              self.event_shape[:self.reinterpreted_batch_ndims])
        new.reinterpreted_batch_ndims = self.reinterpreted_batch_ndims
        super(Independent, new).__init__(batch_shape, self.event_shape, validate_args=False)
        new._validate_args = self._validate_args
        return new

    @property
    def has_rsample(self):
        return self.base_dist.has_rsample

    @property
    def has_enumerate_support(self):
        # Enumeration over a cartesian product of reinterpreted dims is not
        # supported (see enumerate_support below).
        if self.reinterpreted_batch_ndims > 0:
            return False
        return self.base_dist.has_enumerate_support

    @constraints.dependent_property
    def support(self):
        return self.base_dist.support

    @property
    def mean(self):
        return self.base_dist.mean

    @property
    def variance(self):
        return self.base_dist.variance

    def sample(self, sample_shape=torch.Size()):
        # Sampling is unchanged; only log_prob/entropy aggregation differs.
        return self.base_dist.sample(sample_shape)

    def rsample(self, sample_shape=torch.Size()):
        return self.base_dist.rsample(sample_shape)

    def log_prob(self, value):
        log_prob = self.base_dist.log_prob(value)
        # Sum out the reinterpreted dims so they behave as event dims.
        return _sum_rightmost(log_prob, self.reinterpreted_batch_ndims)

    def entropy(self):
        entropy = self.base_dist.entropy()
        return _sum_rightmost(entropy, self.reinterpreted_batch_ndims)

    def enumerate_support(self, expand=True):
        if self.reinterpreted_batch_ndims > 0:
            raise NotImplementedError("Enumeration over cartesian product is not implemented")
        return self.base_dist.enumerate_support(expand=expand)

    def __repr__(self):
        return self.__class__.__name__ + '({}, {})'.format(self.base_dist, self.reinterpreted_batch_ndims)
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/distributions/independent.py
0.948728
0.594492
independent.py
pypi
import torch
from torch.distributions import constraints
from torch.distributions.normal import Normal
from torch.distributions.transformed_distribution import TransformedDistribution
from torch.distributions.transforms import StickBreakingTransform


class LogisticNormal(TransformedDistribution):
    r"""
    Creates a logistic-normal distribution parameterized by :attr:`loc` and
    :attr:`scale` that define the base `Normal` distribution transformed with
    the `StickBreakingTransform` such that::

        X ~ LogisticNormal(loc, scale)
        Y = log(X / (1 - X.cumsum(-1)))[..., :-1] ~ Normal(loc, scale)

    Args:
        loc (float or Tensor): mean of the base distribution
        scale (float or Tensor): standard deviation of the base distribution

    Example::

        >>> # logistic-normal distributed with mean=(0, 0, 0) and stddev=(1, 1, 1)
        >>> # of the base Normal distribution
        >>> m = distributions.LogisticNormal(torch.tensor([0.0] * 3),
                                             torch.tensor([1.0] * 3))
        >>> m.sample()
        tensor([ 0.7653,  0.0341,  0.0579,  0.1427])

    """
    arg_constraints = {'loc': constraints.real, 'scale': constraints.positive}
    support = constraints.simplex
    has_rsample = True

    def __init__(self, loc, scale, validate_args=None):
        base_dist = Normal(loc, scale)
        super(LogisticNormal, self).__init__(base_dist,
                                             StickBreakingTransform(),
                                             validate_args=validate_args)
        # Adjust event shape since StickBreakingTransform adds 1 dimension
        # (samples live on the simplex, one coordinate longer than the base).
        self._event_shape = torch.Size([s + 1 for s in self._event_shape])

    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(LogisticNormal, _instance)
        return super(LogisticNormal, self).expand(batch_shape, _instance=new)

    @property
    def loc(self):
        # Location of the underlying Normal base distribution.
        return self.base_dist.loc

    @property
    def scale(self):
        # Scale of the underlying Normal base distribution.
        return self.base_dist.scale
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/distributions/logistic_normal.py
0.932768
0.703685
logistic_normal.py
pypi
import torch
from torch._six import inf
from torch.distributions.distribution import Distribution
from torch.distributions import Categorical
from numbers import Number
from torch.distributions import constraints
from torch.distributions.utils import broadcast_all


class Multinomial(Distribution):
    r"""
    Creates a Multinomial distribution parameterized by :attr:`total_count` and
    either :attr:`probs` or :attr:`logits` (but not both). The innermost dimension of
    :attr:`probs` indexes over categories. All other dimensions index over batches.

    Note that :attr:`total_count` need not be specified if only :meth:`log_prob` is
    called (see example below)

    .. note:: :attr:`probs` must be non-negative, finite and have a non-zero sum,
              and it will be normalized to sum to 1.

    -   :meth:`sample` requires a single shared `total_count` for all
        parameters and samples.
    -   :meth:`log_prob` allows different `total_count` for each parameter and
        sample.

    Example::

        >>> m = Multinomial(100, torch.tensor([ 1., 1., 1., 1.]))
        >>> x = m.sample()  # equal probability of 0, 1, 2, 3
        tensor([ 21.,  24.,  30.,  25.])

        >>> Multinomial(probs=torch.tensor([1., 1., 1., 1.])).log_prob(x)
        tensor([-4.1338])

    Args:
        total_count (int): number of trials
        probs (Tensor): event probabilities
        logits (Tensor): event log probabilities
    """
    arg_constraints = {'probs': constraints.simplex,
                       'logits': constraints.real}

    @property
    def mean(self):
        return self.probs * self.total_count

    @property
    def variance(self):
        return self.total_count * self.probs * (1 - self.probs)

    def __init__(self, total_count=1, probs=None, logits=None, validate_args=None):
        if not isinstance(total_count, Number):
            raise NotImplementedError('inhomogeneous total_count is not supported')
        self.total_count = total_count
        # The inner Categorical handles probs/logits normalization and storage.
        self._categorical = Categorical(probs=probs, logits=logits)
        batch_shape = self._categorical.batch_shape
        event_shape = self._categorical.param_shape[-1:]
        super(Multinomial, self).__init__(batch_shape, event_shape, validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(Multinomial, _instance)
        batch_shape = torch.Size(batch_shape)
        new.total_count = self.total_count
        new._categorical = self._categorical.expand(batch_shape)
        super(Multinomial, new).__init__(batch_shape, self.event_shape, validate_args=False)
        new._validate_args = self._validate_args
        return new

    def _new(self, *args, **kwargs):
        return self._categorical._new(*args, **kwargs)

    @constraints.dependent_property
    def support(self):
        return constraints.integer_interval(0, self.total_count)

    @property
    def logits(self):
        return self._categorical.logits

    @property
    def probs(self):
        return self._categorical.probs

    @property
    def param_shape(self):
        return self._categorical.param_shape

    def sample(self, sample_shape=torch.Size()):
        sample_shape = torch.Size(sample_shape)
        # Draw total_count independent categorical indices per sample.
        samples = self._categorical.sample(torch.Size((self.total_count,)) + sample_shape)
        # samples.shape is (total_count, sample_shape, batch_shape), need to change it to
        # (sample_shape, batch_shape, total_count)
        shifted_idx = list(range(samples.dim()))
        shifted_idx.append(shifted_idx.pop(0))
        samples = samples.permute(*shifted_idx)
        # Histogram the drawn indices along the trailing (category) dimension.
        counts = samples.new(self._extended_shape(sample_shape)).zero_()
        counts.scatter_add_(-1, samples, torch.ones_like(samples))
        return counts.type_as(self.probs)

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        logits, value = broadcast_all(self.logits.clone(memory_format=torch.contiguous_format), value)
        log_factorial_n = torch.lgamma(value.sum(-1) + 1)
        log_factorial_xs = torch.lgamma(value + 1).sum(-1)
        # Zero-count categories with -inf logits would yield 0 * -inf = nan;
        # zero the logit so those categories contribute nothing.
        logits[(value == 0) & (logits == -inf)] = 0
        log_powers = (logits * value).sum(-1)
        return log_factorial_n - log_factorial_xs + log_powers
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/distributions/multinomial.py
0.951176
0.69529
multinomial.py
pypi
import math

import torch
from torch._six import inf, nan
from torch.distributions import Chi2, constraints
from torch.distributions.distribution import Distribution
from torch.distributions.utils import _standard_normal, broadcast_all


class StudentT(Distribution):
    r"""
    Creates a Student's t-distribution parameterized by degree of
    freedom :attr:`df`, mean :attr:`loc` and scale :attr:`scale`.

    Example::

        >>> m = StudentT(torch.tensor([2.0]))
        >>> m.sample()  # Student's t-distributed with degrees of freedom=2
        tensor([ 0.1046])

    Args:
        df (float or Tensor): degrees of freedom
        loc (float or Tensor): mean of the distribution
        scale (float or Tensor): scale of the distribution
    """
    arg_constraints = {'df': constraints.positive, 'loc': constraints.real, 'scale': constraints.positive}
    support = constraints.real
    has_rsample = True

    @property
    def mean(self):
        # The mean is defined only for df > 1; report nan elsewhere.
        m = self.loc.clone(memory_format=torch.contiguous_format)
        m[self.df <= 1] = nan
        return m

    @property
    def variance(self):
        # Piecewise by df: finite for df > 2, infinite for 1 < df <= 2,
        # undefined (nan) for df <= 1.
        m = self.df.clone(memory_format=torch.contiguous_format)
        m[self.df > 2] = self.scale[self.df > 2].pow(2) * self.df[self.df > 2] / (self.df[self.df > 2] - 2)
        m[(self.df <= 2) & (self.df > 1)] = inf
        m[self.df <= 1] = nan
        return m

    def __init__(self, df, loc=0., scale=1., validate_args=None):
        self.df, self.loc, self.scale = broadcast_all(df, loc, scale)
        # Chi2(df) is used by rsample below.
        self._chi2 = Chi2(self.df)
        batch_shape = self.df.size()
        super(StudentT, self).__init__(batch_shape, validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(StudentT, _instance)
        batch_shape = torch.Size(batch_shape)
        new.df = self.df.expand(batch_shape)
        new.loc = self.loc.expand(batch_shape)
        new.scale = self.scale.expand(batch_shape)
        new._chi2 = self._chi2.expand(batch_shape)
        super(StudentT, new).__init__(batch_shape, validate_args=False)
        new._validate_args = self._validate_args
        return new

    def rsample(self, sample_shape=torch.Size()):
        # NOTE: This does not agree with scipy implementation as much as other distributions.
        # (see https://github.com/fritzo/notebooks/blob/master/debug-student-t.ipynb). Using DoubleTensor
        # parameters seems to help.

        #   X ~ Normal(0, 1)
        #   Z ~ Chi2(df)
        #   Y = X / sqrt(Z / df) ~ StudentT(df)
        shape = self._extended_shape(sample_shape)
        X = _standard_normal(shape, dtype=self.df.dtype, device=self.df.device)
        Z = self._chi2.rsample(sample_shape)
        Y = X * torch.rsqrt(Z / self.df)
        return self.loc + self.scale * Y

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        # Standardize, then subtract the log normalization constant Z.
        y = (value - self.loc) / self.scale
        Z = (self.scale.log() +
             0.5 * self.df.log() +
             0.5 * math.log(math.pi) +
             torch.lgamma(0.5 * self.df) -
             torch.lgamma(0.5 * (self.df + 1.)))
        return -0.5 * (self.df + 1.) * torch.log1p(y**2. / self.df) - Z

    def entropy(self):
        # lbeta = log B(df/2, 1/2); math.lgamma(0.5) = log Gamma(1/2).
        lbeta = torch.lgamma(0.5 * self.df) + math.lgamma(0.5) - torch.lgamma(0.5 * (self.df + 1))
        return (self.scale.log() +
                0.5 * (self.df + 1) *
                (torch.digamma(0.5 * (self.df + 1)) - torch.digamma(0.5 * self.df)) +
                0.5 * self.df.log() + lbeta)
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/distributions/studentT.py
0.945273
0.492554
studentT.py
pypi
from functools import update_wrapper
from numbers import Number

import torch
import torch.nn.functional as F


def broadcast_all(*values):
    r"""
    Broadcast a mix of tensors and plain numbers to a common shape.

    Tensor arguments are broadcast following the usual broadcasting semantics;
    scalar arguments are first promoted to tensors using the dtype/device of
    the first tensor argument (or the default dtype when no tensor is given).

    Args:
        values (list of `numbers.Number` or `torch.*Tensor`)

    Raises:
        ValueError: if any of the values is not a `numbers.Number` or
            `torch.*Tensor` instance
    """
    for v in values:
        if not (torch.is_tensor(v) or isinstance(v, Number)):
            raise ValueError('Input arguments must all be instances of numbers.Number or torch.tensor.')
    # Borrow dtype/device from the first tensor argument, if any.
    template = next((v for v in values if torch.is_tensor(v)), None)
    if template is None:
        options = dict(dtype=torch.get_default_dtype())
    else:
        options = dict(dtype=template.dtype, device=template.device)
    tensors = [v if torch.is_tensor(v) else torch.tensor(v, **options) for v in values]
    return torch.broadcast_tensors(*tensors)


def _standard_normal(shape, dtype, device):
    """Draw standard-normal noise of the given shape, in a tracer-safe way."""
    if torch._C._get_tracing_state():
        # [JIT WORKAROUND] lack of support for .normal_()
        zeros = torch.zeros(shape, dtype=dtype, device=device)
        ones = torch.ones(shape, dtype=dtype, device=device)
        return torch.normal(zeros, ones)
    return torch.empty(shape, dtype=dtype, device=device).normal_()


def _sum_rightmost(value, dim):
    r"""
    Sum out ``dim`` many rightmost dimensions of a given tensor.

    Args:
        value (Tensor): A tensor of ``.dim()`` at least ``dim``.
        dim (int): The number of rightmost dims to sum out.
    """
    if dim == 0:
        return value
    # Flatten the trailing `dim` dimensions into one, then reduce it.
    flat_shape = value.shape[:-dim] + (-1,)
    return value.reshape(flat_shape).sum(-1)


def logits_to_probs(logits, is_binary=False):
    r"""
    Converts a tensor of logits into probabilities.

    In the binary case each value denotes log odds; otherwise the values along
    the last dimension are (possibly unnormalized) log probabilities of the
    events.
    """
    return torch.sigmoid(logits) if is_binary else F.softmax(logits, dim=-1)


def clamp_probs(probs):
    """Clamp probabilities into the open interval (0, 1) by machine epsilon."""
    tiny = torch.finfo(probs.dtype).eps
    return probs.clamp(min=tiny, max=1 - tiny)


def probs_to_logits(probs, is_binary=False):
    r"""
    Converts a tensor of probabilities into logits.

    In the binary case the input denotes the probability of the event indexed
    by `1`; otherwise the values along the last dimension denote the
    probabilities of occurrence of each of the events.
    """
    clamped = clamp_probs(probs)
    if is_binary:
        return torch.log(clamped) - torch.log1p(-clamped)
    return torch.log(clamped)


class lazy_property(object):
    r"""
    Decorator for lazily computed attributes.

    A non-data descriptor: the wrapped method runs once (under
    ``torch.enable_grad()``) on first access, and its result is then stored on
    the instance, shadowing the descriptor for subsequent lookups.
    """
    def __init__(self, wrapped):
        self.wrapped = wrapped
        update_wrapper(self, wrapped)

    def __get__(self, instance, obj_type=None):
        # Class-level access returns the descriptor itself.
        if instance is None:
            return self
        with torch.enable_grad():
            computed = self.wrapped(instance)
        setattr(instance, self.wrapped.__name__, computed)
        return computed
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/distributions/utils.py
0.945045
0.721792
utils.py
pypi
import torch
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.distributions import constraints
from torch.distributions.exp_family import ExponentialFamily


# This helper is exposed for testing.
def _Dirichlet_backward(x, concentration, grad_output):
    # Pathwise gradient of a Dirichlet sample x w.r.t. its concentration,
    # chained with grad_output (relies on the native torch._dirichlet_grad).
    total = concentration.sum(-1, True).expand_as(concentration)
    grad = torch._dirichlet_grad(x, concentration, total)
    return grad * (grad_output - (x * grad_output).sum(-1, True))


class _Dirichlet(Function):
    # Custom autograd Function: sampling uses a native routine in forward and
    # the pathwise derivative above in backward.
    @staticmethod
    def forward(ctx, concentration):
        x = torch._sample_dirichlet(concentration)
        ctx.save_for_backward(x, concentration)
        return x

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        x, concentration = ctx.saved_tensors
        return _Dirichlet_backward(x, concentration, grad_output)


class Dirichlet(ExponentialFamily):
    r"""
    Creates a Dirichlet distribution parameterized by concentration :attr:`concentration`.

    Example::

        >>> m = Dirichlet(torch.tensor([0.5, 0.5]))
        >>> m.sample()  # Dirichlet distributed with concentration [0.5, 0.5]
        tensor([ 0.1046,  0.8954])

    Args:
        concentration (Tensor): concentration parameter of the distribution
            (often referred to as alpha)
    """
    arg_constraints = {'concentration': constraints.positive}
    support = constraints.simplex
    has_rsample = True

    def __init__(self, concentration, validate_args=None):
        if concentration.dim() < 1:
            raise ValueError("`concentration` parameter must be at least one-dimensional.")
        self.concentration = concentration
        # The last dim indexes event coordinates; all leading dims are batch.
        batch_shape, event_shape = concentration.shape[:-1], concentration.shape[-1:]
        super(Dirichlet, self).__init__(batch_shape, event_shape, validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(Dirichlet, _instance)
        batch_shape = torch.Size(batch_shape)
        new.concentration = self.concentration.expand(batch_shape + self.event_shape)
        super(Dirichlet, new).__init__(batch_shape, self.event_shape, validate_args=False)
        new._validate_args = self._validate_args
        return new

    def rsample(self, sample_shape=()):
        shape = self._extended_shape(sample_shape)
        concentration = self.concentration.expand(shape)
        return _Dirichlet.apply(concentration)

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        # log p(x) = sum((alpha - 1) * log x) + log Gamma(sum alpha)
        #            - sum(log Gamma(alpha))
        return ((torch.log(value) * (self.concentration - 1.0)).sum(-1) +
                torch.lgamma(self.concentration.sum(-1)) -
                torch.lgamma(self.concentration).sum(-1))

    @property
    def mean(self):
        return self.concentration / self.concentration.sum(-1, True)

    @property
    def variance(self):
        con0 = self.concentration.sum(-1, True)
        return self.concentration * (con0 - self.concentration) / (con0.pow(2) * (con0 + 1))

    def entropy(self):
        k = self.concentration.size(-1)
        a0 = self.concentration.sum(-1)
        return (torch.lgamma(self.concentration).sum(-1) - torch.lgamma(a0) -
                (k - a0) * torch.digamma(a0) -
                ((self.concentration - 1.0) * torch.digamma(self.concentration)).sum(-1))

    @property
    def _natural_params(self):
        return (self.concentration, )

    def _log_normalizer(self, x):
        return x.lgamma().sum(-1) - torch.lgamma(x.sum(-1))
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/distributions/dirichlet.py
0.966315
0.569374
dirichlet.py
pypi
import torch
from torch.distributions import constraints
from torch.distributions.categorical import Categorical
from torch.distributions.distribution import Distribution


class OneHotCategorical(Distribution):
    r"""
    Creates a one-hot categorical distribution parameterized by :attr:`probs` or
    :attr:`logits`.

    Samples are one-hot coded vectors of size ``probs.size(-1)``.

    .. note:: :attr:`probs` must be non-negative, finite and have a non-zero sum,
              and it will be normalized to sum to 1.

    See also: :func:`torch.distributions.Categorical` for specifications of
    :attr:`probs` and :attr:`logits`.

    Example::

        >>> m = OneHotCategorical(torch.tensor([ 0.25, 0.25, 0.25, 0.25 ]))
        >>> m.sample()  # equal probability of 0, 1, 2, 3
        tensor([ 0.,  0.,  0.,  1.])

    Args:
        probs (Tensor): event probabilities
        logits (Tensor): event log probabilities
    """
    arg_constraints = {'probs': constraints.simplex,
                       'logits': constraints.real}
    support = constraints.simplex
    has_enumerate_support = True

    def __init__(self, probs=None, logits=None, validate_args=None):
        # All distributional behavior is delegated to an inner Categorical;
        # this class only converts between index and one-hot representations.
        inner = Categorical(probs, logits)
        self._categorical = inner
        super(OneHotCategorical, self).__init__(inner.batch_shape,
                                                inner.param_shape[-1:],
                                                validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        new = self._get_checked_instance(OneHotCategorical, _instance)
        shape = torch.Size(batch_shape)
        new._categorical = self._categorical.expand(shape)
        super(OneHotCategorical, new).__init__(shape, self.event_shape,
                                               validate_args=False)
        new._validate_args = self._validate_args
        return new

    def _new(self, *args, **kwargs):
        return self._categorical._new(*args, **kwargs)

    @property
    def _param(self):
        return self._categorical._param

    @property
    def probs(self):
        return self._categorical.probs

    @property
    def logits(self):
        return self._categorical.logits

    @property
    def mean(self):
        return self._categorical.probs

    @property
    def variance(self):
        # Each coordinate is marginally Bernoulli(p_i): variance p_i * (1 - p_i).
        p = self._categorical.probs
        return p * (1 - p)

    @property
    def param_shape(self):
        return self._categorical.param_shape

    def sample(self, sample_shape=torch.Size()):
        # Draw class indices, then scatter them into one-hot vectors with the
        # same dtype/device as the probabilities.
        idx = self._categorical.sample(torch.Size(sample_shape))
        width = self._categorical._num_events
        return torch.nn.functional.one_hot(idx, width).to(self._categorical.probs)

    def log_prob(self, value):
        if self._validate_args:
            self._validate_sample(value)
        # argmax over the last dim recovers the class index from the one-hot code.
        return self._categorical.log_prob(value.max(-1)[1])

    def entropy(self):
        return self._categorical.entropy()

    def enumerate_support(self, expand=True):
        # The support is the n one-hot basis vectors, broadcast over batches.
        n = self.event_shape[0]
        basis = torch.eye(n, dtype=self._param.dtype, device=self._param.device)
        basis = basis.view((n,) + (1,) * len(self.batch_shape) + (n,))
        if expand:
            basis = basis.expand((n,) + self.batch_shape + (n,))
        return basis
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/distributions/one_hot_categorical.py
0.957118
0.640819
one_hot_categorical.py
pypi
from numbers import Number import torch from torch.distributions import constraints from torch.distributions.exp_family import ExponentialFamily from torch.distributions.utils import broadcast_all class Exponential(ExponentialFamily): r""" Creates a Exponential distribution parameterized by :attr:`rate`. Example:: >>> m = Exponential(torch.tensor([1.0])) >>> m.sample() # Exponential distributed with rate=1 tensor([ 0.1046]) Args: rate (float or Tensor): rate = 1 / scale of the distribution """ arg_constraints = {'rate': constraints.positive} support = constraints.positive has_rsample = True _mean_carrier_measure = 0 @property def mean(self): return self.rate.reciprocal() @property def stddev(self): return self.rate.reciprocal() @property def variance(self): return self.rate.pow(-2) def __init__(self, rate, validate_args=None): self.rate, = broadcast_all(rate) batch_shape = torch.Size() if isinstance(rate, Number) else self.rate.size() super(Exponential, self).__init__(batch_shape, validate_args=validate_args) def expand(self, batch_shape, _instance=None): new = self._get_checked_instance(Exponential, _instance) batch_shape = torch.Size(batch_shape) new.rate = self.rate.expand(batch_shape) super(Exponential, new).__init__(batch_shape, validate_args=False) new._validate_args = self._validate_args return new def rsample(self, sample_shape=torch.Size()): shape = self._extended_shape(sample_shape) if torch._C._get_tracing_state(): # [JIT WORKAROUND] lack of support for ._exponential() u = torch.rand(shape, dtype=self.rate.dtype, device=self.rate.device) return -(-u).log1p() / self.rate return self.rate.new(shape).exponential_() / self.rate def log_prob(self, value): if self._validate_args: self._validate_sample(value) return self.rate.log() - self.rate * value def cdf(self, value): if self._validate_args: self._validate_sample(value) return 1 - torch.exp(-self.rate * value) def icdf(self, value): if self._validate_args: self._validate_sample(value) return -torch.log(1 - 
value) / self.rate def entropy(self): return 1.0 - torch.log(self.rate) @property def _natural_params(self): return (-self.rate, ) def _log_normalizer(self, x): return -torch.log(-x)
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/distributions/exponential.py
0.956012
0.438184
exponential.py
pypi
import torch
from torch.distributions.distribution import Distribution
from torch.distributions import Categorical
from torch.distributions import constraints


class MixtureSameFamily(Distribution):
    r"""
    The `MixtureSameFamily` distribution implements a (batch of) mixture
    distribution where all components are from different parameterizations of
    the same distribution type. It is parameterized by a `Categorical`
    "selecting distribution" (over `k` components) and a component
    distribution, i.e., a `Distribution` with a rightmost batch shape
    (equal to `[k]`) which indexes each (batch of) component.

    Examples::

        # Construct Gaussian Mixture Model in 1D consisting of 5 equally
        # weighted normal distributions
        >>> mix = D.Categorical(torch.ones(5,))
        >>> comp = D.Normal(torch.randn(5,), torch.rand(5,))
        >>> gmm = MixtureSameFamily(mix, comp)

        # Construct Gaussian Mixture Model in 2D consisting of 5 equally
        # weighted bivariate normal distributions
        >>> mix = D.Categorical(torch.ones(5,))
        >>> comp = D.Independent(D.Normal(
                     torch.randn(5,2), torch.rand(5,2)), 1)
        >>> gmm = MixtureSameFamily(mix, comp)

        # Construct a batch of 3 Gaussian Mixture Models in 2D each
        # consisting of 5 random weighted bivariate normal distributions
        >>> mix = D.Categorical(torch.rand(3,5))
        >>> comp = D.Independent(D.Normal(
                    torch.randn(3,5,2), torch.rand(3,5,2)), 1)
        >>> gmm = MixtureSameFamily(mix, comp)

    Args:
        mixture_distribution: `torch.distributions.Categorical`-like
            instance. Manages the probability of selecting a component.
            The number of categories must match the rightmost batch
            dimension of the `component_distribution`. Must have either
            scalar `batch_shape` or `batch_shape` matching
            `component_distribution.batch_shape[:-1]`
        component_distribution: `torch.distributions.Distribution`-like
            instance. Right-most batch dimension indexes the component.
    """
    arg_constraints = {}
    has_rsample = False

    def __init__(self,
                 mixture_distribution,
                 component_distribution,
                 validate_args=None):
        self._mixture_distribution = mixture_distribution
        self._component_distribution = component_distribution

        if not isinstance(self._mixture_distribution, Categorical):
            raise ValueError(" The Mixture distribution needs to be an "
                             " instance of torch.distribtutions.Categorical")

        if not isinstance(self._component_distribution, Distribution):
            raise ValueError("The Component distribution need to be an "
                             "instance of torch.distributions.Distribution")

        # Check that batch size matches: the mixture's batch shape must
        # broadcast against the component batch shape with the component
        # index dimension stripped off.
        mdbs = self._mixture_distribution.batch_shape
        cdbs = self._component_distribution.batch_shape[:-1]
        for size1, size2 in zip(reversed(mdbs), reversed(cdbs)):
            if size1 != 1 and size2 != 1 and size1 != size2:
                raise ValueError("`mixture_distribution.batch_shape` ({0}) is not "
                                 "compatible with `component_distribution."
                                 "batch_shape`({1})".format(mdbs, cdbs))

        # Check that the number of mixture components matches the rightmost
        # component batch dimension.
        km = self._mixture_distribution.logits.shape[-1]
        kc = self._component_distribution.batch_shape[-1]
        if km is not None and kc is not None and km != kc:
            raise ValueError("`mixture_distribution component` ({0}) does not"
                             " equal `component_distribution.batch_shape[-1]`"
                             " ({1})".format(km, kc))
        self._num_component = km

        event_shape = self._component_distribution.event_shape
        self._event_ndims = len(event_shape)
        super(MixtureSameFamily, self).__init__(batch_shape=cdbs,
                                                event_shape=event_shape,
                                                validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        """Return a new MixtureSameFamily with batch dims expanded to `batch_shape`.

        The component distribution gets the extra mixture dimension `k`
        appended before expansion.
        """
        batch_shape = torch.Size(batch_shape)
        batch_shape_comp = batch_shape + (self._num_component,)
        new = self._get_checked_instance(MixtureSameFamily, _instance)
        new._component_distribution = \
            self._component_distribution.expand(batch_shape_comp)
        new._mixture_distribution = \
            self._mixture_distribution.expand(batch_shape)
        new._num_component = self._num_component
        new._event_ndims = self._event_ndims
        event_shape = new._component_distribution.event_shape
        super(MixtureSameFamily, new).__init__(batch_shape=batch_shape,
                                               event_shape=event_shape,
                                               validate_args=False)
        new._validate_args = self._validate_args
        return new

    @constraints.dependent_property
    def support(self):
        # FIXME this may have the wrong shape when support contains batched
        # parameters
        return self._component_distribution.support

    @property
    def mixture_distribution(self):
        return self._mixture_distribution

    @property
    def component_distribution(self):
        return self._component_distribution

    @property
    def mean(self):
        # E[Y] = sum_k p_k * E[Y | k], summed over the component dimension.
        probs = self._pad_mixture_dimensions(self.mixture_distribution.probs)
        return torch.sum(probs * self.component_distribution.mean,
                         dim=-1 - self._event_ndims)  # [B, E]

    @property
    def variance(self):
        # Law of total variance: Var(Y) = E[Var(Y|X)] + Var(E[Y|X])
        probs = self._pad_mixture_dimensions(self.mixture_distribution.probs)
        mean_cond_var = torch.sum(probs * self.component_distribution.variance,
                                  dim=-1 - self._event_ndims)
        var_cond_mean = torch.sum(probs * (self.component_distribution.mean -
                                           self._pad(self.mean)).pow(2.0),
                                  dim=-1 - self._event_ndims)
        return mean_cond_var + var_cond_mean

    def cdf(self, x):
        """Mixture CDF: probability-weighted sum of the component CDFs."""
        x = self._pad(x)
        cdf_x = self.component_distribution.cdf(x)
        mix_prob = self.mixture_distribution.probs

        return torch.sum(cdf_x * mix_prob, dim=-1)

    def log_prob(self, x):
        """log p(x) = logsumexp_k [ log p(x | k) + log p(k) ]."""
        x = self._pad(x)
        log_prob_x = self.component_distribution.log_prob(x)  # [S, B, k]
        log_mix_prob = torch.log_softmax(self.mixture_distribution.logits,
                                         dim=-1)  # [B, k]
        return torch.logsumexp(log_prob_x + log_mix_prob, dim=-1)  # [S, B]

    def sample(self, sample_shape=torch.Size()):
        """Ancestral sampling: draw component indices, then gather the
        corresponding component samples along the mixture dimension."""
        with torch.no_grad():
            sample_len = len(sample_shape)
            batch_len = len(self.batch_shape)
            # Index of the mixture (k) dimension in the component samples.
            gather_dim = sample_len + batch_len
            es = self.event_shape

            # mixture samples [n, B]
            mix_sample = self.mixture_distribution.sample(sample_shape)
            mix_shape = mix_sample.shape

            # component samples [n, B, k, E]
            comp_samples = self.component_distribution.sample(sample_shape)

            # Gather along the k dimension: reshape indices to
            # [n, B, 1, 1...1] and tile over the event dims.
            mix_sample_r = mix_sample.reshape(
                mix_shape + torch.Size([1] * (len(es) + 1)))
            mix_sample_r = mix_sample_r.repeat(
                torch.Size([1] * len(mix_shape)) + torch.Size([1]) + es)

            samples = torch.gather(comp_samples, gather_dim, mix_sample_r)
            return samples.squeeze(gather_dim)

    def _pad(self, x):
        # Insert a singleton mixture dimension just left of the event dims.
        return x.unsqueeze(-1 - self._event_ndims)

    def _pad_mixture_dimensions(self, x):
        # Reshape mixture probs [B', k] so they broadcast against component
        # statistics of shape [B, k, E].
        # NOTE(review): `Size.numel()` is the *product* of the dims, not the
        # number of dims — upstream later switched this to len(); confirm
        # whether batched (multi-dim) mixtures are handled correctly here.
        dist_batch_ndims = self.batch_shape.numel()
        cat_batch_ndims = self.mixture_distribution.batch_shape.numel()
        pad_ndims = 0 if cat_batch_ndims == 1 else \
            dist_batch_ndims - cat_batch_ndims
        xs = x.shape
        x = x.reshape(xs[:-1] + torch.Size(pad_ndims * [1]) +
                      xs[-1:] + torch.Size(self._event_ndims * [1]))
        return x

    def __repr__(self):
        args_string = '\n  {},\n  {}'.format(self.mixture_distribution,
                                             self.component_distribution)
        return 'MixtureSameFamily' + '(' + args_string + ')'
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/distributions/mixture_same_family.py
0.943112
0.713145
mixture_same_family.py
pypi
r"""
The following constraints are implemented:

- ``constraints.boolean``
- ``constraints.cat``
- ``constraints.dependent``
- ``constraints.greater_than(lower_bound)``
- ``constraints.integer_interval(lower_bound, upper_bound)``
- ``constraints.interval(lower_bound, upper_bound)``
- ``constraints.lower_cholesky``
- ``constraints.lower_triangular``
- ``constraints.nonnegative_integer``
- ``constraints.positive``
- ``constraints.positive_definite``
- ``constraints.positive_integer``
- ``constraints.real``
- ``constraints.real_vector``
- ``constraints.simplex``
- ``constraints.stack``
- ``constraints.unit_interval``
"""

import torch

__all__ = [
    'Constraint',
    'boolean',
    'cat',
    'dependent',
    'dependent_property',
    'greater_than',
    'greater_than_eq',
    'integer_interval',
    'interval',
    'half_open_interval',
    'is_dependent',
    'less_than',
    'lower_cholesky',
    'lower_triangular',
    'nonnegative_integer',
    'positive',
    'positive_definite',
    'positive_integer',
    'real',
    'real_vector',
    'simplex',
    'stack',
    'unit_interval',
]


class Constraint(object):
    """
    Abstract base class for constraints.

    A constraint object represents a region over which a variable is valid,
    e.g. within which a variable can be optimized.
    """
    def check(self, value):
        """
        Returns a byte tensor of `sample_shape + batch_shape` indicating
        whether each event in value satisfies this constraint.
        """
        raise NotImplementedError

    def __repr__(self):
        # Strip the leading underscore of the concrete class name.
        return self.__class__.__name__[1:] + '()'


class _Dependent(Constraint):
    """
    Placeholder for variables whose support depends on other variables.
    These variables obey no simple coordinate-wise constraints.
    """
    def check(self, x):
        raise ValueError('Cannot determine validity of dependent constraint')


def is_dependent(constraint):
    """Return True if `constraint` is a (subclass of) dependent constraint."""
    return isinstance(constraint, _Dependent)


class _DependentProperty(property, _Dependent):
    """
    Decorator that extends @property to act like a `Dependent` constraint when
    called on a class and act like a property when called on an object.

    Example::

        class Uniform(Distribution):
            def __init__(self, low, high):
                self.low = low
                self.high = high
            @constraints.dependent_property
            def support(self):
                return constraints.interval(self.low, self.high)
    """
    pass


class _Boolean(Constraint):
    """
    Constrain to the two values `{0, 1}`.
    """
    def check(self, value):
        return (value == 0) | (value == 1)


class _IntegerInterval(Constraint):
    """
    Constrain to an integer interval `[lower_bound, upper_bound]`.
    """
    def __init__(self, lower_bound, upper_bound):
        self.lower_bound = lower_bound
        self.upper_bound = upper_bound

    def check(self, value):
        # `value % 1 == 0` tests integrality on floating-point tensors.
        return (value % 1 == 0) & (self.lower_bound <= value) & (value <= self.upper_bound)

    def __repr__(self):
        fmt_string = self.__class__.__name__[1:]
        fmt_string += '(lower_bound={}, upper_bound={})'.format(self.lower_bound, self.upper_bound)
        return fmt_string


class _IntegerLessThan(Constraint):
    """
    Constrain to an integer interval `(-inf, upper_bound]`.
    """
    def __init__(self, upper_bound):
        self.upper_bound = upper_bound

    def check(self, value):
        return (value % 1 == 0) & (value <= self.upper_bound)

    def __repr__(self):
        fmt_string = self.__class__.__name__[1:]
        fmt_string += '(upper_bound={})'.format(self.upper_bound)
        return fmt_string


class _IntegerGreaterThan(Constraint):
    """
    Constrain to an integer interval `[lower_bound, inf)`.
    """
    def __init__(self, lower_bound):
        self.lower_bound = lower_bound

    def check(self, value):
        return (value % 1 == 0) & (value >= self.lower_bound)

    def __repr__(self):
        fmt_string = self.__class__.__name__[1:]
        fmt_string += '(lower_bound={})'.format(self.lower_bound)
        return fmt_string


class _Real(Constraint):
    """
    Trivially constrain to the extended real line `[-inf, inf]`.
    """
    def check(self, value):
        return value == value  # False for NANs.


class _GreaterThan(Constraint):
    """
    Constrain to a real half line `(lower_bound, inf]`.
    """
    def __init__(self, lower_bound):
        self.lower_bound = lower_bound

    def check(self, value):
        return self.lower_bound < value

    def __repr__(self):
        fmt_string = self.__class__.__name__[1:]
        fmt_string += '(lower_bound={})'.format(self.lower_bound)
        return fmt_string


class _GreaterThanEq(Constraint):
    """
    Constrain to a real half line `[lower_bound, inf)`.
    """
    def __init__(self, lower_bound):
        self.lower_bound = lower_bound

    def check(self, value):
        return self.lower_bound <= value

    def __repr__(self):
        fmt_string = self.__class__.__name__[1:]
        fmt_string += '(lower_bound={})'.format(self.lower_bound)
        return fmt_string


class _LessThan(Constraint):
    """
    Constrain to a real half line `[-inf, upper_bound)`.
    """
    def __init__(self, upper_bound):
        self.upper_bound = upper_bound

    def check(self, value):
        return value < self.upper_bound

    def __repr__(self):
        fmt_string = self.__class__.__name__[1:]
        fmt_string += '(upper_bound={})'.format(self.upper_bound)
        return fmt_string


class _Interval(Constraint):
    """
    Constrain to a real interval `[lower_bound, upper_bound]`.
    """
    def __init__(self, lower_bound, upper_bound):
        self.lower_bound = lower_bound
        self.upper_bound = upper_bound

    def check(self, value):
        return (self.lower_bound <= value) & (value <= self.upper_bound)

    def __repr__(self):
        fmt_string = self.__class__.__name__[1:]
        fmt_string += '(lower_bound={}, upper_bound={})'.format(self.lower_bound, self.upper_bound)
        return fmt_string


class _HalfOpenInterval(Constraint):
    """
    Constrain to a real interval `[lower_bound, upper_bound)`.
    """
    def __init__(self, lower_bound, upper_bound):
        self.lower_bound = lower_bound
        self.upper_bound = upper_bound

    def check(self, value):
        return (self.lower_bound <= value) & (value < self.upper_bound)

    def __repr__(self):
        fmt_string = self.__class__.__name__[1:]
        fmt_string += '(lower_bound={}, upper_bound={})'.format(self.lower_bound, self.upper_bound)
        return fmt_string


class _Simplex(Constraint):
    """
    Constrain to the unit simplex in the innermost (rightmost) dimension.
    Specifically: `x >= 0` and `x.sum(-1) == 1`.
    """
    def check(self, value):
        # Sum-to-one is checked with an absolute tolerance of 1e-6.
        return torch.all(value >= 0, dim=-1) & ((value.sum(-1) - 1).abs() < 1e-6)


class _LowerTriangular(Constraint):
    """
    Constrain to lower-triangular square matrices.
    """
    def check(self, value):
        value_tril = value.tril()
        # A matrix is lower-triangular iff it equals its own tril();
        # min over the flattened entries AND-reduces the comparison.
        return (value_tril == value).view(value.shape[:-2] + (-1,)).min(-1)[0]


class _LowerCholesky(Constraint):
    """
    Constrain to lower-triangular square matrices with positive diagonals.
    """
    def check(self, value):
        value_tril = value.tril()
        lower_triangular = (value_tril == value).view(value.shape[:-2] + (-1,)).min(-1)[0]

        positive_diagonal = (value.diagonal(dim1=-2, dim2=-1) > 0).min(-1)[0]
        return lower_triangular & positive_diagonal


class _PositiveDefinite(Constraint):
    """
    Constrain to positive-definite matrices.
    """
    def check(self, value):
        matrix_shape = value.shape[-2:]
        batch_shape = value.unsqueeze(0).shape[:-2]
        # TODO: replace with batched linear algebra routine when one becomes available
        # note that `symeig()` returns eigenvalues in ascending order
        # NOTE(review): `symeig` is deprecated/removed in newer torch
        # releases (replaced by torch.linalg.eigvalsh) — confirm the target
        # torch version before reuse.
        flattened_value = value.reshape((-1,) + matrix_shape)
        return torch.stack([v.symeig(eigenvectors=False)[0][:1] > 0.0
                            for v in flattened_value]).view(batch_shape)


class _RealVector(Constraint):
    """
    Constrain to real-valued vectors. This is the same as `constraints.real`,
    but additionally reduces across the `event_shape` dimension.
    """
    def check(self, value):
        return torch.all(value == value, dim=-1)  # False for NANs.


class _Cat(Constraint):
    """
    Constraint functor that applies a sequence of constraints
    `cseq` at the submatrices at dimension `dim`,
    each of size `lengths[dim]`, in a way compatible with :func:`torch.cat`.
    """
    def __init__(self, cseq, dim=0, lengths=None):
        assert all(isinstance(c, Constraint) for c in cseq)
        self.cseq = list(cseq)
        if lengths is None:
            lengths = [1] * len(self.cseq)
        self.lengths = list(lengths)
        assert len(self.lengths) == len(self.cseq)
        self.dim = dim

    def check(self, value):
        assert -value.dim() <= self.dim < value.dim()
        checks = []
        start = 0
        # Check each narrow slice against its paired constraint.
        for constr, length in zip(self.cseq, self.lengths):
            v = value.narrow(self.dim, start, length)
            checks.append(constr.check(v))
            start = start + length  # avoid += for jit compat
        return torch.cat(checks, self.dim)


class _Stack(Constraint):
    """
    Constraint functor that applies a sequence of constraints
    `cseq` at the submatrices at dimension `dim`,
    in a way compatible with :func:`torch.stack`.
    """
    def __init__(self, cseq, dim=0):
        assert all(isinstance(c, Constraint) for c in cseq)
        self.cseq = list(cseq)
        self.dim = dim

    def check(self, value):
        assert -value.dim() <= self.dim < value.dim()
        vs = [value.select(self.dim, i) for i in range(value.size(self.dim))]
        return torch.stack([constr.check(v)
                            for v, constr in zip(vs, self.cseq)], self.dim)


# Public interface.
dependent = _Dependent()
dependent_property = _DependentProperty
boolean = _Boolean()
nonnegative_integer = _IntegerGreaterThan(0)
positive_integer = _IntegerGreaterThan(1)
integer_interval = _IntegerInterval
real = _Real()
real_vector = _RealVector()
positive = _GreaterThan(0.)
greater_than = _GreaterThan
greater_than_eq = _GreaterThanEq
less_than = _LessThan
unit_interval = _Interval(0., 1.)
interval = _Interval
half_open_interval = _HalfOpenInterval
simplex = _Simplex()
lower_triangular = _LowerTriangular()
lower_cholesky = _LowerCholesky()
positive_definite = _PositiveDefinite()
cat = _Cat
stack = _Stack
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/distributions/constraints.py
0.95593
0.593904
constraints.py
pypi
from numbers import Number

import torch
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.utils import broadcast_all, probs_to_logits, logits_to_probs, lazy_property
from torch.nn.functional import binary_cross_entropy_with_logits


class Geometric(Distribution):
    r"""
    Creates a Geometric distribution parameterized by :attr:`probs`,
    where :attr:`probs` is the probability of success of Bernoulli trials.
    It represents the probability that in :math:`k + 1` Bernoulli trials, the
    first :math:`k` trials failed, before seeing a success.

    Samples are non-negative integers [0, :math:`\infty`).

    Example::

        >>> m = Geometric(torch.tensor([0.3]))
        >>> m.sample()  # underlying Bernoulli has 30% chance 1; 70% chance 0
        tensor([ 2.])

    Args:
        probs (Number, Tensor): the probability of sampling `1`. Must be in range (0, 1]
        logits (Number, Tensor): the log-odds of sampling `1`.
    """
    arg_constraints = {'probs': constraints.unit_interval,
                       'logits': constraints.real}
    support = constraints.nonnegative_integer

    def __init__(self, probs=None, logits=None, validate_args=None):
        # Exactly one of `probs` / `logits` may be given; the other is
        # derived lazily below.
        if (probs is None) == (logits is None):
            raise ValueError("Either `probs` or `logits` must be specified, but not both.")
        if probs is not None:
            self.probs, = broadcast_all(probs)
            if not self.probs.gt(0).all():
                raise ValueError('All elements of probs must be greater than 0')
        else:
            self.logits, = broadcast_all(logits)
        probs_or_logits = probs if probs is not None else logits
        if isinstance(probs_or_logits, Number):
            batch_shape = torch.Size()
        else:
            batch_shape = probs_or_logits.size()
        super(Geometric, self).__init__(batch_shape, validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        """Return a new Geometric with batch dims expanded to `batch_shape`.

        Only the parameterization actually stored on this instance
        (`probs` or `logits`) is expanded; the other stays lazy.
        """
        new = self._get_checked_instance(Geometric, _instance)
        batch_shape = torch.Size(batch_shape)
        if 'probs' in self.__dict__:
            new.probs = self.probs.expand(batch_shape)
        if 'logits' in self.__dict__:
            new.logits = self.logits.expand(batch_shape)
        super(Geometric, new).__init__(batch_shape, validate_args=False)
        new._validate_args = self._validate_args
        return new

    @property
    def mean(self):
        # E[X] = (1 - p) / p
        return 1. / self.probs - 1.

    @property
    def variance(self):
        # Var[X] = (1 - p) / p**2
        return (1. / self.probs - 1.) / self.probs

    @lazy_property
    def logits(self):
        # Derived from `probs` on first access when only `probs` was given.
        return probs_to_logits(self.probs, is_binary=True)

    @lazy_property
    def probs(self):
        # Derived from `logits` on first access when only `logits` was given.
        return logits_to_probs(self.logits, is_binary=True)

    def sample(self, sample_shape=torch.Size()):
        """Inverse-CDF sampling: floor(log(U) / log(1 - p)) for U ~ Uniform."""
        shape = self._extended_shape(sample_shape)
        # Clamp U away from 0 so log(U) is finite.
        tiny = torch.finfo(self.probs.dtype).tiny
        with torch.no_grad():
            if torch._C._get_tracing_state():
                # [JIT WORKAROUND] lack of support for .uniform_()
                u = torch.rand(shape, dtype=self.probs.dtype, device=self.probs.device)
                u = u.clamp(min=tiny)
            else:
                u = self.probs.new(shape).uniform_(tiny, 1)
            return (u.log() / (-self.probs).log1p()).floor()

    def log_prob(self, value):
        """log p(k) = k * log(1 - p) + log(p)."""
        if self._validate_args:
            self._validate_sample(value)
        value, probs = broadcast_all(value, self.probs.clone(memory_format=torch.contiguous_format))
        # When p == 1 and k == 0 the answer is log(1) = 0; zeroing the
        # broadcast copy of p avoids log1p(-1) = -inf producing 0 * -inf.
        probs[(probs == 1) & (value == 0)] = 0
        return value * (-probs).log1p() + self.probs.log()

    def entropy(self):
        # H[X] = BCE(p) / p (binary entropy of p scaled by the mean number
        # of trials).
        return binary_cross_entropy_with_logits(self.logits, self.probs, reduction='none') / self.probs
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/distributions/geometric.py
0.942015
0.618636
geometric.py
pypi
import torch
from torch._six import nan
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.utils import probs_to_logits, logits_to_probs, lazy_property


class Categorical(Distribution):
    r"""
    Creates a categorical distribution parameterized by either :attr:`probs` or
    :attr:`logits` (but not both).

    .. note::
        It is equivalent to the distribution that :func:`torch.multinomial`
        samples from.

    Samples are integers from :math:`\{0, \ldots, K-1\}` where `K` is ``probs.size(-1)``.

    If :attr:`probs` is 1D with length-`K`, each element is the relative
    probability of sampling the class at that index.

    If :attr:`probs` is 2D, it is treated as a batch of relative probability
    vectors.

    .. note:: :attr:`probs` must be non-negative, finite and have a non-zero sum,
              and it will be normalized to sum to 1.

    See also: :func:`torch.multinomial`

    Example::

        >>> m = Categorical(torch.tensor([ 0.25, 0.25, 0.25, 0.25 ]))
        >>> m.sample()  # equal probability of 0, 1, 2, 3
        tensor(3)

    Args:
        probs (Tensor): event probabilities
        logits (Tensor): event log-odds
    """
    arg_constraints = {'probs': constraints.simplex,
                       'logits': constraints.real}
    has_enumerate_support = True

    def __init__(self, probs=None, logits=None, validate_args=None):
        # Exactly one of `probs` / `logits` may be given; whichever is
        # stored is normalized and also kept as `_param` for convenience.
        if (probs is None) == (logits is None):
            raise ValueError("Either `probs` or `logits` must be specified, but not both.")
        if probs is not None:
            if probs.dim() < 1:
                raise ValueError("`probs` parameter must be at least one-dimensional.")
            self.probs = probs / probs.sum(-1, keepdim=True)
        else:
            if logits.dim() < 1:
                raise ValueError("`logits` parameter must be at least one-dimensional.")
            # Normalize so logsumexp over the last dim is 0.
            self.logits = logits - logits.logsumexp(dim=-1, keepdim=True)
        self._param = self.probs if probs is not None else self.logits
        self._num_events = self._param.size()[-1]
        batch_shape = self._param.size()[:-1] if self._param.ndimension() > 1 else torch.Size()
        super(Categorical, self).__init__(batch_shape, validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        """Return a new Categorical with batch dims expanded to `batch_shape`."""
        new = self._get_checked_instance(Categorical, _instance)
        batch_shape = torch.Size(batch_shape)
        param_shape = batch_shape + torch.Size((self._num_events,))
        if 'probs' in self.__dict__:
            new.probs = self.probs.expand(param_shape)
            new._param = new.probs
        if 'logits' in self.__dict__:
            new.logits = self.logits.expand(param_shape)
            new._param = new.logits
        new._num_events = self._num_events
        super(Categorical, new).__init__(batch_shape, validate_args=False)
        new._validate_args = self._validate_args
        return new

    def _new(self, *args, **kwargs):
        # Create a tensor with the same dtype/device as the stored parameter.
        return self._param.new(*args, **kwargs)

    @constraints.dependent_property
    def support(self):
        return constraints.integer_interval(0, self._num_events - 1)

    @lazy_property
    def logits(self):
        # Derived from `probs` on first access when only `probs` was given.
        return probs_to_logits(self.probs)

    @lazy_property
    def probs(self):
        # Derived from `logits` on first access when only `logits` was given.
        return logits_to_probs(self.logits)

    @property
    def param_shape(self):
        return self._param.size()

    @property
    def mean(self):
        # Undefined for a categorical: report NaN of matching shape.
        return torch.full(self._extended_shape(), nan, dtype=self.probs.dtype, device=self.probs.device)

    @property
    def variance(self):
        # Undefined for a categorical: report NaN of matching shape.
        return torch.full(self._extended_shape(), nan, dtype=self.probs.dtype, device=self.probs.device)

    def sample(self, sample_shape=torch.Size()):
        """Draw samples by flattening batches into a 2D multinomial call."""
        if not isinstance(sample_shape, torch.Size):
            sample_shape = torch.Size(sample_shape)
        probs_2d = self.probs.reshape(-1, self._num_events)
        samples_2d = torch.multinomial(probs_2d, sample_shape.numel(), True).T
        return samples_2d.reshape(self._extended_shape(sample_shape))

    def log_prob(self, value):
        """Look up log p(value) by gathering from the normalized logits."""
        if self._validate_args:
            self._validate_sample(value)
        value = value.long().unsqueeze(-1)
        value, log_pmf = torch.broadcast_tensors(value, self.logits)
        # After broadcasting, all entries along the event dim are equal;
        # keep one index per event for the gather.
        value = value[..., :1]
        return log_pmf.gather(-1, value).squeeze(-1)

    def entropy(self):
        # H = -sum_k p_k log p_k
        p_log_p = self.logits * self.probs
        return -p_log_p.sum(-1)

    def enumerate_support(self, expand=True):
        """Return all K category values, shaped to broadcast over batches."""
        num_events = self._num_events
        values = torch.arange(num_events, dtype=torch.long, device=self._param.device)
        values = values.view((-1,) + (1,) * len(self._batch_shape))
        if expand:
            values = values.expand((-1,) + self._batch_shape)
        return values
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/distributions/categorical.py
0.949295
0.643098
categorical.py
pypi
from __future__ import absolute_import, division, print_function

import math

import torch
import torch.jit
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.utils import broadcast_all, lazy_property


def _eval_poly(y, coef):
    """Evaluate the polynomial with coefficients `coef` at `y` (Horner's rule).

    `coef` is ordered from the constant term upward; the list is consumed
    from the highest-order term down.
    """
    coef = list(coef)
    result = coef.pop()
    while coef:
        result = coef.pop() + y * result
    return result


# Polynomial coefficients for the modified Bessel functions I0 and I1,
# split into a small-argument (|x| < 3.75) and large-argument regime.
_I0_COEF_SMALL = [1.0, 3.5156229, 3.0899424, 1.2067492, 0.2659732, 0.360768e-1, 0.45813e-2]
_I0_COEF_LARGE = [0.39894228, 0.1328592e-1, 0.225319e-2, -0.157565e-2, 0.916281e-2,
                  -0.2057706e-1, 0.2635537e-1, -0.1647633e-1, 0.392377e-2]
_I1_COEF_SMALL = [0.5, 0.87890594, 0.51498869, 0.15084934, 0.2658733e-1, 0.301532e-2, 0.32411e-3]
_I1_COEF_LARGE = [0.39894228, -0.3988024e-1, -0.362018e-2, 0.163801e-2, -0.1031555e-1,
                  0.2282967e-1, -0.2895312e-1, 0.1787654e-1, -0.420059e-2]

_COEF_SMALL = [_I0_COEF_SMALL, _I1_COEF_SMALL]
_COEF_LARGE = [_I0_COEF_LARGE, _I1_COEF_LARGE]


def _log_modified_bessel_fn(x, order=0):
    """
    Returns ``log(I_order(x))`` for ``x > 0``,
    where `order` is either 0 or 1.
    """
    assert order == 0 or order == 1

    # compute small solution
    y = (x / 3.75)
    y = y * y
    small = _eval_poly(y, _COEF_SMALL[order])
    if order == 1:
        small = x.abs() * small
    small = small.log()

    # compute large solution
    y = 3.75 / x
    large = x - 0.5 * x.log() + _eval_poly(y, _COEF_LARGE[order]).log()

    # torch.where selects the regime element-wise at the 3.75 crossover.
    result = torch.where(x < 3.75, small, large)
    return result


@torch.jit.script
def _rejection_sample(loc, concentration, proposal_r, x):
    # Best-Fisher rejection sampler; loops until every element of `x`
    # has been accepted.
    done = torch.zeros(x.shape, dtype=torch.bool, device=loc.device)
    while not done.all():
        u = torch.rand((3,) + x.shape, dtype=loc.dtype, device=loc.device)
        u1, u2, u3 = u.unbind()
        z = torch.cos(math.pi * u1)
        f = (1 + proposal_r * z) / (proposal_r + z)
        c = concentration * (proposal_r - f)
        accept = ((c * (2 - c) - u2) > 0) | ((c / u2).log() + 1 - c >= 0)
        if accept.any():
            x = torch.where(accept, (u3 - 0.5).sign() * f.acos(), x)
            done = done | accept
    # Shift by `loc` and wrap the angle back into [-pi, pi).
    return (x + math.pi + loc) % (2 * math.pi) - math.pi


class VonMises(Distribution):
    """
    A circular von Mises distribution.

    This implementation uses polar coordinates. The ``loc`` and ``value`` args
    can be any real number (to facilitate unconstrained optimization), but are
    interpreted as angles modulo 2 pi.

    Example::
        >>> m = dist.VonMises(torch.tensor([1.0]), torch.tensor([1.0]))
        >>> m.sample()  # von Mises distributed with loc=1 and concentration=1
        tensor([1.9777])

    :param torch.Tensor loc: an angle in radians.
    :param torch.Tensor concentration: concentration parameter
    """
    arg_constraints = {'loc': constraints.real, 'concentration': constraints.positive}
    support = constraints.real
    has_rsample = False

    def __init__(self, loc, concentration, validate_args=None):
        self.loc, self.concentration = broadcast_all(loc, concentration)
        batch_shape = self.loc.shape
        event_shape = torch.Size()

        # Parameters for sampling (envelope constants of the Best-Fisher
        # rejection sampler, precomputed once per distribution instance).
        tau = 1 + (1 + 4 * self.concentration ** 2).sqrt()
        rho = (tau - (2 * tau).sqrt()) / (2 * self.concentration)
        self._proposal_r = (1 + rho ** 2) / (2 * rho)

        super(VonMises, self).__init__(batch_shape, event_shape, validate_args)

    def log_prob(self, value):
        # log p(x) = kappa cos(x - mu) - log(2 pi I0(kappa))
        log_prob = self.concentration * torch.cos(value - self.loc)
        log_prob = log_prob - math.log(2 * math.pi) - _log_modified_bessel_fn(self.concentration, order=0)
        return log_prob

    @torch.no_grad()
    def sample(self, sample_shape=torch.Size()):
        """
        The sampling algorithm for the von Mises distribution is based on the following paper:
        Best, D. J., and Nicholas I. Fisher.
        "Efficient simulation of the von Mises distribution." Applied Statistics (1979): 152-157.
        """
        shape = self._extended_shape(sample_shape)
        x = torch.empty(shape, dtype=self.loc.dtype, device=self.loc.device)
        return _rejection_sample(self.loc, self.concentration, self._proposal_r, x)

    def expand(self, batch_shape):
        # Fall back to re-constructing the distribution when the generic
        # expansion path is not implemented for this class.
        try:
            return super(VonMises, self).expand(batch_shape)
        except NotImplementedError:
            validate_args = self.__dict__.get('_validate_args')
            loc = self.loc.expand(batch_shape)
            concentration = self.concentration.expand(batch_shape)
            return type(self)(loc, concentration, validate_args=validate_args)

    @property
    def mean(self):
        """
        The provided mean is the circular one.
        """
        return self.loc

    @lazy_property
    def variance(self):
        """
        The provided variance is the circular one.
        """
        # Circular variance: 1 - I1(kappa) / I0(kappa), computed in log space.
        return 1 - (_log_modified_bessel_fn(self.concentration, order=1) -
                    _log_modified_bessel_fn(self.concentration, order=0)).exp()
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/distributions/von_mises.py
0.892931
0.515986
von_mises.py
pypi
import math
from numbers import Number
import torch
from torch.distributions import constraints
from torch.distributions.exp_family import ExponentialFamily
from torch.distributions.utils import _standard_normal, broadcast_all


class Normal(ExponentialFamily):
    r"""
    Gaussian distribution parameterized by its mean :attr:`loc` and its
    standard deviation :attr:`scale`.

    Example::

        >>> m = Normal(torch.tensor([0.0]), torch.tensor([1.0]))
        >>> m.sample()  # normally distributed with loc=0 and scale=1
        tensor([ 0.1046])

    Args:
        loc (float or Tensor): mean of the distribution (often referred to as mu)
        scale (float or Tensor): standard deviation of the distribution
            (often referred to as sigma)
    """
    arg_constraints = {'loc': constraints.real, 'scale': constraints.positive}
    support = constraints.real
    has_rsample = True
    _mean_carrier_measure = 0

    @property
    def mean(self):
        return self.loc

    @property
    def stddev(self):
        return self.scale

    @property
    def variance(self):
        return self.stddev.pow(2)

    def __init__(self, loc, scale, validate_args=None):
        self.loc, self.scale = broadcast_all(loc, scale)
        # Two plain Python numbers yield a scalar (empty) batch shape;
        # anything tensor-like takes its broadcasted shape.
        if isinstance(loc, Number) and isinstance(scale, Number):
            batch_shape = torch.Size()
        else:
            batch_shape = self.loc.size()
        super(Normal, self).__init__(batch_shape, validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        """Return a new Normal with parameters expanded to ``batch_shape``."""
        expanded = self._get_checked_instance(Normal, _instance)
        batch_shape = torch.Size(batch_shape)
        expanded.loc = self.loc.expand(batch_shape)
        expanded.scale = self.scale.expand(batch_shape)
        super(Normal, expanded).__init__(batch_shape, validate_args=False)
        expanded._validate_args = self._validate_args
        return expanded

    def sample(self, sample_shape=torch.Size()):
        """Draw a detached (non-differentiable) sample."""
        out_shape = self._extended_shape(sample_shape)
        with torch.no_grad():
            return torch.normal(self.loc.expand(out_shape),
                                self.scale.expand(out_shape))

    def rsample(self, sample_shape=torch.Size()):
        """Draw a reparameterized sample: loc + scale * standard_normal_noise."""
        out_shape = self._extended_shape(sample_shape)
        noise = _standard_normal(out_shape, dtype=self.loc.dtype,
                                 device=self.loc.device)
        return self.loc + noise * self.scale

    def log_prob(self, value):
        """Log of the Gaussian density evaluated at ``value``."""
        if self._validate_args:
            self._validate_sample(value)
        var = self.scale ** 2
        if isinstance(self.scale, Number):
            log_scale = math.log(self.scale)
        else:
            log_scale = self.scale.log()
        return (-((value - self.loc) ** 2) / (2 * var)
                - log_scale - math.log(math.sqrt(2 * math.pi)))

    def cdf(self, value):
        """Cumulative distribution function via the error function."""
        if self._validate_args:
            self._validate_sample(value)
        z = (value - self.loc) * self.scale.reciprocal() / math.sqrt(2)
        return 0.5 * (1 + torch.erf(z))

    def icdf(self, value):
        """Inverse CDF (quantile function) via the inverse error function."""
        if self._validate_args:
            self._validate_sample(value)
        return self.loc + self.scale * torch.erfinv(2 * value - 1) * math.sqrt(2)

    def entropy(self):
        """Differential entropy: 0.5 * log(2*pi*e*scale^2)."""
        return 0.5 + 0.5 * math.log(2 * math.pi) + torch.log(self.scale)

    @property
    def _natural_params(self):
        # Exponential-family natural parameters (mu/sigma^2, -1/(2*sigma^2)).
        return (self.loc / self.scale.pow(2), -0.5 * self.scale.pow(2).reciprocal())

    def _log_normalizer(self, x, y):
        return -0.25 * x.pow(2) / y + 0.5 * torch.log(-math.pi / y)
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/distributions/normal.py
0.952717
0.467879
normal.py
pypi
r""" The ``distributions`` package contains parameterizable probability distributions and sampling functions. This allows the construction of stochastic computation graphs and stochastic gradient estimators for optimization. This package generally follows the design of the `TensorFlow Distributions`_ package. .. _`TensorFlow Distributions`: https://arxiv.org/abs/1711.10604 It is not possible to directly backpropagate through random samples. However, there are two main methods for creating surrogate functions that can be backpropagated through. These are the score function estimator/likelihood ratio estimator/REINFORCE and the pathwise derivative estimator. REINFORCE is commonly seen as the basis for policy gradient methods in reinforcement learning, and the pathwise derivative estimator is commonly seen in the reparameterization trick in variational autoencoders. Whilst the score function only requires the value of samples :math:`f(x)`, the pathwise derivative requires the derivative :math:`f'(x)`. The next sections discuss these two in a reinforcement learning example. For more details see `Gradient Estimation Using Stochastic Computation Graphs`_ . .. _`Gradient Estimation Using Stochastic Computation Graphs`: https://arxiv.org/abs/1506.05254 Score function ^^^^^^^^^^^^^^ When the probability density function is differentiable with respect to its parameters, we only need :meth:`~torch.distributions.Distribution.sample` and :meth:`~torch.distributions.Distribution.log_prob` to implement REINFORCE: .. math:: \Delta\theta = \alpha r \frac{\partial\log p(a|\pi^\theta(s))}{\partial\theta} where :math:`\theta` are the parameters, :math:`\alpha` is the learning rate, :math:`r` is the reward and :math:`p(a|\pi^\theta(s))` is the probability of taking action :math:`a` in state :math:`s` given policy :math:`\pi^\theta`. 
In practice we would sample an action from the output of a network, apply this action in an environment, and then use ``log_prob`` to construct an equivalent loss function. Note that we use a negative because optimizers use gradient descent, whilst the rule above assumes gradient ascent. With a categorical policy, the code for implementing REINFORCE would be as follows:: probs = policy_network(state) # Note that this is equivalent to what used to be called multinomial m = Categorical(probs) action = m.sample() next_state, reward = env.step(action) loss = -m.log_prob(action) * reward loss.backward() Pathwise derivative ^^^^^^^^^^^^^^^^^^^ The other way to implement these stochastic/policy gradients would be to use the reparameterization trick from the :meth:`~torch.distributions.Distribution.rsample` method, where the parameterized random variable can be constructed via a parameterized deterministic function of a parameter-free random variable. The reparameterized sample therefore becomes differentiable. 
The code for implementing the pathwise derivative would be as follows:: params = policy_network(state) m = Normal(*params) # Any distribution with .has_rsample == True could work based on the application action = m.rsample() next_state, reward = env.step(action) # Assuming that reward is differentiable loss = -reward loss.backward() """ from .bernoulli import Bernoulli from .beta import Beta from .binomial import Binomial from .categorical import Categorical from .cauchy import Cauchy from .chi2 import Chi2 from .constraint_registry import biject_to, transform_to from .continuous_bernoulli import ContinuousBernoulli from .dirichlet import Dirichlet from .distribution import Distribution from .exp_family import ExponentialFamily from .exponential import Exponential from .fishersnedecor import FisherSnedecor from .gamma import Gamma from .geometric import Geometric from .gumbel import Gumbel from .half_cauchy import HalfCauchy from .half_normal import HalfNormal from .independent import Independent from .kl import kl_divergence, register_kl from .laplace import Laplace from .log_normal import LogNormal from .logistic_normal import LogisticNormal from .lowrank_multivariate_normal import LowRankMultivariateNormal from .mixture_same_family import MixtureSameFamily from .multinomial import Multinomial from .multivariate_normal import MultivariateNormal from .negative_binomial import NegativeBinomial from .normal import Normal from .one_hot_categorical import OneHotCategorical from .pareto import Pareto from .poisson import Poisson from .relaxed_bernoulli import RelaxedBernoulli from .relaxed_categorical import RelaxedOneHotCategorical from .studentT import StudentT from .transformed_distribution import TransformedDistribution from .transforms import * from .uniform import Uniform from .von_mises import VonMises from .weibull import Weibull __all__ = [ 'Bernoulli', 'Beta', 'Binomial', 'Categorical', 'Cauchy', 'Chi2', 'ContinuousBernoulli', 'Dirichlet', 'Distribution', 
'Exponential', 'ExponentialFamily', 'FisherSnedecor', 'Gamma', 'Geometric', 'Gumbel', 'HalfCauchy', 'HalfNormal', 'Independent', 'Laplace', 'LogNormal', 'LogisticNormal', 'LowRankMultivariateNormal', 'MixtureSameFamily', 'Multinomial', 'MultivariateNormal', 'NegativeBinomial', 'Normal', 'OneHotCategorical', 'Pareto', 'RelaxedBernoulli', 'RelaxedOneHotCategorical', 'StudentT', 'Poisson', 'Uniform', 'VonMises', 'Weibull', 'TransformedDistribution', 'biject_to', 'kl_divergence', 'register_kl', 'transform_to', ] __all__.extend(transforms.__all__)
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/distributions/__init__.py
0.961732
0.89974
__init__.py
pypi
import torch
from torch.distributions import constraints
from torch.distributions.exponential import Exponential
from torch.distributions.transformed_distribution import TransformedDistribution
from torch.distributions.transforms import AffineTransform, PowerTransform
from torch.distributions.utils import broadcast_all
from torch.distributions.gumbel import euler_constant


class Weibull(TransformedDistribution):
    r"""
    Two-parameter Weibull distribution.

    Example:

        >>> m = Weibull(torch.tensor([1.0]), torch.tensor([1.0]))
        >>> m.sample()  # sample from a Weibull distribution with scale=1, concentration=1
        tensor([ 0.4784])

    Args:
        scale (float or Tensor): Scale parameter of distribution (lambda).
        concentration (float or Tensor): Concentration parameter of distribution (k/shape).
    """
    arg_constraints = {'scale': constraints.positive, 'concentration': constraints.positive}
    support = constraints.positive

    def __init__(self, scale, concentration, validate_args=None):
        self.scale, self.concentration = broadcast_all(scale, concentration)
        self.concentration_reciprocal = self.concentration.reciprocal()
        # X ~ Weibull(lambda, k)  <=>  X = lambda * E ** (1/k) with E ~ Exponential(1),
        # expressed below as a transformed Exponential base distribution.
        base_dist = Exponential(torch.ones_like(self.scale))
        chain = [PowerTransform(exponent=self.concentration_reciprocal),
                 AffineTransform(loc=0, scale=self.scale)]
        super(Weibull, self).__init__(base_dist, chain, validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        """Return a new Weibull with parameters expanded to ``batch_shape``."""
        expanded = self._get_checked_instance(Weibull, _instance)
        expanded.scale = self.scale.expand(batch_shape)
        expanded.concentration = self.concentration.expand(batch_shape)
        expanded.concentration_reciprocal = expanded.concentration.reciprocal()
        base_dist = self.base_dist.expand(batch_shape)
        chain = [PowerTransform(exponent=expanded.concentration_reciprocal),
                 AffineTransform(loc=0, scale=expanded.scale)]
        super(Weibull, expanded).__init__(base_dist, chain, validate_args=False)
        expanded._validate_args = self._validate_args
        return expanded

    @property
    def mean(self):
        # E[X] = scale * Gamma(1 + 1/k); lgamma + exp keeps the gamma stable.
        return self.scale * torch.exp(torch.lgamma(1 + self.concentration_reciprocal))

    @property
    def variance(self):
        # Var[X] = scale^2 * (Gamma(1 + 2/k) - Gamma(1 + 1/k)^2)
        inv_k = self.concentration_reciprocal
        return self.scale.pow(2) * (torch.exp(torch.lgamma(1 + 2 * inv_k)) -
                                    torch.exp(2 * torch.lgamma(1 + inv_k)))

    def entropy(self):
        # H = gamma * (1 - 1/k) + log(scale / k) + 1, gamma = Euler-Mascheroni.
        inv_k = self.concentration_reciprocal
        return euler_constant * (1 - inv_k) + \
            torch.log(self.scale * inv_k) + 1
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/distributions/weibull.py
0.944931
0.590425
weibull.py
pypi
from numbers import Number
import torch
from torch.distributions import constraints
from torch.distributions.exp_family import ExponentialFamily
from torch.distributions.utils import broadcast_all


def _standard_gamma(concentration):
    # Thin wrapper over the reparameterized standard-gamma sampler.
    return torch._standard_gamma(concentration)


class Gamma(ExponentialFamily):
    r"""
    Gamma distribution parameterized by shape :attr:`concentration` and
    :attr:`rate`.

    Example::

        >>> m = Gamma(torch.tensor([1.0]), torch.tensor([1.0]))
        >>> m.sample()  # Gamma distributed with concentration=1 and rate=1
        tensor([ 0.1046])

    Args:
        concentration (float or Tensor): shape parameter of the distribution
            (often referred to as alpha)
        rate (float or Tensor): rate = 1 / scale of the distribution
            (often referred to as beta)
    """
    arg_constraints = {'concentration': constraints.positive, 'rate': constraints.positive}
    support = constraints.positive
    has_rsample = True
    _mean_carrier_measure = 0

    @property
    def mean(self):
        return self.concentration / self.rate

    @property
    def variance(self):
        return self.concentration / self.rate.pow(2)

    def __init__(self, concentration, rate, validate_args=None):
        self.concentration, self.rate = broadcast_all(concentration, rate)
        # Two plain numbers give a scalar batch; tensors keep the broadcast shape.
        if isinstance(concentration, Number) and isinstance(rate, Number):
            batch_shape = torch.Size()
        else:
            batch_shape = self.concentration.size()
        super(Gamma, self).__init__(batch_shape, validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        """Return a new Gamma with parameters expanded to ``batch_shape``."""
        expanded = self._get_checked_instance(Gamma, _instance)
        batch_shape = torch.Size(batch_shape)
        expanded.concentration = self.concentration.expand(batch_shape)
        expanded.rate = self.rate.expand(batch_shape)
        super(Gamma, expanded).__init__(batch_shape, validate_args=False)
        expanded._validate_args = self._validate_args
        return expanded

    def rsample(self, sample_shape=torch.Size()):
        """Reparameterized sample: standard_gamma(concentration) / rate."""
        shape = self._extended_shape(sample_shape)
        draw = _standard_gamma(self.concentration.expand(shape)) / self.rate.expand(shape)
        # Clamp exact zeros through the detached view, which shares storage,
        # so the clamp is never recorded in the autograd graph.
        draw.detach().clamp_(min=torch.finfo(draw.dtype).tiny)
        return draw

    def log_prob(self, value):
        """Log density; ``value`` may be a number and is promoted to a tensor."""
        value = torch.as_tensor(value, dtype=self.rate.dtype, device=self.rate.device)
        if self._validate_args:
            self._validate_sample(value)
        return (self.concentration * torch.log(self.rate)
                + (self.concentration - 1) * torch.log(value)
                - self.rate * value
                - torch.lgamma(self.concentration))

    def entropy(self):
        """Differential entropy of the Gamma distribution."""
        return (self.concentration - torch.log(self.rate)
                + torch.lgamma(self.concentration)
                + (1.0 - self.concentration) * torch.digamma(self.concentration))

    @property
    def _natural_params(self):
        # Exponential-family natural parameters (alpha - 1, -beta).
        return (self.concentration - 1, -self.rate)

    def _log_normalizer(self, x, y):
        return torch.lgamma(x + 1) + (x + 1) * torch.log(-y.reciprocal())
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/distributions/gamma.py
0.960482
0.626024
gamma.py
pypi
import torch
import torch.nn.functional as F
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.utils import broadcast_all, probs_to_logits, lazy_property, logits_to_probs


class NegativeBinomial(Distribution):
    r"""
    Negative Binomial distribution: the number of successful independent and
    identical Bernoulli trials before :attr:`total_count` failures are
    achieved. The probability of success of each trial is :attr:`probs`.

    Args:
        total_count (float or Tensor): non-negative number of negative
            Bernoulli trials to stop, although the distribution is still
            valid for real valued count
        probs (Tensor): Event probabilities of success in the half open
            interval [0, 1)
        logits (Tensor): Event log-odds for probabilities of success
    """
    arg_constraints = {'total_count': constraints.greater_than_eq(0),
                       'probs': constraints.half_open_interval(0., 1.),
                       'logits': constraints.real}
    support = constraints.nonnegative_integer

    def __init__(self, total_count, probs=None, logits=None, validate_args=None):
        # Exactly one of probs / logits must be given.
        if (probs is None) == (logits is None):
            raise ValueError("Either `probs` or `logits` must be specified, but not both.")
        if probs is not None:
            self.total_count, self.probs = broadcast_all(total_count, probs)
            self.total_count = self.total_count.type_as(self.probs)
            self._param = self.probs
        else:
            self.total_count, self.logits = broadcast_all(total_count, logits)
            self.total_count = self.total_count.type_as(self.logits)
            self._param = self.logits
        super(NegativeBinomial, self).__init__(self._param.size(),
                                               validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        """Return a new NegativeBinomial expanded to ``batch_shape``."""
        expanded = self._get_checked_instance(NegativeBinomial, _instance)
        batch_shape = torch.Size(batch_shape)
        expanded.total_count = self.total_count.expand(batch_shape)
        # Only copy the parameterization(s) actually materialized on self,
        # so the lazy probs/logits conversion is preserved on the new instance.
        if 'probs' in self.__dict__:
            expanded.probs = self.probs.expand(batch_shape)
            expanded._param = expanded.probs
        if 'logits' in self.__dict__:
            expanded.logits = self.logits.expand(batch_shape)
            expanded._param = expanded.logits
        super(NegativeBinomial, expanded).__init__(batch_shape, validate_args=False)
        expanded._validate_args = self._validate_args
        return expanded

    def _new(self, *args, **kwargs):
        return self._param.new(*args, **kwargs)

    @property
    def mean(self):
        # E[X] = total_count * p / (1 - p) = total_count * exp(logits)
        return self.total_count * torch.exp(self.logits)

    @property
    def variance(self):
        # Var[X] = mean / (1 - p) = mean / sigmoid(-logits)
        return self.mean / torch.sigmoid(-self.logits)

    @lazy_property
    def logits(self):
        return probs_to_logits(self.probs, is_binary=True)

    @lazy_property
    def probs(self):
        return logits_to_probs(self.logits, is_binary=True)

    @property
    def param_shape(self):
        return self._param.size()

    @lazy_property
    def _gamma(self):
        # Gamma mixing distribution of the Gamma-Poisson representation.
        return torch.distributions.Gamma(concentration=self.total_count,
                                         rate=torch.exp(-self.logits))

    def sample(self, sample_shape=torch.Size()):
        """Sample via the Gamma-Poisson mixture representation."""
        with torch.no_grad():
            rate = self._gamma.sample(sample_shape=sample_shape)
            return torch.poisson(rate)

    def log_prob(self, value):
        """Log probability mass at ``value``."""
        if self._validate_args:
            self._validate_sample(value)
        unnorm = (self.total_count * F.logsigmoid(-self.logits)
                  + value * F.logsigmoid(self.logits))
        log_norm = (-torch.lgamma(self.total_count + value)
                    + torch.lgamma(1. + value)
                    + torch.lgamma(self.total_count))
        return unnorm - log_norm
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/distributions/negative_binomial.py
0.921118
0.582966
negative_binomial.py
pypi
import torch
from numbers import Number
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.transformed_distribution import TransformedDistribution
from torch.distributions.transforms import SigmoidTransform
from torch.distributions.utils import broadcast_all, probs_to_logits, logits_to_probs, lazy_property, clamp_probs


class LogitRelaxedBernoulli(Distribution):
    r"""
    LogitRelaxedBernoulli distribution parameterized by :attr:`probs` or
    :attr:`logits` (but not both), which is the logit of a RelaxedBernoulli
    distribution. Samples are logits of values in (0, 1). See [1] for more
    details.

    Args:
        temperature (Tensor): relaxation temperature
        probs (Number, Tensor): the probability of sampling `1`
        logits (Number, Tensor): the log-odds of sampling `1`

    [1] The Concrete Distribution: A Continuous Relaxation of Discrete Random
    Variables (Maddison et al, 2017)

    [2] Categorical Reparametrization with Gumbel-Softmax
    (Jang et al, 2017)
    """
    arg_constraints = {'probs': constraints.unit_interval,
                       'logits': constraints.real}
    support = constraints.real

    def __init__(self, temperature, probs=None, logits=None, validate_args=None):
        self.temperature = temperature
        # Exactly one of probs / logits must be given.
        if (probs is None) == (logits is None):
            raise ValueError("Either `probs` or `logits` must be specified, but not both.")
        if probs is not None:
            is_scalar = isinstance(probs, Number)
            self.probs, = broadcast_all(probs)
            self._param = self.probs
        else:
            is_scalar = isinstance(logits, Number)
            self.logits, = broadcast_all(logits)
            self._param = self.logits
        batch_shape = torch.Size() if is_scalar else self._param.size()
        super(LogitRelaxedBernoulli, self).__init__(batch_shape,
                                                    validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        """Return a new LogitRelaxedBernoulli expanded to ``batch_shape``."""
        expanded = self._get_checked_instance(LogitRelaxedBernoulli, _instance)
        batch_shape = torch.Size(batch_shape)
        expanded.temperature = self.temperature
        # Only copy whichever parameterization is materialized, keeping the
        # lazy probs/logits conversion intact on the new instance.
        if 'probs' in self.__dict__:
            expanded.probs = self.probs.expand(batch_shape)
            expanded._param = expanded.probs
        if 'logits' in self.__dict__:
            expanded.logits = self.logits.expand(batch_shape)
            expanded._param = expanded.logits
        super(LogitRelaxedBernoulli, expanded).__init__(batch_shape,
                                                        validate_args=False)
        expanded._validate_args = self._validate_args
        return expanded

    def _new(self, *args, **kwargs):
        return self._param.new(*args, **kwargs)

    @lazy_property
    def logits(self):
        return probs_to_logits(self.probs, is_binary=True)

    @lazy_property
    def probs(self):
        return logits_to_probs(self.logits, is_binary=True)

    @property
    def param_shape(self):
        return self._param.size()

    def rsample(self, sample_shape=torch.Size()):
        """Reparameterized sample: (logit(U) + logit(p)) / temperature."""
        shape = self._extended_shape(sample_shape)
        probs = clamp_probs(self.probs.expand(shape))
        uniforms = clamp_probs(torch.rand(shape, dtype=probs.dtype,
                                          device=probs.device))
        logit_u = uniforms.log() - (-uniforms).log1p()
        logit_p = probs.log() - (-probs).log1p()
        return (logit_u + logit_p) / self.temperature

    def log_prob(self, value):
        """Log density of the relaxed distribution in logit space."""
        if self._validate_args:
            self._validate_sample(value)
        logits, value = broadcast_all(self.logits, value)
        diff = logits - value.mul(self.temperature)
        return self.temperature.log() + diff - 2 * diff.exp().log1p()


class RelaxedBernoulli(TransformedDistribution):
    r"""
    RelaxedBernoulli distribution, parametrized by :attr:`temperature`, and
    either :attr:`probs` or :attr:`logits` (but not both). This is a relaxed
    version of the `Bernoulli` distribution, so the values are in (0, 1), and
    has reparametrizable samples.

    Example::

        >>> m = RelaxedBernoulli(torch.tensor([2.2]),
                                 torch.tensor([0.1, 0.2, 0.3, 0.99]))
        >>> m.sample()
        tensor([ 0.2951,  0.3442,  0.8918,  0.9021])

    Args:
        temperature (Tensor): relaxation temperature
        probs (Number, Tensor): the probability of sampling `1`
        logits (Number, Tensor): the log-odds of sampling `1`
    """
    arg_constraints = {'probs': constraints.unit_interval,
                       'logits': constraints.real}
    support = constraints.unit_interval
    has_rsample = True

    def __init__(self, temperature, probs=None, logits=None, validate_args=None):
        # Sigmoid-squash the logit-space relaxed distribution into (0, 1).
        base_dist = LogitRelaxedBernoulli(temperature, probs, logits)
        super(RelaxedBernoulli, self).__init__(base_dist,
                                               SigmoidTransform(),
                                               validate_args=validate_args)

    def expand(self, batch_shape, _instance=None):
        """Return a new RelaxedBernoulli expanded to ``batch_shape``."""
        expanded = self._get_checked_instance(RelaxedBernoulli, _instance)
        return super(RelaxedBernoulli, self).expand(batch_shape, _instance=expanded)

    @property
    def temperature(self):
        return self.base_dist.temperature

    @property
    def logits(self):
        return self.base_dist.logits

    @property
    def probs(self):
        return self.base_dist.probs
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/distributions/relaxed_bernoulli.py
0.93769
0.6346
relaxed_bernoulli.py
pypi
from __future__ import absolute_import, division, print_function, unicode_literals

import torch
from .qconfig import QConfig
from torch.jit._recursive import wrap_cpp_module


class ConvPackedParams(torch.nn.Module):
    # Scriptable container for prepacked quantized conv2d weights plus the
    # conv geometry (stride/padding/dilation/groups) needed to pack them.

    def __init__(self):
        super(ConvPackedParams, self).__init__()
        # Placeholder 1x1x1x1 qint8 weight; real weights arrive later via
        # set_weight_bias().
        wq = torch._empty_affine_quantized([1, 1, 1, 1], scale=1.0, zero_point=0, dtype=torch.qint8)
        self.stride = [1, 1]
        self.padding = [0, 0]
        self.dilation = [1, 1]
        self.groups = 1
        self.set_weight_bias(wq, None)

    @torch.jit.export
    def set_conv_params(self, stride, padding, dilation, groups):
        # type: (List[int], List[int], List[int], int) -> None
        # NOTE(review): changing these does not re-pack the stored weight;
        # presumably callers invoke set_weight_bias() afterwards — confirm.
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = groups

    @torch.jit.export
    def set_weight_bias(self, weight, bias):
        # type: (torch.Tensor, Optional[torch.Tensor]) -> None
        # Pack weight + bias with the current conv geometry.
        self._packed_params = torch.ops.quantized.conv2d_prepack(weight, bias, self.stride, self.padding, self.dilation, self.groups)

    @torch.jit.export
    def _weight_bias(self):
        # Unpack back to (weight, bias) for serialization/inspection.
        return torch.ops.quantized.conv2d_unpack(self._packed_params)

    def forward(self, x):
        # Identity: this module only carries state.
        return x

    @torch.jit.export
    def __getstate__(self):
        # Serialize unpacked tensors plus geometry; the packed form is
        # backend-specific and rebuilt on load.
        qweight, bias = self._weight_bias()
        return (qweight, bias, self.stride, self.padding, self.dilation, self.groups, self.training)

    @torch.jit.export
    def __setstate__(self, state):
        # Restore geometry first — set_weight_bias() reads it when re-packing.
        self.stride = state[2]
        self.padding = state[3]
        self.dilation = state[4]
        self.groups = state[5]
        self.set_weight_bias(state[0], state[1])
        self.training = state[6]


# Pre-scripted singleton packed-params modules, available only when the
# fbgemm quantization backend is present.
linear_packed_params = None
conv_packed_params = None
if 'fbgemm' in torch.backends.quantized.supported_engines:
    linear_packed_params = torch.jit.script(torch.nn.quantized.modules.linear.LinearPackedParams())._c
    conv_packed_params = torch.jit.script(ConvPackedParams())._c


def _check_is_script_module(model):
    # Guard: the JIT quantization passes below only accept ScriptModules.
    if not isinstance(model, torch.jit.ScriptModule):
        raise ValueError('input must be a script module, got: ' + str(type(model)))


def prepare_script(model, qconfig_dict, inplace=False):
    """Insert observer modules into ``model``'s forward per ``qconfig_dict``.

    Works on a copy unless ``inplace`` is True; returns the observed model.
    """
    _check_is_script_module(model)
    if not inplace:
        model = model.copy()
    model = wrap_cpp_module(torch._C._jit_pass_insert_observers(model._c,
                                                                'forward',
                                                                qconfig_dict,
                                                                False))
    return model


def convert_script(model, inplace=False, debug=False):
    """Replace observers with quant/dequant ops and finalize quantization.

    ``debug=True`` skips the finalize pass so intermediate quant/dequant
    nodes remain visible. Puts the model in eval mode; returns the model.
    """
    _check_is_script_module(model)
    if not inplace:
        model = model.copy()
    model.eval()
    model = wrap_cpp_module(torch._C._jit_pass_insert_quant_dequant(model._c, 'forward', False))
    if not debug:
        model = wrap_cpp_module(torch._C._jit_pass_quant_finalize(model._c))
    return model


# TODO: non-scriptable QConfig will be supported later
def script_qconfig(qconfig):
    """Script a QConfig's observer factories so the JIT passes can use them."""
    return QConfig(
        activation=torch.jit.script(qconfig.activation())._c,
        weight=torch.jit.script(qconfig.weight())._c)


def quantize_script(model, qconfig_dict, run_fn, run_args, inplace=False, debug=False):
    """Post-training quantization pipeline for a ScriptModule.

    Folds conv+bn, inserts observers (prepare_script), calibrates by running
    ``run_fn(forward_method, *run_args)``, then converts (convert_script).
    ``inplace=True`` is currently rejected; returns the quantized model.
    """
    _check_is_script_module(model)
    if not model._c._has_method('forward'):
        raise ValueError('input script module does not have forward method')
    assert not inplace, "We don't support inplace right now"
    if not inplace:
        model = model.copy()
    scripted_qconfig_dict = {k: script_qconfig(v) for k, v in qconfig_dict.items()}
    # Dedup shared submodule uses before graph rewrites so each use can get
    # its own observer state.
    torch._C._jit_pass_dedup_module_uses(model._c)
    model = wrap_cpp_module(torch._C._jit_pass_fold_convbn(model._c))
    model = prepare_script(model, scripted_qconfig_dict, True)
    run_fn(model._c._get_method('forward'), *run_args)
    model = convert_script(model, True, debug)
    return model
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/quantization/_quantize_script.py
0.878295
0.357806
_quantize_script.py
pypi
r"""Eager-mode quantization API: qconfig propagation, observer insertion,
calibration-driven (`quantize`), dynamic (`quantize_dynamic`) and
quantization-aware-training (`quantize_qat`) flows, plus module swapping
(`convert` / `swap_module`).
"""
from __future__ import absolute_import, division, print_function, unicode_literals

import copy
import itertools
import warnings

import torch
import torch.nn as nn
import torch.nn.intrinsic as nni
import torch.nn.quantized as nnq

from .default_mappings import (DEFAULT_DYNAMIC_MODULE_MAPPING,
                               DEFAULT_MODULE_MAPPING,
                               DEFAULT_QAT_MODULE_MAPPING,
                               DEFAULT_QCONFIG_PROPAGATE_WHITE_LIST)
from .stubs import DeQuantStub, QuantWrapper
from .qconfig import default_dynamic_qconfig, float16_dynamic_qconfig


def _propagate_qconfig_helper(module, qconfig_dict, white_list=None,
                              qconfig_parent=None, prefix=''):
    r"""This is a helper function for `propagate_qconfig_`

    Args:
        module: input module
        qconfig_dict: dictionary that maps from name of submodule to quantization
                     configuration
        white_list: list of quantizable modules
        qconfig_parent: quantization config of parent module, we will fallback to
                       this config when there is no specified config for current
                       module
        prefix: corresponding prefix of the current module, used as key in
                qconfig_dict

    Return:
        None, module is modified inplace with qconfig attached
    """
    # TODO: Add test
    if white_list is None:
        white_list = DEFAULT_QCONFIG_PROPAGATE_WHITE_LIST
    # Precedence (lowest to highest): parent's qconfig, type-keyed entry,
    # name-keyed entry, a qconfig already set directly on the module.
    module_qconfig = qconfig_dict.get(type(module), qconfig_parent)
    module_qconfig = qconfig_dict.get(prefix, module_qconfig)
    module_qconfig = getattr(module, 'qconfig', module_qconfig)

    if type(module) in white_list:
        module.qconfig = module_qconfig
    for name, child in module.named_children():
        module_prefix = prefix + '.' + name if prefix else name
        _propagate_qconfig_helper(child, qconfig_dict, white_list,
                                  module_qconfig, module_prefix)

# TODO(jerryzh): expose white_list
def propagate_qconfig_(module, qconfig_dict=None):
    r"""Propagate qconfig through the module hierarchy and assign `qconfig`
    attribute on each leaf module

    Args:
        module: input module
        qconfig_dict: dictionary that maps from name or type of submodule to
            quantization configuration, qconfig applies to all submodules of a
            given module unless qconfig for the submodules are specified (when
            the submodule already has qconfig attribute)

    Return:
        None, module is modified inplace with qconfig attached
    """
    if qconfig_dict is None:
        qconfig_dict = {}
    _propagate_qconfig_helper(module, qconfig_dict)

def _observer_forward_hook(self, input, output):
    r"""Forward hook that calls observer on the output
    """
    return self.activation_post_process(output)

def add_observer_(module):
    r"""Add observer for the leaf child of the module.

    This function insert observer module to all leaf child module that
    has a valid qconfig attribute.

    Args:
        module: input module with qconfig attributes for all the leaf modules that
        we want to quantize

    Return:
        None, module is modified inplace with added observer modules and
            forward_hooks
    """
    for child in module.children():
        # FloatFunctional observes its own output internally instead of via a
        # forward hook.
        if type(child) == nnq.FloatFunctional:
            if hasattr(child, 'qconfig') and child.qconfig is not None:
                child.activation_post_process = child.qconfig.activation()
        else:
            add_observer_(child)

    # Insert observers only for leaf nodes, note that this observer is for
    # the output of the module, for input QuantStub will observe them
    if hasattr(module, 'qconfig') and module.qconfig is not None and \
            len(module._modules) == 0 and not isinstance(module, torch.nn.Sequential):
        # observer and hook will be gone after we swap the module
        module.add_module('activation_post_process', module.qconfig.activation())
        module.register_forward_hook(_observer_forward_hook)

def add_quant_dequant(module):
    r"""Wrap the leaf child module in QuantWrapper if it has a valid qconfig
    Note that this function will modify the children of module inplace and it
    can return a new module which wraps the input module as well.

    Args:
        module: input module with qconfig attributes for all the leaf modules
        that we want to quantize

    Return:
        Either the inplace modified module with submodules wrapped in
        `QuantWrapper` based on qconfig or a new `QuantWrapper` module which
        wraps the input module, the latter case only happens when the input
        module is a leaf module and we want to quantize it.
    """
    if len(module._modules) == 0 and hasattr(module, 'qconfig') and module.qconfig:
        return QuantWrapper(module)

    for name, child in module.named_children():
        module._modules[name] = add_quant_dequant(child)
    return module

def prepare(model, inplace=False):
    r"""Prepares a copy of the model for quantization calibration or quantization-aware training.

    Quantization configuration should be assigned preemptively
    to individual submodules in `.qconfig` attribute.

    The model will be attached with observer or fake quant modules, and qconfig
    will be propagated.

    Args:
        model: input model to be modified in-place
        inplace: carry out model transformations in-place, the original module is mutated
    """
    if not inplace:
        model = copy.deepcopy(model)
    propagate_qconfig_(model)
    # sanity check common API misusage
    if not any(hasattr(m, 'qconfig') and m.qconfig for m in model.modules()):
        warnings.warn("None of the submodule got qconfig applied. Make sure you "
                      "passed correct configuration through `qconfig_dict` or "
                      "by assigning the `.qconfig` attribute directly on submodules")
    add_observer_(model)
    return model

def quantize(model, run_fn, run_args, mapping=None, inplace=False):
    r"""Converts a float model to quantized model.

    First it will prepare the model for calibration or training, then it calls
    `run_fn` which will run the calibration step or training step,
    after that we will call `convert` which will convert the model to a
    quantized model.

    Args:
        model: input model
        run_fn: a function for evaluating the prepared model, can be a
            function that simply runs the prepared model or a training loop
        run_args: positional arguments for `run_fn`
        inplace: carry out model transformations in-place, the original module is mutated
        mapping: correspondence between original module types and quantized counterparts

    Return:
        Quantized model.
    """
    if mapping is None:
        mapping = DEFAULT_MODULE_MAPPING
    if not inplace:
        model = copy.deepcopy(model)
    model.eval()
    prepare(model, inplace=True)
    # NOTE(review): run_args is passed as one positional argument, not
    # unpacked with * — run_fn must accept it as a single value. Confirm
    # against callers before changing.
    run_fn(model, run_args)
    convert(model, mapping, inplace=True)
    return model

def quantize_dynamic(model, qconfig_spec=None, dtype=torch.qint8,
                     mapping=None, inplace=False):
    r"""Converts a float model to dynamic (i.e. weights-only) quantized model.

    Replaces specified modules with dynamic weight-only quantized versions and output the quantized model.

    For simplest usage provide `dtype` argument that can be float16 or qint8. Weight-only quantization
    by default is performed for layers with large weights size - i.e. Linear and RNN variants.

    Fine grained control is possible with `qconfig` and `mapping` that act similarly to `quantize()`.
    If `qconfig` is provided, the `dtype` argument is ignored.

    Args:
        module: input model
        qconfig_spec: Either:

            - A dictionary that maps from name or type of submodule to quantization
              configuration, qconfig applies to all submodules of a given
              module unless qconfig for the submodules are specified (when the
              submodule already has qconfig attribute). Entries in the dictionary
              need to be QConfigDynamic instances.

            - A set of types and/or submodule names to apply dynamic quantization to,
              in which case the `dtype` argument is used to specify the bit-width

        inplace: carry out model transformations in-place, the original module is mutated
        mapping: maps type of a submodule to a type of corresponding dynamically quantized version
            with which the submodule needs to be replaced
    """
    if qconfig_spec is None:
        # Default: dynamically quantize Linear and LSTM only.
        if dtype == torch.qint8:
            qconfig_spec = {
                nn.Linear : default_dynamic_qconfig,
                nn.LSTM : default_dynamic_qconfig,
            }
        elif dtype == torch.float16:
            qconfig_spec = {
                nn.Linear : float16_dynamic_qconfig,
                nn.LSTM : float16_dynamic_qconfig,
            }
        else:
            raise ValueError(
                "Don't know how to quantize with default settings for {}. Provide full qconfig please".format(dtype))
    elif isinstance(qconfig_spec, set):
        # A bare set of names/types: expand to a dict using the dtype-derived
        # default qconfig for every entry.
        if dtype is torch.qint8:
            default_qconfig = default_dynamic_qconfig
        elif dtype is torch.float16:
            default_qconfig = float16_dynamic_qconfig
        else:
            raise RuntimeError('Unknown dtype specified for quantize_dynamic: ', str(dtype))
        qconfig_spec = dict(zip(qconfig_spec, itertools.repeat(default_qconfig)))

    if mapping is None:
        mapping = DEFAULT_DYNAMIC_MODULE_MAPPING

    if not inplace:
        model = copy.deepcopy(model)
    model.eval()
    propagate_qconfig_(model, qconfig_spec)
    # No calibration step needed: dynamic quantization computes activation
    # qparams at runtime, so we convert directly.
    convert(model, mapping, inplace=True)
    return model

def prepare_qat(model, mapping=None, inplace=False):
    r"""
    Prepares a copy of the model for quantization calibration or
    quantization-aware training and converts it to quantized version.

    Quantization configuration should be assigned preemptively
    to individual submodules in `.qconfig` attribute.

    Args:
        model: input model to be modified in-place
        mapping: dictionary that maps float modules to quantized modules to be
                 replaced.
        inplace: carry out model transformations in-place, the original module
                 is mutated
    """
    if mapping is None:
        mapping = DEFAULT_QAT_MODULE_MAPPING
    model = prepare(model, inplace=inplace)
    # Swap float modules for their fake-quantized QAT counterparts.
    convert(model, mapping, inplace=True)
    return model

def quantize_qat(model, run_fn, run_args, inplace=False):
    r"""Do quantization aware training and output a quantized model

    Args:
        model: input model
        run_fn: a function for evaluating the prepared model, can be a
                function that simply runs the prepared model or a training
                loop
        run_args: positional arguments for `run_fn`

    Return:
        Quantized model.
    """
    if not inplace:
        model = copy.deepcopy(model)
    model.train()
    prepare_qat(model, inplace=True)
    # NOTE(review): like `quantize`, run_args is passed unexpanded (no *).
    run_fn(model, run_args)
    convert(model, inplace=True)
    return model

def convert(module, mapping=None, inplace=False):
    r"""Converts the float module with observers (where we can get quantization
    parameters) to a quantized module.

    Args:
        module: calibrated module with observers
        mapping: a dictionary that maps from float module type to quantized
                 module type, can be overwritten to allow swapping user defined
                 Modules
        inplace: carry out model transformations in-place, the original module
                 is mutated
    """
    if mapping is None:
        mapping = DEFAULT_MODULE_MAPPING
    if not inplace:
        module = copy.deepcopy(module)
    reassign = {}
    # TODO(jerryzh): remove after deciding on the impl of intrinsic modules
    # This is required because intrinsic modules right now are implemented as
    # nn.Sequential and we don't want to swap their constituents
    SWAPPABLE_MODULES = (nni.ConvBn2d,
                         nni.ConvBnReLU2d,
                         nni.LinearReLU,
                         nni.ConvReLU2d,
                         nni.ConvReLU3d)

    for name, mod in module.named_children():
        # Recurse into everything except intrinsic fused modules, which are
        # swapped as a whole.
        if type(mod) not in SWAPPABLE_MODULES:
            convert(mod, mapping, inplace=True)
        reassign[name] = swap_module(mod, mapping)

    # Reassign after iteration so we don't mutate _modules while walking it.
    for key, value in reassign.items():
        module._modules[key] = value

    return module

def swap_module(mod, mapping):
    r"""Swaps the module if it has a quantized counterpart and it has an
    `observer` attached.

    Args:
        mod: input module
        mapping: a dictionary that maps from nn module to nnq module

    Return:
        The corresponding quantized module of `mod`
    """
    new_mod = mod
    # Always replace dequantstub with dequantize
    # (precedence: (has qconfig AND qconfig not None) OR DeQuantStub)
    if hasattr(mod, 'qconfig') and mod.qconfig is not None or type(mod) == DeQuantStub:
        if type(mod) in mapping:
            new_mod = mapping[type(mod)].from_float(mod)
    return new_mod

def get_observer_dict(mod, target_dict, prefix=""):
    r"""Traverse the modules and save all observers into dict.
    This is mainly used for quantization accuracy debug
    Args:
        mod: the top module we want to save all observers
        prefix: the prefix for the current module
        target_dict: the dictionary used to save all the observers
    """
    def get_prefix(prefix):
        return prefix if prefix == "" else prefix + '.'

    if hasattr(mod, 'activation_post_process'):
        target_dict[get_prefix(prefix) + 'activation_post_process'] = mod.activation_post_process
    for name, child in mod.named_children():
        module_prefix = get_prefix(prefix) + name if prefix else name
        get_observer_dict(child, target_dict, module_prefix)
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/quantization/quantize.py
0.800341
0.213336
quantize.py
pypi
r"""Module fusion utilities: fuse Conv+BN(+ReLU) and Linear+ReLU sequences
into single (intrinsic) modules prior to quantization.
"""
from __future__ import absolute_import, division, print_function, unicode_literals

import torch
import copy
import torch.nn.intrinsic.modules.fused as torch_fused

def fuse_conv_bn(conv, bn):
    r"""Given the conv and bn modules, fuses them and returns the fused module

    Args:
        conv: Module instance of type conv2d
        bn: Spatial BN instance that needs to be fused with the conv

    Examples::

        >>> m1 = nn.Conv2d(10, 20, 3)
        >>> b1 = nn.BatchNorm2d(20)
        >>> m2 = fuse_conv_bn(m1, b1)
    """
    assert(conv.training == bn.training),\
        "Conv and BN both must be in the same mode (train or eval)."

    if conv.training:
        # Training: keep both modules wrapped so BN statistics keep updating.
        assert conv.bias is None, 'Only support fusing Conv2d that does not have bias'
        assert bn.num_features == conv.out_channels, 'Output channel of Conv2d must match num_features of BatchNorm2d'
        assert bn.affine, 'Only support fusing BatchNorm2d with affine set to True'
        assert bn.track_running_stats, 'Only support fusing BatchNorm2d with tracking_running_stats set to True'
        return torch.nn.intrinsic.ConvBn2d(conv, bn)
    else:
        # Eval: fold BN parameters directly into the conv weights/bias.
        return torch.nn.utils.fuse_conv_bn_eval(conv, bn)

def fuse_conv_bn_relu(conv, bn, relu):
    r"""Given the conv, bn and relu modules, fuses them and returns the fused
    module

    Args:
        conv: Module instance of type conv2d
        bn: Spatial BN instance that needs to be fused with the conv
        relu: ReLU instance that follows the BN

    Examples::

        >>> m1 = nn.Conv2d(10, 20, 3)
        >>> b1 = nn.BatchNorm2d(20)
        >>> m2 = fuse_conv_bn(m1, b1)
    """
    assert(conv.training == bn.training == relu.training),\
        "Conv and BN both must be in the same mode (train or eval)."

    if conv.training:
        return torch_fused.ConvBnReLU2d(conv, bn, relu)
    else:
        return torch_fused.ConvReLU2d(
            torch.nn.utils.fusion.fuse_conv_bn_eval(conv, bn),
            relu)

# Generalization of getattr
def _get_module(model, submodule_key):
    """Resolve a dotted submodule path (e.g. 'layer1.0.conv') to a module."""
    tokens = submodule_key.split('.')
    cur_mod = model
    for s in tokens:
        cur_mod = getattr(cur_mod, s)
    return cur_mod

# Generalization of setattr
def _set_module(model, submodule_key, module):
    """Set a module at a dotted submodule path, replacing the old one."""
    tokens = submodule_key.split('.')
    sub_tokens = tokens[:-1]
    cur_mod = model
    for s in sub_tokens:
        cur_mod = getattr(cur_mod, s)

    setattr(cur_mod, tokens[-1], module)

def fuse_known_modules(mod_list):
    r"""Returns a list of modules that fuses the operations specified
     in the input module list.

    Fuses only the following sequence of modules:
    conv, bn
    conv, bn, relu
    conv, relu
    linear, relu
    For these sequences, the first element in the output module list performs
    the fused operation. The rest of the elements are set to nn.Identity()
    """
    OP_LIST_TO_FUSER_METHOD = {
        (torch.nn.Conv2d, torch.nn.BatchNorm2d): fuse_conv_bn,
        (torch.nn.Conv2d, torch.nn.BatchNorm2d, torch.nn.ReLU): fuse_conv_bn_relu,
        (torch.nn.Conv2d, torch.nn.ReLU): torch.nn.intrinsic.ConvReLU2d,
        (torch.nn.Linear, torch.nn.ReLU): torch.nn.intrinsic.LinearReLU
    }

    types = tuple(type(m) for m in mod_list)
    fuser_method = OP_LIST_TO_FUSER_METHOD.get(types, None)
    if fuser_method is None:
        raise NotImplementedError("Cannot fuse modules: {}".format(types))
    new_mod = [None] * len(mod_list)
    new_mod[0] = fuser_method(*mod_list)

    # Replace the remaining slots with Identity, preserving the training flag.
    for i in range(1, len(mod_list)):
        new_mod[i] = torch.nn.Identity()
        new_mod[i].training = mod_list[0].training

    return new_mod

def _fuse_modules(model, modules_to_fuse, fuser_func=fuse_known_modules):
    """Fuse one list of dotted module names inside `model`, in place."""
    mod_list = []
    for item in modules_to_fuse:
        mod_list.append(_get_module(model, item))

    # Fuse list of modules
    new_mod_list = fuser_func(mod_list)

    # Replace original module list with fused module list
    for i, item in enumerate(modules_to_fuse):
        _set_module(model, item, new_mod_list[i])

def fuse_modules(model, modules_to_fuse, inplace=False, fuser_func=fuse_known_modules):
    r"""Fuses a list of modules into a single module

    Fuses only the following sequence of modules:

    * conv, bn
    * conv, bn, relu
    * conv, relu
    * linear, relu

    All other sequences are left unchanged.
    For these sequences, replaces the first item in the list
    with the fused module, replacing the rest of the modules
    with identity.

    Arguments:
        model: Model containing the modules to be fused
        modules_to_fuse: list of list of module names to fuse. Can also be a
                         list of strings if there is only a single list of
                         modules to fuse.
        inplace: bool specifying if fusion happens in place on the model, by
                 default a new model is returned
        fuser_func: Function that takes in a list of modules and outputs a list
                    of fused modules of the same length. For example,
                    fuser_func([convModule, BNModule]) returns the list
                    [ConvBNModule, nn.Identity()]
                    Defaults to torch.quantization.fuse_known_modules

    Returns:
        model with fused modules. A new copy is created if inplace=False.

    Examples::

            >>> m = myModel()
            >>> # m is a module containing  the sub-modules below
            >>> modules_to_fuse = [ ['conv1', 'bn1', 'relu1'], ['submodule.conv', 'submodule.relu']]
            >>> fused_m = torch.quantization.fuse_modules(m, modules_to_fuse)
            >>> output = fused_m(input)

            >>> m = myModel()
            >>> # Alternately provide a single list of modules to fuse
            >>> modules_to_fuse = ['conv1', 'bn1', 'relu1']
            >>> fused_m = torch.quantization.fuse_modules(m, modules_to_fuse)
            >>> output = fused_m(input)

    """
    if not inplace:
        model = copy.deepcopy(model)

    if all(isinstance(module_element, str) for module_element in modules_to_fuse):
        # Handle case of modules_to_fuse being a list
        _fuse_modules(model, modules_to_fuse, fuser_func)
    else:
        # Handle case of modules_to_fuse being a list of lists
        for module_list in modules_to_fuse:
            _fuse_modules(model, module_list, fuser_func)
    return model
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/quantization/fuse_modules.py
0.94066
0.50952
fuse_modules.py
pypi
r"""FakeQuantize module: simulates quantize/dequantize during training so the
network can learn quantization-friendly weights, plus the default
fake-quant configurations used by QAT.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from torch.nn import Module
from .observer import MovingAverageMinMaxObserver, HistogramObserver, MovingAveragePerChannelMinMaxObserver, _with_args

class FakeQuantize(Module):
    r""" Simulate the quantize and dequantize operations in training time.
    The output of this module is given by

    x_out = (clamp(round(x/scale + zero_point), quant_min, quant_max)-zero_point)*scale



    * :attr:`scale` defines the scale factor used for quantization.

    * :attr:`zero_point` specifies the quantized value to which 0 in floating point maps to

    * :attr:`quant_min` specifies the minimum allowable quantized value.

    * :attr:`quant_max` specifies the maximum allowable quantized value.

    * :attr:`fake_quant_enable` controls the application of fake quantization on tensors, note that
      statistics can still be updated.

    * :attr:`observer_enable` controls statistics collection on tensors

    * :attr:`dtype` specifies the quantized dtype that is being emulated with fake-quantization,
        allowable values are torch.qint8 and torch.quint8. The values of quant_min and
        quant_max should be chosen to be consistent with the dtype


    Args:
        observer (module): Module for observing statistics on input tensors and calculating scale
          and zero-point.
        quant_min (int): The minimum allowable quantized value.
        quant_max (int): The maximum allowable quantized value.
        observer_kwargs (optional): Arguments for the observer module

    Attributes:
        observer (Module): User provided module that collects statistics on the input tensor and
          provides a method to calculate scale and zero-point.

    """
    def __init__(self, observer=MovingAverageMinMaxObserver, quant_min=0, quant_max=255, **observer_kwargs):
        super(FakeQuantize, self).__init__()
        assert quant_min <= quant_max, \
            'quant_min must be less than or equal to quant_max'
        self.quant_min = quant_min
        self.quant_max = quant_max
        # Both toggles default on: observe statistics AND apply fake quant.
        self.fake_quant_enabled = True
        self.observer_enabled = True
        self.activation_post_process = observer(**observer_kwargs)
        # quant_min/quant_max must fit inside the emulated integer dtype.
        assert torch.iinfo(self.activation_post_process.dtype).min <= quant_min, 'quant_min out of bound'
        assert quant_max <= torch.iinfo(self.activation_post_process.dtype).max, 'quant_max out of bound'
        self.register_buffer('scale', torch.tensor([1.0]))
        self.register_buffer('zero_point', torch.tensor([0]))
        self.dtype = self.activation_post_process.dtype
        self.qscheme = self.activation_post_process.qscheme
        # ch_axis only exists on per-channel observers.
        self.ch_axis = self.activation_post_process.ch_axis if hasattr(self.activation_post_process, 'ch_axis') else None

    def enable_fake_quant(self, enabled=True):
        self.fake_quant_enabled = enabled
        return self

    def disable_fake_quant(self):
        return self.enable_fake_quant(False)

    def enable_observer(self, enabled=True):
        self.observer_enabled = enabled
        return self

    def disable_observer(self):
        return self.enable_observer(False)

    def calculate_qparams(self):
        return self.activation_post_process.calculate_qparams()

    def forward(self, X):
        if self.observer_enabled:
            # detach: statistics collection must not participate in autograd.
            self.activation_post_process(X.detach())
            _scale, _zero_point = self.calculate_qparams()
            self.scale, self.zero_point = _scale.to(self.scale.device), _zero_point.to(self.zero_point.device)
        if self.fake_quant_enabled:
            if self.qscheme == torch.per_channel_symmetric or self.qscheme == torch.per_channel_affine:
                X = torch.fake_quantize_per_channel_affine(X, self.scale, self.zero_point,
                                                           self.ch_axis, self.quant_min, self.quant_max)
            else:
                X = torch.fake_quantize_per_tensor_affine(X, float(self.scale),
                                                          int(self.zero_point),
                                                          self.quant_min, self.quant_max)
        return X

    # Allow FakeQuantize.with_args(...) partial-style construction, same
    # mechanism observers use.
    with_args = classmethod(_with_args)

    def extra_repr(self):
        return 'fake_quant_enabled={}, observer_enabled={},\
            scale={}, zero_point={}'.format(
                self.fake_quant_enabled, self.observer_enabled,
                self.scale, self.zero_point)

    def _save_to_state_dict(self, destination, prefix, keep_vars):
        # We cannot currently register scalar values as buffers, so need to manually
        # specify serialization here.
        super(FakeQuantize, self)._save_to_state_dict(destination, prefix, keep_vars)
        destination[prefix + 'scale'] = self.scale
        destination[prefix + 'zero_point'] = self.zero_point

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        # Removing this function throws an error that the the size of the loaded tensor does not match the original size
        # i.e., These buffers start out with numel 0 and become numel 1 once they have their first forward pass.
        local_state = ['scale', 'zero_point']
        for name in local_state:
            key = prefix + name
            if key in state_dict:
                val = state_dict[key]
                setattr(self, name, val)
            elif strict:
                missing_keys.append(key)
        super(FakeQuantize, self)._load_from_state_dict(state_dict, prefix, local_metadata, strict,
                                                        missing_keys, unexpected_keys, error_msgs)

# Default fake-quant configurations: activation (quint8, affine),
# weight (qint8, symmetric), per-channel weight, and a histogram-based
# activation variant.
default_fake_quant = FakeQuantize.with_args(observer=MovingAverageMinMaxObserver, quant_min=0, quant_max=255,
                                            dtype=torch.quint8, qscheme=torch.per_tensor_affine, reduce_range=True)
default_weight_fake_quant = FakeQuantize.with_args(observer=MovingAverageMinMaxObserver, quant_min=-128, quant_max=127,
                                                   dtype=torch.qint8, qscheme=torch.per_tensor_symmetric, reduce_range=False)
default_per_channel_weight_fake_quant = FakeQuantize.with_args(observer=MovingAveragePerChannelMinMaxObserver,
                                                               quant_min=-128,
                                                               quant_max=127,
                                                               dtype=torch.qint8,
                                                               qscheme=torch.per_channel_symmetric,
                                                               reduce_range=False,
                                                               ch_axis=0)
default_histogram_fake_quant = FakeQuantize.with_args(observer=HistogramObserver,
                                                      quant_min=0,
                                                      quant_max=255,
                                                      dtype=torch.quint8,
                                                      qscheme=torch.per_tensor_affine,
                                                      reduce_range=True)

# Helpers intended for use with Module.apply(), e.g.
# model.apply(disable_fake_quant) toggles every FakeQuantize submodule.
def disable_fake_quant(mod):
    if type(mod) == FakeQuantize:
        mod.disable_fake_quant()

def enable_fake_quant(mod):
    if type(mod) == FakeQuantize:
        mod.enable_fake_quant()

def disable_observer(mod):
    if type(mod) == FakeQuantize:
        mod.disable_observer()

def enable_observer(mod):
    if type(mod) == FakeQuantize:
        mod.enable_observer()
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/quantization/fake_quantize.py
0.923962
0.576721
fake_quantize.py
pypi
r"""QConfig / QConfigDynamic: named-tuple containers pairing observer (or
fake-quant) factories for activations and weights, plus the default
configurations for static, dynamic, and QAT quantization.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import namedtuple
from .observer import *
from .fake_quantize import *
import torch.nn as nn

class QConfig(namedtuple('QConfig', ['activation', 'weight'])):
    """
    Describes how to quantize a layer or a part of the network by providing
    settings (observer classes) for activations and weights respectively.


    Note that QConfig needs to contain observer **classes** (like MinMaxObserver) or a callable that returns
    instances on invocation, not the concrete observer instances themselves.
    Quantization preparation function will instantiate observers multiple times for each of the layers.


    Observer classes have usually reasonable default arguments, but they can be overwritten with `with_args`
    method (that behaves like functools.partial):

      my_qconfig = QConfig(activation=MinMaxObserver.with_args(dtype=torch.qint8),
      weight=default_observer.with_args(dtype=torch.qint8))
    """
    def __new__(cls, activation, weight):
        # catch common mistakes
        if isinstance(activation, nn.Module) or isinstance(weight, nn.Module):
            raise ValueError("QConfig received observer instance, please pass observer class instead. " +
                             "Use MyObserver.with_args(x=1) to override arguments to constructor if needed")
        return super(QConfig, cls).__new__(cls, activation, weight)

# Standard static-quantization defaults.
default_qconfig = QConfig(activation=default_observer,
                          weight=default_weight_observer)

default_debug_qconfig = QConfig(weight=default_weight_observer,
                                activation=default_debug_observer)

default_per_channel_qconfig = QConfig(activation=default_observer,
                                      weight=default_per_channel_weight_observer)

class QConfigDynamic(namedtuple('QConfigDynamic', ['weight'])):
    """
    Describes how to dynamically quantize a layer or a part of the network by providing
    settings (observer classes) for weights.

    It's like QConfig, but for dynamic quantization.

    Note that QConfigDynamic needs to contain observer **classes** (like MinMaxObserver) or a callable that returns
    instances on invocation, not the concrete observer instances themselves.
    Quantization function will instantiate observers multiple times for each of the layers.

    Observer classes have usually reasonable default arguments, but they can be overwritten with `with_args`
    method (that behaves like functools.partial):

      my_qconfig = QConfigDynamic(weight=default_observer.with_args(dtype=torch.qint8))
    """
    def __new__(cls, weight):
        # catch common mistakes
        if isinstance(weight, nn.Module):
            raise ValueError("QConfigDynamic received observer instance, please pass observer class instead. " +
                             "Use MyObserver.with_args(x=1) to override arguments to constructor if needed")
        return super(QConfigDynamic, cls).__new__(cls, weight)

# Dynamic (weights-only) defaults: qint8, float16, and per-channel variants.
default_dynamic_qconfig = QConfigDynamic(weight=default_weight_observer)
float16_dynamic_qconfig = QConfigDynamic(weight=NoopObserver.with_args(dtype=torch.float16))
per_channel_dynamic_qconfig = QConfigDynamic(weight=default_per_channel_weight_observer)

# QAT defaults built on FakeQuantize instead of plain observers.
default_qat_qconfig = QConfig(activation=default_fake_quant,
                              weight=default_weight_fake_quant)

# Partial configs: quantize only weights, or only activations (the other side
# passes through via nn.Identity).
default_weight_only_qconfig = QConfig(activation=torch.nn.Identity,
                                      weight=default_weight_fake_quant)
default_activation_only_qconfig = QConfig(activation=default_fake_quant,
                                          weight=torch.nn.Identity)

def get_default_qconfig(backend='fbgemm'):
    """Return the recommended post-training static QConfig for a backend.

    Args:
        backend: 'fbgemm' (x86, per-channel weights, reduced activation range)
                 or 'qnnpack' (ARM, per-tensor weights, full range)
    Raises:
        ValueError: for any other backend string.
    """
    if backend == 'fbgemm':
        qconfig = QConfig(activation=HistogramObserver.with_args(reduce_range=True),
                          weight=default_per_channel_weight_observer)
    elif backend == 'qnnpack':
        qconfig = QConfig(activation=HistogramObserver.with_args(reduce_range=False),
                          weight=default_weight_observer)
    else:
        raise ValueError("Unknown backend, please specify qconfig manually")
    return qconfig

def get_default_qat_qconfig(backend='fbgemm'):
    """Return the recommended QAT QConfig for a backend.

    Raises:
        ValueError: for any backend other than 'fbgemm' or 'qnnpack'.
    """
    # Histogram observer is too slow for quantization aware training
    if backend == 'fbgemm':
        qconfig = QConfig(activation=FakeQuantize.with_args(observer=MovingAverageMinMaxObserver,
                                                            quant_min=0,
                                                            quant_max=255,
                                                            reduce_range=True),
                          weight=default_per_channel_weight_fake_quant)
    elif backend == 'qnnpack':
        qconfig = QConfig(activation=FakeQuantize.with_args(observer=MovingAverageMinMaxObserver,
                                                            quant_min=0,
                                                            quant_max=255,
                                                            reduce_range=False),
                          weight=default_weight_fake_quant)
    else:
        raise ValueError("Unknown backend, please specify qconfig manually")
    return qconfig
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/quantization/qconfig.py
0.903114
0.27389
qconfig.py
pypi
import torch.jit import inspect import textwrap # this file is for generating documentation using sphinx autodoc # > help(torch.jit.supported_ops) will also give a nice listed of the # supported ops programmatically def _hidden(name): return name.startswith('_') and not name.startswith('__') def _emit_type(type): return str(type) def _emit_arg(indent, i, arg): v = "{} : {}".format(arg.name, _emit_type(arg.type)) default = arg.default_value if default is not None: v = "{}={}".format(v, str(default)) if i > 0: v = "\n{}{}".format(" " * indent, v) return v def _emit_args(indent, arguments): return ",".join(_emit_arg(indent, i, arg) for i, arg in enumerate(arguments)) def _emit_ret(ret): return _emit_type(ret.type) def _emit_rets(returns): if len(returns) == 1: return _emit_ret(returns[0]) return "Tuple[{}]".format(", ".join(_emit_ret(r) for r in returns)) def _emit_schema(mod, name, schema, arg_start=0, padding=4): if mod is None: qualified_name = name else: qualified_name = "{}.{}".format(mod, name) schema = "{}({}) -> {}".format(qualified_name, _emit_args(len(qualified_name) + 1 + padding, schema.arguments[arg_start:]), _emit_rets(schema.returns)) return schema def _get_tensor_ops(): def is_tensor_method(schema): if len(schema.arguments) == 0: return False self = schema.arguments[0] if self.name != 'self': return False if not self.type.isSubtypeOf(torch._C.TensorType.get()): return False return True methods = [] # discover methods for elem in dir(torch.Tensor): if not _hidden(elem): schemas = torch._C._jit_get_schemas_for_operator("aten::" + elem) for schema in schemas: if is_tensor_method(schema): methods.append(_emit_schema('Tensor', elem, schema, arg_start=1)) return "Supported Tensor Methods", methods def _get_nn_functional_ops(): functions = [] # Iterate over torch.nn.functional mod = torch.nn.functional name = mod.__name__ for elem in dir(torch.nn.functional): attr = getattr(mod, elem) if not inspect.isfunction(attr) or _hidden(elem[0]): # Ignore non-functions 
and internal methods continue if 'torch.nn.functional' not in inspect.getmodule(attr).__name__: # Ignore functions from outside torch.nn.functional continue try: # compile fn, get schema scripted = torch.jit.script(attr) schema = scripted.schema functions.append(_emit_schema(name, elem, schema)) except: # noqa # Skip interpolate / boolean dispatched things pass # Iterate over modules that we know contain a lot of builtins for mod in torch.jit._modules_containing_builtins: name = mod.__name__ for elem in dir(mod): builtin = torch.jit._find_builtin(getattr(mod, elem)) if builtin is not None: schemas = torch._C._jit_get_schemas_for_operator(builtin) for schema in schemas: # remove _tan but not __and__ if not _hidden(elem): functions.append(_emit_schema(name, elem, schema)) return "Supported PyTorch Functions", functions def _get_builtins_helper(): builtins = [] for fn, _builtin_name in torch.jit._builtin_ops: mod = inspect.getmodule(fn) if _hidden(fn.__name__) or _hidden(fn.__qualname__) or _hidden(mod.__name__): # skip internal-only methods continue if 'torch._C' in mod.__name__: continue builtins.append((fn, _builtin_name)) return builtins def _is_math_fn(fn): mod = inspect.getmodule(fn) return mod.__name__ == 'math' def _get_torchscript_builtins(): functions = [] builtins = filter(lambda fn: not _is_math_fn(fn[0]), _get_builtins_helper()) builtins = list(builtins) # Iterate over the specially added builtins for fn, _builtin_name in builtins: mod = inspect.getmodule(fn) builtin = torch.jit._find_builtin(fn) if builtin is not None: schemas = torch._C._jit_get_schemas_for_operator(builtin) for schema in schemas: functions.append(_emit_schema(mod.__name__, fn.__name__, schema)) pass return "TorchScript Builtin Functions", functions def _get_math_builtins(): functions = [] builtins = filter(lambda fn: _is_math_fn(fn[0]), _get_builtins_helper()) builtins = list(builtins) # Iterate over the specially added builtins for fn, _builtin_name in builtins: mod = 
inspect.getmodule(fn) builtin = torch.jit._find_builtin(fn) if builtin is not None: schemas = torch._C._jit_get_schemas_for_operator(builtin) for schema in schemas: schema = _emit_schema(mod.__name__, fn.__name__, schema) if 'Tensor' in schema: # Skip Tensor ops that have the same name as math functions # (they will show up in the tensor methods section) continue functions.append(schema) pass return "``math`` Module", functions def _get_global_builtins(): # Taken from the 'globals' map in torch/csrc/jit/frontend/ir_emitter.cpp supported_builtins = [ 'print', 'tuple', 'float', 'int', 'bool', 'str', 'getattr', 'hasattr', 'isinstance', 'len', 'hex', 'oct', 'round', 'hash', 'min', 'max', 'abs', 'all', 'divmod', 'list', 'ord', 'chr', 'bin', 'range', 'zip', 'enumerate', 'sorted', ] op_renames = { 'bool': 'aten::Bool', 'int': 'aten::Int', 'float': 'aten::Float', 'abs': 'prim::abs', 'max': 'prim::max', 'min': 'prim::min', 'range': 'fake::does_not_exist', } schemaless_op_explanations = { 'print': 'Print any value', 'tuple': 'Lists cannot be converted to tuples with this method since their size is not statically known', 'getattr': 'Attribute name must be a literal string', 'hasattr': 'Attribute name must be a literal string', 'isinstance': 'Result is static', 'zip': 'Arguments must be iterable. See :ref:`Iterables <jit_iterables>` for details.', 'enumerate': 'Arguments must be iterable. 
See :ref:`Iterables <jit_iterables>` for details.', 'range': 'Can only be used as an iterator in a for loop', } magic_methods = [ ('float', '__float__'), ('int', '__int__'), ('bool', '__bool__'), ('str', '__str__'), ('len', '__len__'), ('hex', '__hex__'), ('oct', '__oct__'), ] magic_methods_rows = [] for fn, magic_method in magic_methods: magic_methods_rows.append('":any:`{}`", "``{}``"'.format(fn, magic_method)) schematized_ops = [] schemaless_ops = [] for fn in supported_builtins: op_name = 'aten::{}'.format(fn) if fn in op_renames: op_name = op_renames[fn] schemas = torch._C._jit_get_schemas_for_operator(op_name) for s in schemas: schematized_ops.append(_emit_schema(None, fn, s, padding=0)) if len(schemas) > 0: schematized_ops.append('') else: table_row = '":any:`{}`", "{}"'.format(fn, schemaless_op_explanations[fn]) schemaless_ops.append(table_row) schematized_ops = '\n'.join(schematized_ops) schemaless_ops = '\n'.join(schemaless_ops) magic_methods_rows = '\n'.join(magic_methods_rows) schematized_ops = textwrap.indent(schematized_ops, '\t') schemaless_ops = textwrap.indent(schemaless_ops, '\t') magic_methods_rows = textwrap.indent(magic_methods_rows, '\t') section = """ The functions in the following table are supported but do not have a static schema .. csv-table:: :header: "Function", "Note" {} The following functions will use the corresponding magic method on :any:`TorchScript classes` .. csv-table:: :header: "Function", "Magic Method" {} These built-in functions do have a schema .. rst-class:: codeblock-height-limiter :: {} """.format(schemaless_ops, magic_methods_rows, schematized_ops) return "Python Built-in Functions", section def _list_supported_ops(): def emit_block(decls): return '\n.. 
rst-class:: codeblock-height-limiter\n\n::\n\n{}\n'.format(''.join(' {}\n\n'.format(d) for d in decls)) body = '' op_gathering_fns = ( _get_tensor_ops, _get_nn_functional_ops, _get_torchscript_builtins, _get_global_builtins, _get_math_builtins, ) for fn in op_gathering_fns: header, items = fn() link_target = header.replace('`', '').replace('-', '').lower().replace(' ', '-') if isinstance(items, str): section = "{}\n{}\n{}\n".format(header, '~' * len(header), items) else: section = "{}\n{}\n{}".format(header, '~' * len(header), emit_block(items)) section = '.. _{}:'.format(link_target) + '\n\n' + section body += section return body __doc__ = _list_supported_ops()
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/jit/supported_ops.py
0.637934
0.155559
supported_ops.py
pypi
from __future__ import absolute_import, division, print_function, unicode_literals import torch class MkldnnLinear(torch.jit.ScriptModule): def __init__(self, dense_module): super(MkldnnLinear, self).__init__() self.register_buffer('weight', dense_module.weight.to_mkldnn()) if dense_module.bias is not None: self.register_buffer('bias', dense_module.bias.to_mkldnn()) else: # TODO: Remove this once ScriptModule supports registering None buffer self.register_buffer( 'bias', torch.zeros([dense_module.weight.size(0)], dtype=torch.float).to_mkldnn()) @torch.jit.script_method def __getstate__(self): return (self.weight.to_dense(), self.bias.to_dense(), self.training) @torch.jit.script_method def __setstate__(self, state): self.weight = state[0].to_mkldnn() self.bias = state[1].to_mkldnn() self.training = state[2] @torch.jit.script_method def forward(self, x): x_mkldnn = x if x.is_mkldnn else x.to_mkldnn() y_mkldnn = torch._C._nn.mkldnn_linear(x_mkldnn, self.weight, self.bias) y = y_mkldnn if x.is_mkldnn else y_mkldnn.to_dense() return y class MkldnnConv2d(torch.jit.ScriptModule): __constants__ = ['stride', 'padding', 'dilation', 'groups'] def __init__(self, dense_module): super(MkldnnConv2d, self).__init__() self.stride = dense_module.stride self.padding = dense_module.padding self.dilation = dense_module.dilation self.groups = dense_module.groups self.register_buffer('weight', torch._C._nn.mkldnn_reorder_conv2d_weight( dense_module.weight.to_mkldnn(), self.padding, self.stride, self.dilation, self.groups)) if dense_module.bias is not None: self.register_buffer('bias', dense_module.bias.to_mkldnn()) else: # TODO: Remove this once ScriptModule supports registering None buffer self.register_buffer( 'bias', torch.zeros([dense_module.weight.size(0)], dtype=torch.float).to_mkldnn()) @torch.jit.script_method def __getstate__(self): return (self.weight.to_dense(), self.bias.to_dense(), self.training) @torch.jit.script_method def __setstate__(self, state): self.weight = 
torch._C._nn.mkldnn_reorder_conv2d_weight( state[0].to_mkldnn(), self.padding, self.stride, self.dilation, self.groups) self.bias = state[1].to_mkldnn() self.training = state[2] @torch.jit.script_method def forward(self, x): return torch.conv2d( x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups) class MkldnnBatchNorm2d(torch.jit.ScriptModule): __constants__ = ['exponential_average_factor', 'eps'] def __init__(self, dense_module): super(MkldnnBatchNorm2d, self).__init__() assert(not dense_module.training) assert(dense_module.track_running_stats) assert(dense_module.affine) if dense_module.momentum is None: self.exponential_average_factor = 0.0 else: self.exponential_average_factor = dense_module.momentum self.eps = dense_module.eps self.register_buffer('weight', dense_module.weight.to_mkldnn()) self.register_buffer('bias', dense_module.bias.to_mkldnn()) self.register_buffer('running_mean', dense_module.running_mean.to_mkldnn()) self.register_buffer('running_var', dense_module.running_var.to_mkldnn()) @torch.jit.script_method def __getstate__(self): weight = self.weight.to_dense() bias = self.bias.to_dense() running_mean = self.running_mean.to_dense() running_var = self.running_var.to_dense() return (weight, bias, running_mean, running_var, self.training) @torch.jit.script_method def __setstate__(self, state): self.weight = state[0].to_mkldnn() self.bias = state[1].to_mkldnn() self.running_mean = state[2].to_mkldnn() self.running_var = state[3].to_mkldnn() self.training = state[4] @torch.jit.script_method def forward(self, x): return torch.batch_norm( x, self.weight, self.bias, self.running_mean, self.running_var, False, # training self.exponential_average_factor, self.eps, False, # cuda_enabled ) def to_mkldnn(module): def m_fn(m): if isinstance(m, torch.nn.Linear): return MkldnnLinear(m) elif isinstance(m, torch.nn.Conv2d): return MkldnnConv2d(m) elif isinstance(m, torch.nn.BatchNorm2d): return MkldnnBatchNorm2d(m) else: return m def 
m_fn_rec(m): new_m = m_fn(m) for name, sub_m in m.named_children(): setattr(new_m, name, m_fn_rec(sub_m)) return new_m return m_fn_rec(module)
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/utils/mkldnn.py
0.74008
0.406626
mkldnn.py
pypi
from __future__ import absolute_import, division, print_function, unicode_literals import torch._C def format_time(time_us=None, time_ms=None, time_s=None): '''Defines how to format time''' assert sum([time_us is not None, time_ms is not None, time_s is not None]) == 1 US_IN_SECOND = 1e6 US_IN_MS = 1e3 if time_us is None: if time_ms is not None: time_us = time_ms * US_IN_MS elif time_s is not None: time_us = time_s * US_IN_SECOND else: raise AssertionError("Shouldn't reach here :)") if time_us >= US_IN_SECOND: return '{:.3f}s'.format(time_us / US_IN_SECOND) if time_us >= US_IN_MS: return '{:.3f}ms'.format(time_us / US_IN_MS) return '{:.3f}us'.format(time_us) class ExecutionStats(object): def __init__(self, c_stats, benchmark_config): self._c_stats = c_stats self.benchmark_config = benchmark_config @property def latency_avg_ms(self): return self._c_stats.latency_avg_ms @property def num_iters(self): return self._c_stats.num_iters @property def iters_per_second(self): ''' Returns total number of iterations per second across all calling threads ''' return self.num_iters / self.total_time_seconds @property def total_time_seconds(self): return self.num_iters * ( self.latency_avg_ms / 1000.0) / self.benchmark_config.num_calling_threads def __str__(self): return '\n'.join([ "Average latency per example: " + format_time(time_ms=self.latency_avg_ms), "Total number of iterations: {}".format(self.num_iters), "Total number of iterations per second (across all threads): {:.2f}".format(self.iters_per_second), "Total time: " + format_time(time_s=self.total_time_seconds) ]) class ThroughputBenchmark(object): ''' This class is a wrapper around a c++ component throughput_benchmark::ThroughputBenchmark responsible for executing a PyTorch module (nn.Module or ScriptModule) under an inference server like load. It can emulate multiple calling threads to a single module provided. 
In the future we plan to enhance this component to support inter and intra-op parallelism as well as multiple models running in a single process. Please note that even though nn.Module is supported, it might incur an overhead from the need to hold GIL every time we execute Python code or pass around inputs as Python objects. As soon as you have a ScriptModule version of your model for inference deployment it is better to switch to using it in this benchmark. Example:: >>> from torch.utils import ThroughputBenchmark >>> bench = ThroughputBenchmark(my_module) >>> # Pre-populate benchmark's data set with the inputs >>> for input in inputs: # Both args and kwargs work, same as any PyTorch Module / ScriptModule bench.add_input(input[0], x2=input[1]) >>> Inputs supplied above are randomly used during the execution >>> stats = bench.benchmark( num_calling_threads=4, num_warmup_iters = 100, num_iters = 1000, ) >>> print("Avg latency (ms): {}".format(stats.latency_avg_ms)) >>> print("Number of iterations: {}".format(stats.num_iters)) ''' def __init__(self, module): if isinstance(module, torch.jit.ScriptModule): self._benchmark = torch._C.ThroughputBenchmark(module._c) else: self._benchmark = torch._C.ThroughputBenchmark(module) def run_once(self, *args, **kwargs): ''' Given input id (input_idx) run benchmark once and return prediction. This is useful for testing that benchmark actually runs the module you want it to run. input_idx here is an index into inputs array populated by calling add_input() method. ''' return self._benchmark.run_once(*args, **kwargs) def add_input(self, *args, **kwargs): ''' Store a single input to a module into the benchmark memory and keep it there. During the benchmark execution every thread is going to pick up a random input from the all the inputs ever supplied to the benchmark via this function. 
''' self._benchmark.add_input(*args, **kwargs) def benchmark(self, num_calling_threads=1, num_warmup_iters=10, num_iters=100): ''' Args: num_warmup_iters (int): Warmup iters are used to make sure we run a module a few times before actually measuring things. This way we avoid cold caches and any other similar problems. This is the number of warmup iterations for each of the thread in separate num_iters (int): Number of iterations the benchmark should run with. This number is separate from the warmup iterations. Also the number is shared across all the threads. Once the num_iters iterations across all the threads is reached, we will stop execution. Though total number of iterations might be slightly larger. Which is reported as stats.num_iters where stats is the result of this function This function returns BenchmarkExecutionStats object which is defined via pybind11. It currently has two fields: - num_iters - number of actual iterations the benchmark have made - avg_latency_ms - average time it took to infer on one input example in milliseconds ''' config = torch._C.BenchmarkConfig() config.num_calling_threads = num_calling_threads config.num_warmup_iters = num_warmup_iters config.num_iters = num_iters c_stats = self._benchmark.benchmark(config) return ExecutionStats(c_stats, config)
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/utils/throughput_benchmark.py
0.903804
0.327561
throughput_benchmark.py
pypi
from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import six import time import torch from tensorboard.compat import tf from tensorboard.compat.proto.event_pb2 import SessionLog from tensorboard.compat.proto.event_pb2 import Event from tensorboard.compat.proto import event_pb2 from tensorboard.plugins.projector.projector_config_pb2 import ProjectorConfig from tensorboard.summary.writer.event_file_writer import EventFileWriter from ._convert_np import make_np from ._embedding import ( make_mat, make_sprite, make_tsv, write_pbtxt, get_embedding_info, ) from ._onnx_graph import load_onnx_graph from ._pytorch_graph import graph from ._utils import figure_to_image from .summary import ( scalar, histogram, histogram_raw, image, audio, text, pr_curve, pr_curve_raw, video, custom_scalars, image_boxes, mesh, hparams ) class FileWriter(object): """Writes protocol buffers to event files to be consumed by TensorBoard. The `FileWriter` class provides a mechanism to create an event file in a given directory and add summaries and events to it. The class updates the file contents asynchronously. This allows a training program to call methods to add data to the file directly from the training loop, without slowing down training. """ def __init__(self, log_dir, max_queue=10, flush_secs=120, filename_suffix=''): """Creates a `FileWriter` and an event file. On construction the writer creates a new event file in `log_dir`. The other arguments to the constructor control the asynchronous writes to the event file. Args: log_dir: A string. Directory where event file will be written. max_queue: Integer. Size of the queue for pending events and summaries before one of the 'add' calls forces a flush to disk. Default is ten items. flush_secs: Number. How often, in seconds, to flush the pending events and summaries to disk. Default is every two minutes. filename_suffix: A string. 
Suffix added to all event filenames in the log_dir directory. More details on filename construction in tensorboard.summary.writer.event_file_writer.EventFileWriter. """ # Sometimes PosixPath is passed in and we need to coerce it to # a string in all cases # TODO: See if we can remove this in the future if we are # actually the ones passing in a PosixPath log_dir = str(log_dir) self.event_writer = EventFileWriter( log_dir, max_queue, flush_secs, filename_suffix) def get_logdir(self): """Returns the directory where event file will be written.""" return self.event_writer.get_logdir() def add_event(self, event, step=None, walltime=None): """Adds an event to the event file. Args: event: An `Event` protocol buffer. step: Number. Optional global step value for training process to record with the event. walltime: float. Optional walltime to override the default (current) walltime (from time.time()) seconds after epoch """ event.wall_time = time.time() if walltime is None else walltime if step is not None: # Make sure step is converted from numpy or other formats # since protobuf might not convert depending on version event.step = int(step) self.event_writer.add_event(event) def add_summary(self, summary, global_step=None, walltime=None): """Adds a `Summary` protocol buffer to the event file. This method wraps the provided summary in an `Event` protocol buffer and adds it to the event file. Args: summary: A `Summary` protocol buffer. global_step: Number. Optional global step value for training process to record with the summary. walltime: float. Optional walltime to override the default (current) walltime (from time.time()) seconds after epoch """ event = event_pb2.Event(summary=summary) self.add_event(event, global_step, walltime) def add_graph(self, graph_profile, walltime=None): """Adds a `Graph` and step stats protocol buffer to the event file. Args: graph_profile: A `Graph` and step stats protocol buffer. walltime: float. 
Optional walltime to override the default (current) walltime (from time.time()) seconds after epoch """ graph = graph_profile[0] stepstats = graph_profile[1] event = event_pb2.Event(graph_def=graph.SerializeToString()) self.add_event(event, None, walltime) trm = event_pb2.TaggedRunMetadata( tag='step1', run_metadata=stepstats.SerializeToString()) event = event_pb2.Event(tagged_run_metadata=trm) self.add_event(event, None, walltime) def add_onnx_graph(self, graph, walltime=None): """Adds a `Graph` protocol buffer to the event file. Args: graph: A `Graph` protocol buffer. walltime: float. Optional walltime to override the default (current) _get_file_writerfrom time.time()) """ event = event_pb2.Event(graph_def=graph.SerializeToString()) self.add_event(event, None, walltime) def flush(self): """Flushes the event file to disk. Call this method to make sure that all pending events have been written to disk. """ self.event_writer.flush() def close(self): """Flushes the event file to disk and close the file. Call this method when you do not need the summary writer anymore. """ self.event_writer.close() def reopen(self): """Reopens the EventFileWriter. Can be called after `close()` to add more events in the same directory. The events will go into a new events file. Does nothing if the EventFileWriter was not closed. """ self.event_writer.reopen() class SummaryWriter(object): """Writes entries directly to event files in the log_dir to be consumed by TensorBoard. The `SummaryWriter` class provides a high-level API to create an event file in a given directory and add summaries and events to it. The class updates the file contents asynchronously. This allows a training program to call methods to add data to the file directly from the training loop, without slowing down training. 
""" def __init__(self, log_dir=None, comment='', purge_step=None, max_queue=10, flush_secs=120, filename_suffix=''): """Creates a `SummaryWriter` that will write out events and summaries to the event file. Args: log_dir (string): Save directory location. Default is runs/**CURRENT_DATETIME_HOSTNAME**, which changes after each run. Use hierarchical folder structure to compare between runs easily. e.g. pass in 'runs/exp1', 'runs/exp2', etc. for each new experiment to compare across them. comment (string): Comment log_dir suffix appended to the default ``log_dir``. If ``log_dir`` is assigned, this argument has no effect. purge_step (int): When logging crashes at step :math:`T+X` and restarts at step :math:`T`, any events whose global_step larger or equal to :math:`T` will be purged and hidden from TensorBoard. Note that crashed and resumed experiments should have the same ``log_dir``. max_queue (int): Size of the queue for pending events and summaries before one of the 'add' calls forces a flush to disk. Default is ten items. flush_secs (int): How often, in seconds, to flush the pending events and summaries to disk. Default is every two minutes. filename_suffix (string): Suffix added to all event filenames in the log_dir directory. More details on filename construction in tensorboard.summary.writer.event_file_writer.EventFileWriter. Examples:: from torch.utils.tensorboard import SummaryWriter # create a summary writer with automatically generated folder name. writer = SummaryWriter() # folder location: runs/May04_22-14-54_s-MacBook-Pro.local/ # create a summary writer using the specified folder name. writer = SummaryWriter("my_experiment") # folder location: my_experiment # create a summary writer with comment appended. 
writer = SummaryWriter(comment="LR_0.1_BATCH_16") # folder location: runs/May04_22-14-54_s-MacBook-Pro.localLR_0.1_BATCH_16/ """ torch._C._log_api_usage_once("tensorboard.create.summarywriter") if not log_dir: import socket from datetime import datetime current_time = datetime.now().strftime('%b%d_%H-%M-%S') log_dir = os.path.join( 'runs', current_time + '_' + socket.gethostname() + comment) self.log_dir = log_dir self.purge_step = purge_step self.max_queue = max_queue self.flush_secs = flush_secs self.filename_suffix = filename_suffix # Initialize the file writers, but they can be cleared out on close # and recreated later as needed. self.file_writer = self.all_writers = None self._get_file_writer() # Create default bins for histograms, see generate_testdata.py in tensorflow/tensorboard v = 1E-12 buckets = [] neg_buckets = [] while v < 1E20: buckets.append(v) neg_buckets.append(-v) v *= 1.1 self.default_bins = neg_buckets[::-1] + [0] + buckets def _check_caffe2_blob(self, item): """ Caffe2 users have the option of passing a string representing the name of a blob in the workspace instead of passing the actual Tensor/array containing the numeric values. Thus, we need to check if we received a string as input instead of an actual Tensor/array, and if so, we need to fetch the Blob from the workspace corresponding to that name. Fetching can be done with the following: from caffe2.python import workspace (if not already imported) workspace.FetchBlob(blob_name) workspace.FetchBlobs([blob_name1, blob_name2, ...]) """ return isinstance(item, six.string_types) def _get_file_writer(self): """Returns the default FileWriter instance. 
Recreates it if closed.""" if self.all_writers is None or self.file_writer is None: self.file_writer = FileWriter(self.log_dir, self.max_queue, self.flush_secs, self.filename_suffix) self.all_writers = {self.file_writer.get_logdir(): self.file_writer} if self.purge_step is not None: most_recent_step = self.purge_step self.file_writer.add_event( Event(step=most_recent_step, file_version='brain.Event:2')) self.file_writer.add_event( Event(step=most_recent_step, session_log=SessionLog(status=SessionLog.START))) self.purge_step = None return self.file_writer def get_logdir(self): """Returns the directory where event files will be written.""" return self.log_dir def add_hparams(self, hparam_dict=None, metric_dict=None): """Add a set of hyperparameters to be compared in TensorBoard. Args: hparam_dict (dict): Each key-value pair in the dictionary is the name of the hyper parameter and it's corresponding value. metric_dict (dict): Each key-value pair in the dictionary is the name of the metric and it's corresponding value. Note that the key used here should be unique in the tensorboard record. Otherwise the value you added by ``add_scalar`` will be displayed in hparam plugin. In most cases, this is unwanted. Examples:: from torch.utils.tensorboard import SummaryWriter with SummaryWriter() as w: for i in range(5): w.add_hparams({'lr': 0.1*i, 'bsize': i}, {'hparam/accuracy': 10*i, 'hparam/loss': 10*i}) Expected result: .. 
image:: _static/img/tensorboard/add_hparam.png :scale: 50 % """ torch._C._log_api_usage_once("tensorboard.logging.add_hparams") if type(hparam_dict) is not dict or type(metric_dict) is not dict: raise TypeError('hparam_dict and metric_dict should be dictionary.') exp, ssi, sei = hparams(hparam_dict, metric_dict) logdir = os.path.join( self._get_file_writer().get_logdir(), str(time.time()) ) with SummaryWriter(log_dir=logdir) as w_hp: w_hp.file_writer.add_summary(exp) w_hp.file_writer.add_summary(ssi) w_hp.file_writer.add_summary(sei) for k, v in metric_dict.items(): w_hp.add_scalar(k, v) def add_scalar(self, tag, scalar_value, global_step=None, walltime=None): """Add scalar data to summary. Args: tag (string): Data identifier scalar_value (float or string/blobname): Value to save global_step (int): Global step value to record walltime (float): Optional override default walltime (time.time()) with seconds after epoch of event Examples:: from torch.utils.tensorboard import SummaryWriter writer = SummaryWriter() x = range(100) for i in x: writer.add_scalar('y=2x', i * 2, i) writer.close() Expected result: .. image:: _static/img/tensorboard/add_scalar.png :scale: 50 % """ torch._C._log_api_usage_once("tensorboard.logging.add_scalar") if self._check_caffe2_blob(scalar_value): scalar_value = workspace.FetchBlob(scalar_value) self._get_file_writer().add_summary( scalar(tag, scalar_value), global_step, walltime) def add_scalars(self, main_tag, tag_scalar_dict, global_step=None, walltime=None): """Adds many scalar data to summary. Note that this function also keeps logged scalars in memory. In extreme case it explodes your RAM. 
Args: main_tag (string): The parent name for the tags tag_scalar_dict (dict): Key-value pair storing the tag and corresponding values global_step (int): Global step value to record walltime (float): Optional override default walltime (time.time()) seconds after epoch of event Examples:: from torch.utils.tensorboard import SummaryWriter writer = SummaryWriter() r = 5 for i in range(100): writer.add_scalars('run_14h', {'xsinx':i*np.sin(i/r), 'xcosx':i*np.cos(i/r), 'tanx': np.tan(i/r)}, i) writer.close() # This call adds three values to the same scalar plot with the tag # 'run_14h' in TensorBoard's scalar section. Expected result: .. image:: _static/img/tensorboard/add_scalars.png :scale: 50 % """ torch._C._log_api_usage_once("tensorboard.logging.add_scalars") walltime = time.time() if walltime is None else walltime fw_logdir = self._get_file_writer().get_logdir() for tag, scalar_value in tag_scalar_dict.items(): fw_tag = fw_logdir + "/" + main_tag.replace("/", "_") + "_" + tag if fw_tag in self.all_writers.keys(): fw = self.all_writers[fw_tag] else: fw = FileWriter(fw_tag, self.max_queue, self.flush_secs, self.filename_suffix) self.all_writers[fw_tag] = fw if self._check_caffe2_blob(scalar_value): scalar_value = workspace.FetchBlob(scalar_value) fw.add_summary(scalar(main_tag, scalar_value), global_step, walltime) def add_histogram(self, tag, values, global_step=None, bins='tensorflow', walltime=None, max_bins=None): """Add histogram to summary. Args: tag (string): Data identifier values (torch.Tensor, numpy.array, or string/blobname): Values to build histogram global_step (int): Global step value to record bins (string): One of {'tensorflow','auto', 'fd', ...}. This determines how the bins are made. 
You can find other options in: https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html walltime (float): Optional override default walltime (time.time()) seconds after epoch of event Examples:: from torch.utils.tensorboard import SummaryWriter import numpy as np writer = SummaryWriter() for i in range(10): x = np.random.random(1000) writer.add_histogram('distribution centers', x + i, i) writer.close() Expected result: .. image:: _static/img/tensorboard/add_histogram.png :scale: 50 % """ torch._C._log_api_usage_once("tensorboard.logging.add_histogram") if self._check_caffe2_blob(values): values = workspace.FetchBlob(values) if isinstance(bins, six.string_types) and bins == 'tensorflow': bins = self.default_bins self._get_file_writer().add_summary( histogram(tag, values, bins, max_bins=max_bins), global_step, walltime) def add_histogram_raw(self, tag, min, max, num, sum, sum_squares, bucket_limits, bucket_counts, global_step=None, walltime=None): """Adds histogram with raw data. Args: tag (string): Data identifier min (float or int): Min value max (float or int): Max value num (int): Number of values sum (float or int): Sum of all values sum_squares (float or int): Sum of squares for all values bucket_limits (torch.Tensor, numpy.array): Upper value per bucket. The number of elements of it should be the same as `bucket_counts`. 
bucket_counts (torch.Tensor, numpy.array): Number of values per bucket global_step (int): Global step value to record walltime (float): Optional override default walltime (time.time()) seconds after epoch of event see: https://github.com/tensorflow/tensorboard/blob/master/tensorboard/plugins/histogram/README.md Examples:: from torch.utils.tensorboard import SummaryWriter import numpy as np writer = SummaryWriter() dummy_data = [] for idx, value in enumerate(range(50)): dummy_data += [idx + 0.001] * value bins = list(range(50+2)) bins = np.array(bins) values = np.array(dummy_data).astype(float).reshape(-1) counts, limits = np.histogram(values, bins=bins) sum_sq = values.dot(values) writer.add_histogram_raw( tag='histogram_with_raw_data', min=values.min(), max=values.max(), num=len(values), sum=values.sum(), sum_squares=sum_sq, bucket_limits=limits[1:].tolist(), bucket_counts=counts.tolist(), global_step=0) writer.close() Expected result: .. image:: _static/img/tensorboard/add_histogram_raw.png :scale: 50 % """ torch._C._log_api_usage_once("tensorboard.logging.add_histogram_raw") if len(bucket_limits) != len(bucket_counts): raise ValueError('len(bucket_limits) != len(bucket_counts), see the document.') self._get_file_writer().add_summary( histogram_raw(tag, min, max, num, sum, sum_squares, bucket_limits, bucket_counts), global_step, walltime) def add_image(self, tag, img_tensor, global_step=None, walltime=None, dataformats='CHW'): """Add image data to summary. Note that this requires the ``pillow`` package. Args: tag (string): Data identifier img_tensor (torch.Tensor, numpy.array, or string/blobname): Image data global_step (int): Global step value to record walltime (float): Optional override default walltime (time.time()) seconds after epoch of event Shape: img_tensor: Default is :math:`(3, H, W)`. You can use ``torchvision.utils.make_grid()`` to convert a batch of tensor into 3xHxW format or call ``add_images`` and let us do the job. 
Tensor with :math:`(1, H, W)`, :math:`(H, W)`, :math:`(H, W, 3)` is also suitable as long as corresponding ``dataformats`` argument is passed, e.g. ``CHW``, ``HWC``, ``HW``. Examples:: from torch.utils.tensorboard import SummaryWriter import numpy as np img = np.zeros((3, 100, 100)) img[0] = np.arange(0, 10000).reshape(100, 100) / 10000 img[1] = 1 - np.arange(0, 10000).reshape(100, 100) / 10000 img_HWC = np.zeros((100, 100, 3)) img_HWC[:, :, 0] = np.arange(0, 10000).reshape(100, 100) / 10000 img_HWC[:, :, 1] = 1 - np.arange(0, 10000).reshape(100, 100) / 10000 writer = SummaryWriter() writer.add_image('my_image', img, 0) # If you have non-default dimension setting, set the dataformats argument. writer.add_image('my_image_HWC', img_HWC, 0, dataformats='HWC') writer.close() Expected result: .. image:: _static/img/tensorboard/add_image.png :scale: 50 % """ torch._C._log_api_usage_once("tensorboard.logging.add_image") if self._check_caffe2_blob(img_tensor): img_tensor = workspace.FetchBlob(img_tensor) self._get_file_writer().add_summary( image(tag, img_tensor, dataformats=dataformats), global_step, walltime) def add_images(self, tag, img_tensor, global_step=None, walltime=None, dataformats='NCHW'): """Add batched image data to summary. Note that this requires the ``pillow`` package. Args: tag (string): Data identifier img_tensor (torch.Tensor, numpy.array, or string/blobname): Image data global_step (int): Global step value to record walltime (float): Optional override default walltime (time.time()) seconds after epoch of event dataformats (string): Image data format specification of the form NCHW, NHWC, CHW, HWC, HW, WH, etc. Shape: img_tensor: Default is :math:`(N, 3, H, W)`. If ``dataformats`` is specified, other shape will be accepted. e.g. NCHW or NHWC. 
Examples:: from torch.utils.tensorboard import SummaryWriter import numpy as np img_batch = np.zeros((16, 3, 100, 100)) for i in range(16): img_batch[i, 0] = np.arange(0, 10000).reshape(100, 100) / 10000 / 16 * i img_batch[i, 1] = (1 - np.arange(0, 10000).reshape(100, 100) / 10000) / 16 * i writer = SummaryWriter() writer.add_images('my_image_batch', img_batch, 0) writer.close() Expected result: .. image:: _static/img/tensorboard/add_images.png :scale: 30 % """ torch._C._log_api_usage_once("tensorboard.logging.add_images") if self._check_caffe2_blob(img_tensor): img_tensor = workspace.FetchBlob(img_tensor) self._get_file_writer().add_summary( image(tag, img_tensor, dataformats=dataformats), global_step, walltime) def add_image_with_boxes(self, tag, img_tensor, box_tensor, global_step=None, walltime=None, rescale=1, dataformats='CHW'): """Add image and draw bounding boxes on the image. Args: tag (string): Data identifier img_tensor (torch.Tensor, numpy.array, or string/blobname): Image data box_tensor (torch.Tensor, numpy.array, or string/blobname): Box data (for detected objects) global_step (int): Global step value to record walltime (float): Optional override default walltime (time.time()) seconds after epoch of event rescale (float): Optional scale override dataformats (string): Image data format specification of the form NCHW, NHWC, CHW, HWC, HW, WH, etc. Shape: img_tensor: Default is :math:`(3, H, W)`. It can be specified with ``dataformats`` argument. e.g. CHW or HWC box_tensor: (torch.Tensor, numpy.array, or string/blobname): NX4, where N is the number of boxes and each 4 elememts in a row represents (xmin, ymin, xmax, ymax). 
""" torch._C._log_api_usage_once("tensorboard.logging.add_image_with_boxes") if self._check_caffe2_blob(img_tensor): img_tensor = workspace.FetchBlob(img_tensor) if self._check_caffe2_blob(box_tensor): box_tensor = workspace.FetchBlob(box_tensor) self._get_file_writer().add_summary(image_boxes( tag, img_tensor, box_tensor, rescale=rescale, dataformats=dataformats), global_step, walltime) def add_figure(self, tag, figure, global_step=None, close=True, walltime=None): """Render matplotlib figure into an image and add it to summary. Note that this requires the ``matplotlib`` package. Args: tag (string): Data identifier figure (matplotlib.pyplot.figure) or list of figures: Figure or a list of figures global_step (int): Global step value to record close (bool): Flag to automatically close the figure walltime (float): Optional override default walltime (time.time()) seconds after epoch of event """ torch._C._log_api_usage_once("tensorboard.logging.add_figure") if isinstance(figure, list): self.add_image(tag, figure_to_image(figure, close), global_step, walltime, dataformats='NCHW') else: self.add_image(tag, figure_to_image(figure, close), global_step, walltime, dataformats='CHW') def add_video(self, tag, vid_tensor, global_step=None, fps=4, walltime=None): """Add video data to summary. Note that this requires the ``moviepy`` package. Args: tag (string): Data identifier vid_tensor (torch.Tensor): Video data global_step (int): Global step value to record fps (float or int): Frames per second walltime (float): Optional override default walltime (time.time()) seconds after epoch of event Shape: vid_tensor: :math:`(N, T, C, H, W)`. The values should lie in [0, 255] for type `uint8` or [0, 1] for type `float`. 
""" torch._C._log_api_usage_once("tensorboard.logging.add_video") self._get_file_writer().add_summary( video(tag, vid_tensor, fps), global_step, walltime) def add_audio(self, tag, snd_tensor, global_step=None, sample_rate=44100, walltime=None): """Add audio data to summary. Args: tag (string): Data identifier snd_tensor (torch.Tensor): Sound data global_step (int): Global step value to record sample_rate (int): sample rate in Hz walltime (float): Optional override default walltime (time.time()) seconds after epoch of event Shape: snd_tensor: :math:`(1, L)`. The values should lie between [-1, 1]. """ torch._C._log_api_usage_once("tensorboard.logging.add_audio") if self._check_caffe2_blob(snd_tensor): snd_tensor = workspace.FetchBlob(snd_tensor) self._get_file_writer().add_summary( audio(tag, snd_tensor, sample_rate=sample_rate), global_step, walltime) def add_text(self, tag, text_string, global_step=None, walltime=None): """Add text data to summary. Args: tag (string): Data identifier text_string (string): String to save global_step (int): Global step value to record walltime (float): Optional override default walltime (time.time()) seconds after epoch of event Examples:: writer.add_text('lstm', 'This is an lstm', 0) writer.add_text('rnn', 'This is an rnn', 10) """ torch._C._log_api_usage_once("tensorboard.logging.add_text") self._get_file_writer().add_summary( text(tag, text_string), global_step, walltime) def add_onnx_graph(self, prototxt): torch._C._log_api_usage_once("tensorboard.logging.add_onnx_graph") self._get_file_writer().add_onnx_graph(load_onnx_graph(prototxt)) def add_graph(self, model, input_to_model=None, verbose=False): # prohibit second call? # no, let tensorboard handle it and show its warning message. torch._C._log_api_usage_once("tensorboard.logging.add_graph") """Add graph data to summary. Args: model (torch.nn.Module): Model to draw. input_to_model (torch.Tensor or list of torch.Tensor): A variable or a tuple of variables to be fed. 
verbose (bool): Whether to print graph structure in console. """ if hasattr(model, 'forward'): # A valid PyTorch model should have a 'forward' method self._get_file_writer().add_graph(graph(model, input_to_model, verbose)) else: # Caffe2 models do not have the 'forward' method from caffe2.proto import caffe2_pb2 from caffe2.python import core from ._caffe2_graph import ( model_to_graph_def, nets_to_graph_def, protos_to_graph_def ) if isinstance(model, list): if isinstance(model[0], core.Net): current_graph = nets_to_graph_def(model) elif isinstance(model[0], caffe2_pb2.NetDef): current_graph = protos_to_graph_def(model) else: # Handles cnn.CNNModelHelper, model_helper.ModelHelper current_graph = model_to_graph_def(model) event = event_pb2.Event( graph_def=current_graph.SerializeToString()) self._get_file_writer().add_event(event) @staticmethod def _encode(rawstr): # I'd use urllib but, I'm unsure about the differences from python3 to python2, etc. retval = rawstr retval = retval.replace("%", "%%%02x" % (ord("%"))) retval = retval.replace("/", "%%%02x" % (ord("/"))) retval = retval.replace("\\", "%%%02x" % (ord("\\"))) return retval def add_embedding(self, mat, metadata=None, label_img=None, global_step=None, tag='default', metadata_header=None): """Add embedding projector data to summary. 
Args: mat (torch.Tensor or numpy.array): A matrix which each row is the feature vector of the data point metadata (list): A list of labels, each element will be convert to string label_img (torch.Tensor): Images correspond to each data point global_step (int): Global step value to record tag (string): Name for the embedding Shape: mat: :math:`(N, D)`, where N is number of data and D is feature dimension label_img: :math:`(N, C, H, W)` Examples:: import keyword import torch meta = [] while len(meta)<100: meta = meta+keyword.kwlist # get some strings meta = meta[:100] for i, v in enumerate(meta): meta[i] = v+str(i) label_img = torch.rand(100, 3, 10, 32) for i in range(100): label_img[i]*=i/100.0 writer.add_embedding(torch.randn(100, 5), metadata=meta, label_img=label_img) writer.add_embedding(torch.randn(100, 5), label_img=label_img) writer.add_embedding(torch.randn(100, 5), metadata=meta) """ torch._C._log_api_usage_once("tensorboard.logging.add_embedding") mat = make_np(mat) if global_step is None: global_step = 0 # clear pbtxt? # Maybe we should encode the tag so slashes don't trip us up? # I don't think this will mess us up, but better safe than sorry. subdir = "%s/%s" % (str(global_step).zfill(5), self._encode(tag)) save_path = os.path.join(self._get_file_writer().get_logdir(), subdir) fs = tf.io.gfile.get_filesystem(save_path) if fs.exists(save_path): if fs.isdir(save_path): print( 'warning: Embedding dir exists, did you set global_step for add_embedding()?') else: raise Exception("Path: `%s` exists, but is a file. Cannot proceed." 
% save_path) else: fs.makedirs(save_path) if metadata is not None: assert mat.shape[0] == len( metadata), '#labels should equal with #data points' make_tsv(metadata, save_path, metadata_header=metadata_header) if label_img is not None: assert mat.shape[0] == label_img.shape[0], '#images should equal with #data points' make_sprite(label_img, save_path) assert mat.ndim == 2, 'mat should be 2D, where mat.size(0) is the number of data points' make_mat(mat, save_path) # Filesystem doesn't necessarily have append semantics, so we store an # internal buffer to append to and re-write whole file after each # embedding is added if not hasattr(self, "_projector_config"): self._projector_config = ProjectorConfig() embedding_info = get_embedding_info( metadata, label_img, fs, subdir, global_step, tag) self._projector_config.embeddings.extend([embedding_info]) from google.protobuf import text_format config_pbtxt = text_format.MessageToString(self._projector_config) write_pbtxt(self._get_file_writer().get_logdir(), config_pbtxt) def add_pr_curve(self, tag, labels, predictions, global_step=None, num_thresholds=127, weights=None, walltime=None): """Adds precision recall curve. Plotting a precision-recall curve lets you understand your model's performance under different threshold settings. With this function, you provide the ground truth labeling (T/F) and prediction confidence (usually the output of your model) for each target. The TensorBoard UI will let you choose the threshold interactively. Args: tag (string): Data identifier labels (torch.Tensor, numpy.array, or string/blobname): Ground truth data. Binary label for each element. predictions (torch.Tensor, numpy.array, or string/blobname): The probability that an element be classified as true. Value should in [0, 1] global_step (int): Global step value to record num_thresholds (int): Number of thresholds used to draw the curve. 
walltime (float): Optional override default walltime (time.time()) seconds after epoch of event Examples:: from torch.utils.tensorboard import SummaryWriter import numpy as np labels = np.random.randint(2, size=100) # binary label predictions = np.random.rand(100) writer = SummaryWriter() writer.add_pr_curve('pr_curve', labels, predictions, 0) writer.close() """ torch._C._log_api_usage_once("tensorboard.logging.add_pr_curve") labels, predictions = make_np(labels), make_np(predictions) self._get_file_writer().add_summary( pr_curve(tag, labels, predictions, num_thresholds, weights), global_step, walltime) def add_pr_curve_raw(self, tag, true_positive_counts, false_positive_counts, true_negative_counts, false_negative_counts, precision, recall, global_step=None, num_thresholds=127, weights=None, walltime=None): """Adds precision recall curve with raw data. Args: tag (string): Data identifier true_positive_counts (torch.Tensor, numpy.array, or string/blobname): true positive counts false_positive_counts (torch.Tensor, numpy.array, or string/blobname): false positive counts true_negative_counts (torch.Tensor, numpy.array, or string/blobname): true negative counts false_negative_counts (torch.Tensor, numpy.array, or string/blobname): false negative counts precision (torch.Tensor, numpy.array, or string/blobname): precision recall (torch.Tensor, numpy.array, or string/blobname): recall global_step (int): Global step value to record num_thresholds (int): Number of thresholds used to draw the curve. 
walltime (float): Optional override default walltime (time.time()) seconds after epoch of event see: https://github.com/tensorflow/tensorboard/blob/master/tensorboard/plugins/pr_curve/README.md """ torch._C._log_api_usage_once("tensorboard.logging.add_pr_curve_raw") self._get_file_writer().add_summary( pr_curve_raw(tag, true_positive_counts, false_positive_counts, true_negative_counts, false_negative_counts, precision, recall, num_thresholds, weights), global_step, walltime) def add_custom_scalars_multilinechart(self, tags, category='default', title='untitled'): """Shorthand for creating multilinechart. Similar to ``add_custom_scalars()``, but the only necessary argument is *tags*. Args: tags (list): list of tags that have been used in ``add_scalar()`` Examples:: writer.add_custom_scalars_multilinechart(['twse/0050', 'twse/2330']) """ torch._C._log_api_usage_once("tensorboard.logging.add_custom_scalars_multilinechart") layout = {category: {title: ['Multiline', tags]}} self._get_file_writer().add_summary(custom_scalars(layout)) def add_custom_scalars_marginchart(self, tags, category='default', title='untitled'): """Shorthand for creating marginchart. Similar to ``add_custom_scalars()``, but the only necessary argument is *tags*, which should have exactly 3 elements. Args: tags (list): list of tags that have been used in ``add_scalar()`` Examples:: writer.add_custom_scalars_marginchart(['twse/0050', 'twse/2330', 'twse/2006']) """ torch._C._log_api_usage_once("tensorboard.logging.add_custom_scalars_marginchart") assert len(tags) == 3 layout = {category: {title: ['Margin', tags]}} self._get_file_writer().add_summary(custom_scalars(layout)) def add_custom_scalars(self, layout): """Create special chart by collecting charts tags in 'scalars'. Note that this function can only be called once for each SummaryWriter() object. Because it only provides metadata to tensorboard, the function can be called before or after the training loop. 
Args: layout (dict): {categoryName: *charts*}, where *charts* is also a dictionary {chartName: *ListOfProperties*}. The first element in *ListOfProperties* is the chart's type (one of **Multiline** or **Margin**) and the second element should be a list containing the tags you have used in add_scalar function, which will be collected into the new chart. Examples:: layout = {'Taiwan':{'twse':['Multiline',['twse/0050', 'twse/2330']]}, 'USA':{ 'dow':['Margin', ['dow/aaa', 'dow/bbb', 'dow/ccc']], 'nasdaq':['Margin', ['nasdaq/aaa', 'nasdaq/bbb', 'nasdaq/ccc']]}} writer.add_custom_scalars(layout) """ torch._C._log_api_usage_once("tensorboard.logging.add_custom_scalars") self._get_file_writer().add_summary(custom_scalars(layout)) def add_mesh(self, tag, vertices, colors=None, faces=None, config_dict=None, global_step=None, walltime=None): """Add meshes or 3D point clouds to TensorBoard. The visualization is based on Three.js, so it allows users to interact with the rendered object. Besides the basic definitions such as vertices, faces, users can further provide camera parameter, lighting condition, etc. Please check https://threejs.org/docs/index.html#manual/en/introduction/Creating-a-scene for advanced usage. Args: tag (string): Data identifier vertices (torch.Tensor): List of the 3D coordinates of vertices. colors (torch.Tensor): Colors for each vertex faces (torch.Tensor): Indices of vertices within each triangle. (Optional) config_dict: Dictionary with ThreeJS classes names and configuration. global_step (int): Global step value to record walltime (float): Optional override default walltime (time.time()) seconds after epoch of event Shape: vertices: :math:`(B, N, 3)`. (batch, number_of_vertices, channels) colors: :math:`(B, N, 3)`. The values should lie in [0, 255] for type `uint8` or [0, 1] for type `float`. faces: :math:`(B, N, 3)`. The values should lie in [0, number_of_vertices] for type `uint8`. 
Examples:: from torch.utils.tensorboard import SummaryWriter vertices_tensor = torch.as_tensor([ [1, 1, 1], [-1, -1, 1], [1, -1, -1], [-1, 1, -1], ], dtype=torch.float).unsqueeze(0) colors_tensor = torch.as_tensor([ [255, 0, 0], [0, 255, 0], [0, 0, 255], [255, 0, 255], ], dtype=torch.int).unsqueeze(0) faces_tensor = torch.as_tensor([ [0, 2, 3], [0, 3, 1], [0, 1, 2], [1, 3, 2], ], dtype=torch.int).unsqueeze(0) writer = SummaryWriter() writer.add_mesh('my_mesh', vertices=vertices_tensor, colors=colors_tensor, faces=faces_tensor) writer.close() """ torch._C._log_api_usage_once("tensorboard.logging.add_mesh") self._get_file_writer().add_summary(mesh(tag, vertices, colors, faces, config_dict), global_step, walltime) def flush(self): """Flushes the event file to disk. Call this method to make sure that all pending events have been written to disk. """ if self.all_writers is None: return for writer in self.all_writers.values(): writer.flush() def close(self): if self.all_writers is None: return # ignore double close for writer in self.all_writers.values(): writer.flush() writer.close() self.file_writer = self.all_writers = None def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.close()
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/utils/tensorboard/writer.py
0.818737
0.291176
writer.py
pypi
from __future__ import absolute_import from __future__ import division from __future__ import print_function import json import logging import numpy as np import os # pylint: disable=unused-import from six.moves import range from tensorboard.compat.proto.summary_pb2 import Summary from tensorboard.compat.proto.summary_pb2 import HistogramProto from tensorboard.compat.proto.summary_pb2 import SummaryMetadata from tensorboard.compat.proto.tensor_pb2 import TensorProto from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto from tensorboard.plugins.text.plugin_data_pb2 import TextPluginData from tensorboard.plugins.pr_curve.plugin_data_pb2 import PrCurvePluginData from tensorboard.plugins.custom_scalar import layout_pb2 from ._convert_np import make_np from ._utils import _prepare_video, convert_to_HWC def _calc_scale_factor(tensor): converted = tensor.numpy() if not isinstance(tensor, np.ndarray) else tensor return 1 if converted.dtype == np.uint8 else 255 def _draw_single_box(image, xmin, ymin, xmax, ymax, display_str, color='black', color_text='black', thickness=2): from PIL import ImageDraw, ImageFont font = ImageFont.load_default() draw = ImageDraw.Draw(image) (left, right, top, bottom) = (xmin, xmax, ymin, ymax) draw.line([(left, top), (left, bottom), (right, bottom), (right, top), (left, top)], width=thickness, fill=color) if display_str: text_bottom = bottom # Reverse list and print from bottom to top. text_width, text_height = font.getsize(display_str) margin = np.ceil(0.05 * text_height) draw.rectangle( [(left, text_bottom - text_height - 2 * margin), (left + text_width, text_bottom)], fill=color ) draw.text( (left + margin, text_bottom - text_height - margin), display_str, fill=color_text, font=font ) return image def hparams(hparam_dict=None, metric_dict=None): """Outputs three `Summary` protocol buffers needed by hparams plugin. 
`Experiment` keeps the metadata of an experiment, such as the name of the hyperparameters and the name of the metrics. `SessionStartInfo` keeps key-value pairs of the hyperparameters `SessionEndInfo` describes status of the experiment e.g. STATUS_SUCCESS Args: hparam_dict: A dictionary that contains names of the hyperparameters and their values. metric_dict: A dictionary that contains names of the metrics and their values. Returns: The `Summary` protobufs for Experiment, SessionStartInfo and SessionEndInfo """ import torch from six import string_types from tensorboard.plugins.hparams.api_pb2 import ( Experiment, HParamInfo, MetricInfo, MetricName, Status ) from tensorboard.plugins.hparams.metadata import ( PLUGIN_NAME, PLUGIN_DATA_VERSION, EXPERIMENT_TAG, SESSION_START_INFO_TAG, SESSION_END_INFO_TAG ) from tensorboard.plugins.hparams.plugin_data_pb2 import ( HParamsPluginData, SessionEndInfo, SessionStartInfo ) # TODO: expose other parameters in the future. # hp = HParamInfo(name='lr',display_name='learning rate', # type=DataType.DATA_TYPE_FLOAT64, domain_interval=Interval(min_value=10, # max_value=100)) # mt = MetricInfo(name=MetricName(tag='accuracy'), display_name='accuracy', # description='', dataset_type=DatasetType.DATASET_VALIDATION) # exp = Experiment(name='123', description='456', time_created_secs=100.0, # hparam_infos=[hp], metric_infos=[mt], user='tw') if not isinstance(hparam_dict, dict): logging.warning('parameter: hparam_dict should be a dictionary, nothing logged.') raise TypeError('parameter: hparam_dict should be a dictionary, nothing logged.') if not isinstance(metric_dict, dict): logging.warning('parameter: metric_dict should be a dictionary, nothing logged.') raise TypeError('parameter: metric_dict should be a dictionary, nothing logged.') hps = [HParamInfo(name=k) for k in hparam_dict.keys()] mts = [MetricInfo(name=MetricName(tag=k)) for k in metric_dict.keys()] exp = Experiment(hparam_infos=hps, metric_infos=mts) content = 
HParamsPluginData(experiment=exp, version=PLUGIN_DATA_VERSION) smd = SummaryMetadata( plugin_data=SummaryMetadata.PluginData( plugin_name=PLUGIN_NAME, content=content.SerializeToString() ) ) exp = Summary(value=[Summary.Value(tag=EXPERIMENT_TAG, metadata=smd)]) ssi = SessionStartInfo() for k, v in hparam_dict.items(): if isinstance(v, int) or isinstance(v, float): ssi.hparams[k].number_value = v continue if isinstance(v, string_types): ssi.hparams[k].string_value = v continue if isinstance(v, bool): ssi.hparams[k].bool_value = v continue if isinstance(v, torch.Tensor): v = make_np(v)[0] ssi.hparams[k].number_value = v continue raise ValueError('value should be one of int, float, str, bool, or torch.Tensor') content = HParamsPluginData(session_start_info=ssi, version=PLUGIN_DATA_VERSION) smd = SummaryMetadata( plugin_data=SummaryMetadata.PluginData( plugin_name=PLUGIN_NAME, content=content.SerializeToString() ) ) ssi = Summary(value=[Summary.Value(tag=SESSION_START_INFO_TAG, metadata=smd)]) sei = SessionEndInfo(status=Status.Value('STATUS_SUCCESS')) content = HParamsPluginData(session_end_info=sei, version=PLUGIN_DATA_VERSION) smd = SummaryMetadata( plugin_data=SummaryMetadata.PluginData( plugin_name=PLUGIN_NAME, content=content.SerializeToString() ) ) sei = Summary(value=[Summary.Value(tag=SESSION_END_INFO_TAG, metadata=smd)]) return exp, ssi, sei def scalar(name, scalar, collections=None): """Outputs a `Summary` protocol buffer containing a single scalar value. The generated Summary has a Tensor.proto containing the input Tensor. Args: name: A name for the generated node. Will also serve as the series name in TensorBoard. tensor: A real numeric Tensor containing a single value. collections: Optional list of graph collections keys. The new summary op is added to these collections. Defaults to `[GraphKeys.SUMMARIES]`. Returns: A scalar `Tensor` of type `string`. Which contains a `Summary` protobuf. Raises: ValueError: If tensor has the wrong shape or type. 
""" scalar = make_np(scalar) assert(scalar.squeeze().ndim == 0), 'scalar should be 0D' scalar = float(scalar) return Summary(value=[Summary.Value(tag=name, simple_value=scalar)]) def histogram_raw(name, min, max, num, sum, sum_squares, bucket_limits, bucket_counts): # pylint: disable=line-too-long """Outputs a `Summary` protocol buffer with a histogram. The generated [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) has one summary value containing a histogram for `values`. Args: name: A name for the generated node. Will also serve as a series name in TensorBoard. min: A float or int min value max: A float or int max value num: Int number of values sum: Float or int sum of all values sum_squares: Float or int sum of squares for all values bucket_limits: A numeric `Tensor` with upper value per bucket bucket_counts: A numeric `Tensor` with number of values per bucket Returns: A scalar `Tensor` of type `string`. The serialized `Summary` protocol buffer. """ hist = HistogramProto(min=min, max=max, num=num, sum=sum, sum_squares=sum_squares, bucket_limit=bucket_limits, bucket=bucket_counts) return Summary(value=[Summary.Value(tag=name, histo=hist)]) def histogram(name, values, bins, max_bins=None): # pylint: disable=line-too-long """Outputs a `Summary` protocol buffer with a histogram. The generated [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) has one summary value containing a histogram for `values`. This op reports an `InvalidArgument` error if any value is not finite. Args: name: A name for the generated node. Will also serve as a series name in TensorBoard. values: A real numeric `Tensor`. Any shape. Values to use to build the histogram. Returns: A scalar `Tensor` of type `string`. The serialized `Summary` protocol buffer. 
""" values = make_np(values) hist = make_histogram(values.astype(float), bins, max_bins) return Summary(value=[Summary.Value(tag=name, histo=hist)]) def make_histogram(values, bins, max_bins=None): """Convert values into a histogram proto using logic from histogram.cc.""" if values.size == 0: raise ValueError('The input has no element.') values = values.reshape(-1) counts, limits = np.histogram(values, bins=bins) num_bins = len(counts) if max_bins is not None and num_bins > max_bins: subsampling = num_bins // max_bins subsampling_remainder = num_bins % subsampling if subsampling_remainder != 0: counts = np.pad(counts, pad_width=[[0, subsampling - subsampling_remainder]], mode="constant", constant_values=0) counts = counts.reshape(-1, subsampling).sum(axis=-1) new_limits = np.empty((counts.size + 1,), limits.dtype) new_limits[:-1] = limits[:-1:subsampling] new_limits[-1] = limits[-1] limits = new_limits # Find the first and the last bin defining the support of the histogram: cum_counts = np.cumsum(np.greater(counts, 0, dtype=np.int32)) start, end = np.searchsorted(cum_counts, [0, cum_counts[-1] - 1], side="right") start = int(start) end = int(end) + 1 del cum_counts # TensorBoard only includes the right bin limits. To still have the leftmost limit # included, we include an empty bin left. # If start == 0, we need to add an empty one left, otherwise we can just include the bin left to the # first nonzero-count bin: counts = counts[start - 1:end] if start > 0 else np.concatenate([[0], counts[:end]]) limits = limits[start:end + 1] if counts.size == 0 or limits.size == 0: raise ValueError('The histogram is empty, please file a bug report.') sum_sq = values.dot(values) return HistogramProto(min=values.min(), max=values.max(), num=len(values), sum=values.sum(), sum_squares=sum_sq, bucket_limit=limits.tolist(), bucket=counts.tolist()) def image(tag, tensor, rescale=1, dataformats='NCHW'): """Outputs a `Summary` protocol buffer with images. 
The summary has up to `max_images` summary values containing images. The images are built from `tensor` which must be 3-D with shape `[height, width, channels]` and where `channels` can be: * 1: `tensor` is interpreted as Grayscale. * 3: `tensor` is interpreted as RGB. * 4: `tensor` is interpreted as RGBA. The `name` in the outputted Summary.Value protobufs is generated based on the name, with a suffix depending on the max_outputs setting: * If `max_outputs` is 1, the summary value tag is '*name*/image'. * If `max_outputs` is greater than 1, the summary value tags are generated sequentially as '*name*/image/0', '*name*/image/1', etc. Args: tag: A name for the generated node. Will also serve as a series name in TensorBoard. tensor: A 3-D `uint8` or `float32` `Tensor` of shape `[height, width, channels]` where `channels` is 1, 3, or 4. 'tensor' can either have values in [0, 1] (float32) or [0, 255] (uint8). The image() function will scale the image values to [0, 255] by applying a scale factor of either 1 (uint8) or 255 (float32). Returns: A scalar `Tensor` of type `string`. The serialized `Summary` protocol buffer. 
""" tensor = make_np(tensor) tensor = convert_to_HWC(tensor, dataformats) # Do not assume that user passes in values in [0, 255], use data type to detect scale_factor = _calc_scale_factor(tensor) tensor = tensor.astype(np.float32) tensor = (tensor * scale_factor).astype(np.uint8) image = make_image(tensor, rescale=rescale) return Summary(value=[Summary.Value(tag=tag, image=image)]) def image_boxes(tag, tensor_image, tensor_boxes, rescale=1, dataformats='CHW'): '''Outputs a `Summary` protocol buffer with images.''' tensor_image = make_np(tensor_image) tensor_image = convert_to_HWC(tensor_image, dataformats) tensor_boxes = make_np(tensor_boxes) tensor_image = tensor_image.astype( np.float32) * _calc_scale_factor(tensor_image) image = make_image(tensor_image.astype(np.uint8), rescale=rescale, rois=tensor_boxes) return Summary(value=[Summary.Value(tag=tag, image=image)]) def draw_boxes(disp_image, boxes): # xyxy format num_boxes = boxes.shape[0] list_gt = range(num_boxes) for i in list_gt: disp_image = _draw_single_box(disp_image, boxes[i, 0], boxes[i, 1], boxes[i, 2], boxes[i, 3], display_str=None, color='Red') return disp_image def make_image(tensor, rescale=1, rois=None): """Convert a numpy representation of an image to Image protobuf""" from PIL import Image height, width, channel = tensor.shape scaled_height = int(height * rescale) scaled_width = int(width * rescale) image = Image.fromarray(tensor) if rois is not None: image = draw_boxes(image, rois) image = image.resize((scaled_width, scaled_height), Image.ANTIALIAS) import io output = io.BytesIO() image.save(output, format='PNG') image_string = output.getvalue() output.close() return Summary.Image(height=height, width=width, colorspace=channel, encoded_image_string=image_string) def video(tag, tensor, fps=4): tensor = make_np(tensor) tensor = _prepare_video(tensor) # If user passes in uint8, then we don't need to rescale by 255 scale_factor = _calc_scale_factor(tensor) tensor = tensor.astype(np.float32) tensor = 
(tensor * scale_factor).astype(np.uint8) video = make_video(tensor, fps) return Summary(value=[Summary.Value(tag=tag, image=video)]) def make_video(tensor, fps): try: import moviepy # noqa: F401 except ImportError: print('add_video needs package moviepy') return try: from moviepy import editor as mpy except ImportError: print("moviepy is installed, but can't import moviepy.editor.", "Some packages could be missing [imageio, requests]") return import tempfile t, h, w, c = tensor.shape # encode sequence of images into gif string clip = mpy.ImageSequenceClip(list(tensor), fps=fps) filename = tempfile.NamedTemporaryFile(suffix='.gif', delete=False).name try: # newer version of moviepy use logger instead of progress_bar argument. clip.write_gif(filename, verbose=False, logger=None) except TypeError: try: # older version of moviepy does not support progress_bar argument. clip.write_gif(filename, verbose=False, progress_bar=False) except TypeError: clip.write_gif(filename, verbose=False) with open(filename, 'rb') as f: tensor_string = f.read() try: os.remove(filename) except OSError: logging.warning('The temporary file used by moviepy cannot be deleted.') return Summary.Image(height=h, width=w, colorspace=c, encoded_image_string=tensor_string) def audio(tag, tensor, sample_rate=44100): tensor = make_np(tensor) tensor = tensor.squeeze() if abs(tensor).max() > 1: print('warning: audio amplitude out of range, auto clipped.') tensor = tensor.clip(-1, 1) assert(tensor.ndim == 1), 'input tensor should be 1 dimensional.' 
tensor_list = [int(32767.0 * x) for x in tensor] import io import wave import struct fio = io.BytesIO() wave_write = wave.open(fio, 'wb') wave_write.setnchannels(1) wave_write.setsampwidth(2) wave_write.setframerate(sample_rate) tensor_enc = b'' for v in tensor_list: tensor_enc += struct.pack('<h', v) wave_write.writeframes(tensor_enc) wave_write.close() audio_string = fio.getvalue() fio.close() audio = Summary.Audio(sample_rate=sample_rate, num_channels=1, length_frames=len(tensor_list), encoded_audio_string=audio_string, content_type='audio/wav') return Summary(value=[Summary.Value(tag=tag, audio=audio)]) def custom_scalars(layout): categories = [] for k, v in layout.items(): charts = [] for chart_name, chart_meatadata in v.items(): tags = chart_meatadata[1] if chart_meatadata[0] == 'Margin': assert len(tags) == 3 mgcc = layout_pb2.MarginChartContent(series=[layout_pb2.MarginChartContent.Series(value=tags[0], lower=tags[1], upper=tags[2])]) chart = layout_pb2.Chart(title=chart_name, margin=mgcc) else: mlcc = layout_pb2.MultilineChartContent(tag=tags) chart = layout_pb2.Chart(title=chart_name, multiline=mlcc) charts.append(chart) categories.append(layout_pb2.Category(title=k, chart=charts)) layout = layout_pb2.Layout(category=categories) plugin_data = SummaryMetadata.PluginData(plugin_name='custom_scalars') smd = SummaryMetadata(plugin_data=plugin_data) tensor = TensorProto(dtype='DT_STRING', string_val=[layout.SerializeToString()], tensor_shape=TensorShapeProto()) return Summary(value=[Summary.Value(tag='custom_scalars__config__', tensor=tensor, metadata=smd)]) def text(tag, text): plugin_data = SummaryMetadata.PluginData( plugin_name='text', content=TextPluginData(version=0).SerializeToString()) smd = SummaryMetadata(plugin_data=plugin_data) tensor = TensorProto(dtype='DT_STRING', string_val=[text.encode(encoding='utf_8')], tensor_shape=TensorShapeProto(dim=[TensorShapeProto.Dim(size=1)])) return Summary(value=[Summary.Value(tag=tag + '/text_summary', 
metadata=smd, tensor=tensor)]) def pr_curve_raw(tag, tp, fp, tn, fn, precision, recall, num_thresholds=127, weights=None): if num_thresholds > 127: # weird, value > 127 breaks protobuf num_thresholds = 127 data = np.stack((tp, fp, tn, fn, precision, recall)) pr_curve_plugin_data = PrCurvePluginData( version=0, num_thresholds=num_thresholds).SerializeToString() plugin_data = SummaryMetadata.PluginData( plugin_name='pr_curves', content=pr_curve_plugin_data) smd = SummaryMetadata(plugin_data=plugin_data) tensor = TensorProto(dtype='DT_FLOAT', float_val=data.reshape(-1).tolist(), tensor_shape=TensorShapeProto( dim=[TensorShapeProto.Dim(size=data.shape[0]), TensorShapeProto.Dim(size=data.shape[1])])) return Summary(value=[Summary.Value(tag=tag, metadata=smd, tensor=tensor)]) def pr_curve(tag, labels, predictions, num_thresholds=127, weights=None): # weird, value > 127 breaks protobuf num_thresholds = min(num_thresholds, 127) data = compute_curve(labels, predictions, num_thresholds=num_thresholds, weights=weights) pr_curve_plugin_data = PrCurvePluginData( version=0, num_thresholds=num_thresholds).SerializeToString() plugin_data = SummaryMetadata.PluginData( plugin_name='pr_curves', content=pr_curve_plugin_data) smd = SummaryMetadata(plugin_data=plugin_data) tensor = TensorProto(dtype='DT_FLOAT', float_val=data.reshape(-1).tolist(), tensor_shape=TensorShapeProto( dim=[TensorShapeProto.Dim(size=data.shape[0]), TensorShapeProto.Dim(size=data.shape[1])])) return Summary(value=[Summary.Value(tag=tag, metadata=smd, tensor=tensor)]) # https://github.com/tensorflow/tensorboard/blob/master/tensorboard/plugins/pr_curve/summary.py def compute_curve(labels, predictions, num_thresholds=None, weights=None): _MINIMUM_COUNT = 1e-7 if weights is None: weights = 1.0 # Compute bins of true positives and false positives. 
bucket_indices = np.int32(np.floor(predictions * (num_thresholds - 1))) float_labels = labels.astype(np.float) histogram_range = (0, num_thresholds - 1) tp_buckets, _ = np.histogram( bucket_indices, bins=num_thresholds, range=histogram_range, weights=float_labels * weights) fp_buckets, _ = np.histogram( bucket_indices, bins=num_thresholds, range=histogram_range, weights=(1.0 - float_labels) * weights) # Obtain the reverse cumulative sum. tp = np.cumsum(tp_buckets[::-1])[::-1] fp = np.cumsum(fp_buckets[::-1])[::-1] tn = fp[0] - fp fn = tp[0] - tp precision = tp / np.maximum(_MINIMUM_COUNT, tp + fp) recall = tp / np.maximum(_MINIMUM_COUNT, tp + fn) return np.stack((tp, fp, tn, fn, precision, recall)) def _get_tensor_summary(name, display_name, description, tensor, content_type, components, json_config): """Creates a tensor summary with summary metadata. Args: name: Uniquely identifiable name of the summary op. Could be replaced by combination of name and type to make it unique even outside of this summary. display_name: Will be used as the display name in TensorBoard. Defaults to `name`. description: A longform readable description of the summary data. Markdown is supported. tensor: Tensor to display in summary. content_type: Type of content inside the Tensor. components: Bitmask representing present parts (vertices, colors, etc.) that belong to the summary. json_config: A string, JSON-serialized dictionary of ThreeJS classes configuration. Returns: Tensor summary with metadata. 
""" import torch from tensorboard.plugins.mesh import metadata tensor = torch.as_tensor(tensor) tensor_metadata = metadata.create_summary_metadata( name, display_name, content_type, components, tensor.shape, description, json_config=json_config) tensor = TensorProto(dtype='DT_FLOAT', float_val=tensor.reshape(-1).tolist(), tensor_shape=TensorShapeProto(dim=[ TensorShapeProto.Dim(size=tensor.shape[0]), TensorShapeProto.Dim(size=tensor.shape[1]), TensorShapeProto.Dim(size=tensor.shape[2]), ])) tensor_summary = Summary.Value( tag=metadata.get_instance_name(name, content_type), tensor=tensor, metadata=tensor_metadata, ) return tensor_summary def _get_json_config(config_dict): """Parses and returns JSON string from python dictionary.""" json_config = '{}' if config_dict is not None: json_config = json.dumps(config_dict, sort_keys=True) return json_config # https://github.com/tensorflow/tensorboard/blob/master/tensorboard/plugins/mesh/summary.py def mesh(tag, vertices, colors, faces, config_dict, display_name=None, description=None): """Outputs a merged `Summary` protocol buffer with a mesh/point cloud. Args: tag: A name for this summary operation. vertices: Tensor of shape `[dim_1, ..., dim_n, 3]` representing the 3D coordinates of vertices. faces: Tensor of shape `[dim_1, ..., dim_n, 3]` containing indices of vertices within each triangle. colors: Tensor of shape `[dim_1, ..., dim_n, 3]` containing colors for each vertex. display_name: If set, will be used as the display name in TensorBoard. Defaults to `name`. description: A longform readable description of the summary data. Markdown is supported. config_dict: Dictionary with ThreeJS classes names and configuration. Returns: Merged summary for mesh/point cloud representation. 
""" from tensorboard.plugins.mesh.plugin_data_pb2 import MeshPluginData from tensorboard.plugins.mesh import metadata json_config = _get_json_config(config_dict) summaries = [] tensors = [ (vertices, MeshPluginData.VERTEX), (faces, MeshPluginData.FACE), (colors, MeshPluginData.COLOR) ] tensors = [tensor for tensor in tensors if tensor[0] is not None] components = metadata.get_components_bitmask([ content_type for (tensor, content_type) in tensors]) for tensor, content_type in tensors: summaries.append( _get_tensor_summary(tag, display_name, description, tensor, content_type, components, json_config)) return Summary(value=summaries)
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/utils/tensorboard/summary.py
0.814754
0.2135
summary.py
pypi
import math

import numpy as np

from ._convert_np import make_np
from ._utils import make_grid
from tensorboard.compat import tf
from tensorboard.plugins.projector.projector_config_pb2 import EmbeddingInfo


def make_tsv(metadata, save_path, metadata_header=None):
    """Write embedding metadata as `metadata.tsv` under `save_path`.

    Without a header each entry becomes one line; with a header each entry
    must be a row whose length matches the header, and rows are tab-joined.
    """
    if not metadata_header:
        metadata = [str(x) for x in metadata]
    else:
        assert len(metadata_header) == len(metadata[0]), \
            'len of header must be equal to the number of columns in metadata'
        metadata = ['\t'.join(str(e) for e in l)
                    for l in [metadata_header] + metadata]

    metadata_bytes = tf.compat.as_bytes('\n'.join(metadata) + '\n')
    fs = tf.io.gfile.get_filesystem(save_path)
    fs.write(fs.join(save_path, 'metadata.tsv'), metadata_bytes, binary_mode=True)


# https://github.com/tensorflow/tensorboard/issues/44 image label will be squared
def make_sprite(label_img, save_path):
    """Write the label images as a single square sprite `sprite.png`.

    The projector expects a square sprite, so images are laid out on an
    nrow x nrow grid and the remaining area is padded.
    """
    from PIL import Image
    from io import BytesIO

    # this ensures the sprite image has correct dimension as described in
    # https://www.tensorflow.org/get_started/embedding_viz
    nrow = int(math.ceil((label_img.size(0)) ** 0.5))
    arranged_img_CHW = make_grid(make_np(label_img), ncols=nrow)

    # augment images so that #images equals nrow*nrow
    # FIX: np.ndarray(...) returns UNINITIALIZED memory, so the padding rows of
    # the sprite contained nondeterministic garbage pixels; use zeros (black).
    arranged_augment_square_HWC = np.zeros((arranged_img_CHW.shape[2],
                                            arranged_img_CHW.shape[2], 3))
    arranged_img_HWC = arranged_img_CHW.transpose(1, 2, 0)  # chw -> hwc
    arranged_augment_square_HWC[:arranged_img_HWC.shape[0], :, :] = arranged_img_HWC

    im = Image.fromarray(np.uint8((arranged_augment_square_HWC * 255).clip(0, 255)))

    with BytesIO() as buf:
        im.save(buf, format="PNG")
        im_bytes = buf.getvalue()

    fs = tf.io.gfile.get_filesystem(save_path)
    fs.write(fs.join(save_path, 'sprite.png'), im_bytes, binary_mode=True)


def get_embedding_info(metadata, label_img, filesys, subdir, global_step, tag):
    """Build the `EmbeddingInfo` proto describing one logged embedding.

    Paths are only set for the artifacts that will actually be written
    (metadata tsv, sprite image).
    """
    info = EmbeddingInfo()
    info.tensor_name = "{}:{}".format(tag, str(global_step).zfill(5))
    info.tensor_path = filesys.join(subdir, 'tensors.tsv')
    if metadata is not None:
        info.metadata_path = filesys.join(subdir, 'metadata.tsv')
    if label_img is not None:
        info.sprite.image_path = filesys.join(subdir, 'sprite.png')
        # single_image_dim is (width, height)
        info.sprite.single_image_dim.extend([label_img.size(3), label_img.size(2)])
    return info


def write_pbtxt(save_path, contents):
    """Write the projector config text proto under `save_path`."""
    fs = tf.io.gfile.get_filesystem(save_path)
    config_path = fs.join(save_path, 'projector_config.pbtxt')
    fs.write(config_path, tf.compat.as_bytes(contents), binary_mode=True)


def make_mat(matlist, save_path):
    """Write the embedding vectors as tab-separated rows in `tensors.tsv`."""
    fs = tf.io.gfile.get_filesystem(save_path)
    with tf.io.gfile.GFile(fs.join(save_path, 'tensors.tsv'), 'wb') as f:
        for x in matlist:
            x = [str(i.item()) for i in x]
            f.write(tf.compat.as_bytes('\t'.join(x) + '\n'))
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/utils/tensorboard/_embedding.py
0.703549
0.425128
_embedding.py
pypi
import math

import torch
import torch.distributed as dist
# Absolute import of the same re-exported class (`from . import Sampler`);
# preferred per PEP 8 and keeps this module importable on its own.
from torch.utils.data.sampler import Sampler


class DistributedSampler(Sampler):
    """Sampler that restricts data loading to a subset of the dataset.

    It is especially useful in conjunction with
    :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
    process can pass a DistributedSampler instance as a DataLoader sampler,
    and load a subset of the original dataset that is exclusive to it.

    .. note::
        Dataset is assumed to be of constant size.

    Arguments:
        dataset: Dataset used for sampling.
        num_replicas (optional): Number of processes participating in
            distributed training.
        rank (optional): Rank of the current process within num_replicas.
        shuffle (optional): If true (default), sampler will shuffle the indices

    .. warning::
        In distributed mode, calling the ``set_epoch`` method is needed to
        make shuffling work; each process will use the same random seed
        otherwise.

    Example::

        >>> sampler = DistributedSampler(dataset) if is_distributed else None
        >>> loader = DataLoader(dataset, shuffle=(sampler is None),
        ...                     sampler=sampler)
        >>> for epoch in range(start_epoch, n_epochs):
        ...     if is_distributed:
        ...         sampler.set_epoch(epoch)
    """

    def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
        if num_replicas is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            num_replicas = dist.get_world_size()
        if rank is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            rank = dist.get_rank()
        # Fail fast on an out-of-range rank; otherwise the subsampling stride
        # below silently yields a wrong (possibly empty) shard.
        if rank >= num_replicas or rank < 0:
            raise ValueError(
                "Invalid rank {}, rank should be in the interval"
                " [0, {}]".format(rank, num_replicas - 1))
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        # Each replica draws the same (padded) number of samples so that all
        # processes iterate in lockstep.
        self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
        self.total_size = self.num_samples * self.num_replicas
        self.shuffle = shuffle

    def __iter__(self):
        # deterministically shuffle based on epoch
        g = torch.Generator()
        g.manual_seed(self.epoch)
        if self.shuffle:
            indices = torch.randperm(len(self.dataset), generator=g).tolist()
        else:
            indices = list(range(len(self.dataset)))

        # add extra samples to make it evenly divisible
        indices += indices[:(self.total_size - len(indices))]
        assert len(indices) == self.total_size

        # subsample: rank r takes positions r, r+num_replicas, r+2*num_replicas, ...
        indices = indices[self.rank:self.total_size:self.num_replicas]
        assert len(indices) == self.num_samples

        return iter(indices)

    def __len__(self):
        return self.num_samples

    def set_epoch(self, epoch):
        # Changes the shuffling seed; call once per epoch before iterating.
        self.epoch = epoch
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/utils/data/distributed.py
0.904924
0.63744
distributed.py
pypi
r""""Signal handling for multiprocessing data loading. NOTE [ Signal handling in multiprocessing data loading ] In cases like DataLoader, if a worker process dies due to bus error/segfault or just hang, the main process will hang waiting for data. This is difficult to avoid on PyTorch side as it can be caused by limited shm, or other libraries users call in the workers. In this file and `DataLoader.cpp`, we make our best effort to provide some error message to users when such unfortunate events happen. When a _BaseDataLoaderIter starts worker processes, their pids are registered in a defined in `DataLoader.cpp`: id(_BaseDataLoaderIter) => Collection[ Worker pids ] via `_set_worker_pids`. When an error happens in a worker process, the main process received a SIGCHLD, and Python will eventually call the handler registered below (in `_set_SIGCHLD_handler`). In the handler, the `_error_if_any_worker_fails` call checks all registered worker pids and raise proper error message to prevent main process from hanging waiting for data from worker. Additionally, at the beginning of each worker's `_utils.worker._worker_loop`, `_set_worker_signal_handlers` is called to register critical signal handlers (e.g., for SIGSEGV, SIGBUS, SIGFPE, SIGTERM) in C, which just prints an error message to stderr before triggering the default handler. So a message will also be printed from the worker process when it is killed by such signals. See NOTE [ Data Loader Multiprocessing Shutdown Logic ] for the reasoning of this signal handling design and other mechanism we implement to make our multiprocessing data loading robust to errors. """ import signal import threading from . import IS_WINDOWS # Some of the following imported functions are not used in this file, but are to # be used `_utils.signal_handling.XXXXX`. 
from torch._C import _set_worker_pids, _remove_worker_pids # noqa: F401 from torch._C import _error_if_any_worker_fails, _set_worker_signal_handlers # noqa: F401 _SIGCHLD_handler_set = False r"""Whether SIGCHLD handler is set for DataLoader worker failures. Only one handler needs to be set for all DataLoaders in a process.""" def _set_SIGCHLD_handler(): # Windows doesn't support SIGCHLD handler if IS_WINDOWS: return # can't set signal in child threads if not isinstance(threading.current_thread(), threading._MainThread): return global _SIGCHLD_handler_set if _SIGCHLD_handler_set: return previous_handler = signal.getsignal(signal.SIGCHLD) if not callable(previous_handler): # This doesn't catch default handler, but SIGCHLD default handler is a # no-op. previous_handler = None def handler(signum, frame): # This following call uses `waitid` with WNOHANG from C side. Therefore, # Python can still get and update the process status successfully. _error_if_any_worker_fails() if previous_handler is not None: previous_handler(signum, frame) signal.signal(signal.SIGCHLD, handler) _SIGCHLD_handler_set = True
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/utils/data/_utils/signal_handling.py
0.691289
0.559531
signal_handling.py
pypi
r""""Contains definitions of the methods used by the _BaseDataLoaderIter to put fetched tensors into pinned memory. These **needs** to be in global scope since Py2 doesn't support serializing static methods. """ import torch from torch._six import queue, container_abcs, string_classes from . import MP_STATUS_CHECK_INTERVAL from torch._utils import ExceptionWrapper def _pin_memory_loop(in_queue, out_queue, device_id, done_event): # This setting is thread local, and prevents the copy in pin_memory from # consuming all CPU cores. torch.set_num_threads(1) torch.cuda.set_device(device_id) # See NOTE [ Data Loader Multiprocessing Shutdown Logic ] for details on the # logic of this function. while not done_event.is_set(): try: r = in_queue.get(timeout=MP_STATUS_CHECK_INTERVAL) except queue.Empty: continue idx, data = r if not done_event.is_set() and not isinstance(data, ExceptionWrapper): try: data = pin_memory(data) except Exception: data = ExceptionWrapper( where="in pin memory thread for device {}".format(device_id)) r = (idx, data) while not done_event.is_set(): try: out_queue.put(r, timeout=MP_STATUS_CHECK_INTERVAL) break except queue.Full: continue del r # save memory def pin_memory(data): if isinstance(data, torch.Tensor): return data.pin_memory() elif isinstance(data, string_classes): return data elif isinstance(data, container_abcs.Mapping): return {k: pin_memory(sample) for k, sample in data.items()} elif isinstance(data, tuple) and hasattr(data, '_fields'): # namedtuple return type(data)(*(pin_memory(sample) for sample in data)) elif isinstance(data, container_abcs.Sequence): return [pin_memory(sample) for sample in data] elif hasattr(data, "pin_memory"): return data.pin_memory() else: return data
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/utils/data/_utils/pin_memory.py
0.721056
0.32336
pin_memory.py
pypi
r""""Contains definitions of the methods used by the _BaseDataLoaderIter workers to collate samples fetched from dataset into Tensor(s). These **needs** to be in global scope since Py2 doesn't support serializing static methods. """ import torch import re from torch._six import container_abcs, string_classes, int_classes np_str_obj_array_pattern = re.compile(r'[SaUO]') def default_convert(data): r"""Converts each NumPy array data field into a tensor""" elem_type = type(data) if isinstance(data, torch.Tensor): return data elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \ and elem_type.__name__ != 'string_': # array of string classes and object if elem_type.__name__ == 'ndarray' \ and np_str_obj_array_pattern.search(data.dtype.str) is not None: return data return torch.as_tensor(data) elif isinstance(data, container_abcs.Mapping): return {key: default_convert(data[key]) for key in data} elif isinstance(data, tuple) and hasattr(data, '_fields'): # namedtuple return elem_type(*(default_convert(d) for d in data)) elif isinstance(data, container_abcs.Sequence) and not isinstance(data, string_classes): return [default_convert(d) for d in data] else: return data default_collate_err_msg_format = ( "default_collate: batch must contain tensors, numpy arrays, numbers, " "dicts or lists; found {}") def default_collate(batch): r"""Puts each data field into a tensor with outer dimension batch size""" elem = batch[0] elem_type = type(elem) if isinstance(elem, torch.Tensor): out = None if torch.utils.data.get_worker_info() is not None: # If we're in a background process, concatenate directly into a # shared memory tensor to avoid an extra copy numel = sum([x.numel() for x in batch]) storage = elem.storage()._new_shared(numel) out = elem.new(storage) return torch.stack(batch, 0, out=out) elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \ and elem_type.__name__ != 'string_': elem = batch[0] if elem_type.__name__ == 'ndarray': # array of 
string classes and object if np_str_obj_array_pattern.search(elem.dtype.str) is not None: raise TypeError(default_collate_err_msg_format.format(elem.dtype)) return default_collate([torch.as_tensor(b) for b in batch]) elif elem.shape == (): # scalars return torch.as_tensor(batch) elif isinstance(elem, float): return torch.tensor(batch, dtype=torch.float64) elif isinstance(elem, int_classes): return torch.tensor(batch) elif isinstance(elem, string_classes): return batch elif isinstance(elem, container_abcs.Mapping): return {key: default_collate([d[key] for d in batch]) for key in elem} elif isinstance(elem, tuple) and hasattr(elem, '_fields'): # namedtuple return elem_type(*(default_collate(samples) for samples in zip(*batch))) elif isinstance(elem, container_abcs.Sequence): transposed = zip(*batch) return [default_collate(samples) for samples in transposed] raise TypeError(default_collate_err_msg_format.format(elem_type))
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/utils/data/_utils/collate.py
0.79162
0.551211
collate.py
pypi
import sys import torch def is_built(): r"""Returns whether PyTorch is built with CUDA support. Note that this doesn't necessarily mean CUDA is available; just that if this PyTorch binary were run a machine with working CUDA drivers and devices, we would be able to use it.""" return torch._C.has_cuda class cuFFTPlanCacheAttrContextProp(object): # Like regular ContextProp, but uses the `.device_index` attribute from the # calling object as the first argument to the getter and setter. def __init__(self, getter, setter): self.getter = getter self.setter = setter def __get__(self, obj, objtype): return self.getter(obj.device_index) def __set__(self, obj, val): if isinstance(self.setter, str): raise RuntimeError(self.setter) self.setter(obj.device_index, val) class cuFFTPlanCache(object): r""" Represents a specific plan cache for a specific `device_index`. The attributes `size` and `max_size`, and method `clear`, can fetch and/ or change properties of the C++ cuFFT plan cache. """ def __init__(self, device_index): self.device_index = device_index size = cuFFTPlanCacheAttrContextProp( torch._cufft_get_plan_cache_size, '.size is a read-only property showing the number of plans currently in the ' 'cache. To change the cache capacity, set cufft_plan_cache.max_size.') max_size = cuFFTPlanCacheAttrContextProp(torch._cufft_get_plan_cache_max_size, torch._cufft_set_plan_cache_max_size) def clear(self): return torch._cufft_clear_plan_cache(self.device_index) class cuFFTPlanCacheManager(object): r""" Represents all cuFFT plan caches. When indexed with a device object/index, this object returns the `cuFFTPlanCache` corresponding to that device. Finally, this object, when used directly as a `cuFFTPlanCache` object (e.g., setting the `.max_size`) attribute, the current device's cuFFT plan cache is used. 
""" __initialized = False def __init__(self): self.caches = [] self.__initialized = True def __getitem__(self, device): index = torch.cuda._utils._get_device_index(device) if index < 0 or index >= torch.cuda.device_count(): raise RuntimeError( ("cufft_plan_cache: expected 0 <= device index < {}, but got " "device with index {}").format(torch.cuda.device_count(), index)) if len(self.caches) == 0: self.caches.extend(cuFFTPlanCache(index) for index in range(torch.cuda.device_count())) return self.caches[index] def __getattr__(self, name): return getattr(self[torch.cuda.current_device()], name) def __setattr__(self, name, value): if self.__initialized: return setattr(self[torch.cuda.current_device()], name, value) else: return super(cuFFTPlanCacheManager, self).__setattr__(name, value) class CUDAModule(object): def __init__(self, m): self.__dict__ = m.__dict__ # You have to retain the old module, otherwise it will # get GC'ed and a lot of things will break. See: # https://stackoverflow.com/questions/47540722/how-do-i-use-the-sys-modules-replacement-trick-in-init-py-on-python-2 self.__old_mod = m cufft_plan_cache = cuFFTPlanCacheManager() # This is the sys.modules replacement trick, see # https://stackoverflow.com/questions/2447353/getattr-on-a-module/7668273#7668273 sys.modules[__name__] = CUDAModule(sys.modules[__name__])
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/backends/cuda/__init__.py
0.701406
0.382545
__init__.py
pypi
try: from urllib.parse import urlparse, urlunparse except ImportError: from urlparse import urlparse, urlunparse import torch._six as six import numbers import os from . import FileStore, TCPStore from .constants import default_pg_timeout _rendezvous_handlers = {} def register_rendezvous_handler(scheme, handler): """Registers a new rendezvous handler. Before we can run collective algorithms, participating processes need to find each other and exchange information to be able to communicate. We call this process rendezvous. The outcome of the rendezvous process is a triplet containing a shared key/value store, the rank of the process, and the total number of participating processes. If none of the bundled rendezvous methods apply to your execution environment you can opt to register your own rendezvous handler. Pick a unique name and use the URL scheme to identify it when calling the `rendezvous()` function. Arguments: scheme (str): URL scheme to identify your rendezvous handler. handler (function): Handler that is invoked when the `rendezvous()` function is called with a URL that uses the corresponding scheme. It must be a generator function that yields the triplet. """ global _rendezvous_handlers if scheme in _rendezvous_handlers: raise RuntimeError( "Rendezvous handler for {}:// already registered".format(scheme) ) _rendezvous_handlers[scheme] = handler def rendezvous(url, rank=-1, world_size=-1, **kwargs): if not isinstance(url, six.string_classes): raise RuntimeError("`url` must be a string. {}: {}".format(type(url), url)) if not isinstance(rank, numbers.Integral): raise RuntimeError("`rank` must be an integer. {}".format(rank)) if not isinstance(world_size, numbers.Integral): raise RuntimeError("`world_size` must be an integer. {}".format(world_size)) # Append node-specific arguments. 
result = urlparse(url) if rank != -1 or world_size != -1: query_dict = dict( pair.split("=") for pair in filter(None, result.query.split("&")) ) assert ( "rank" not in query_dict and "world_size" not in query_dict ), "The url: {url} has node-specific arguments(rank, world_size) already.".format( url=url ) if rank != -1: query_dict["rank"] = rank if world_size != -1: query_dict["world_size"] = world_size result = result._replace( query="{}".format("&".join(["{}={}".format(k, v) for k, v in query_dict.items()])) ) url = urlunparse(result) if result.scheme not in _rendezvous_handlers: raise RuntimeError("No rendezvous handler for {}://".format(result.scheme)) return _rendezvous_handlers[result.scheme](url, **kwargs) def _rendezvous_error(msg): return ValueError("Error initializing torch.distributed using " + msg) def _file_rendezvous_handler(url, **kwargs): def _error(msg): return _rendezvous_error("file:// rendezvous: " + msg) result = urlparse(url) path = result.path if not path: raise _error("path missing") query = dict(pair.split("=") for pair in filter(None, result.query.split("&"))) if "rank" not in query: raise _error("rank parameter missing") if "world_size" not in query: raise _error("world size parameter missing") rank = int(query["rank"]) world_size = int(query["world_size"]) store = FileStore(path, world_size) yield (store, rank, world_size) # If this configuration is invalidated, there is nothing we can do about it raise RuntimeError("Unable to perform rerendezvous using file:// method") def _tcp_rendezvous_handler(url, timeout=default_pg_timeout, **kwargs): def _error(msg): return _rendezvous_error("tcp:// rendezvous: " + msg) result = urlparse(url) if not result.port: raise _error("port number missing") query = dict(pair.split("=") for pair in filter(None, result.query.split("&"))) if "rank" not in query: raise _error("rank parameter missing") if "world_size" not in query: raise _error("world size parameter missing") rank = int(query["rank"]) world_size 
= int(query["world_size"]) start_daemon = rank == 0 store = TCPStore(result.hostname, result.port, world_size, start_daemon, timeout) yield (store, rank, world_size) # If this configuration is invalidated, there is nothing we can do about it raise RuntimeError("Unable to perform rerendezvous using tcp:// method") def _env_rendezvous_handler(url, timeout=default_pg_timeout, **kwargs): def _error(msg): return _rendezvous_error("env:// rendezvous: " + msg) def _env_error(var): return _error("environment variable %s expected, but not set" % var) result = urlparse(url) query = dict(pair.split("=") for pair in filter(None, result.query.split("&"))) if "rank" in query: rank = int(query["rank"]) else: rank = os.environ.get("RANK", None) if rank is None: raise _env_error("RANK") if "world_size" in query: world_size = int(query["world_size"]) else: world_size = os.environ.get("WORLD_SIZE", None) if world_size is None: raise _env_error("WORLD_SIZE") master_addr = os.environ.get("MASTER_ADDR", None) if master_addr is None: raise _env_error("MASTER_ADDR") master_port = os.environ.get("MASTER_PORT", None) if master_port is None: raise _env_error("MASTER_PORT") # Converting before creating the store rank = int(rank) world_size = int(world_size) master_port = int(master_port) # Now start the TCP store daemon on the rank 0 start_daemon = rank == 0 store = TCPStore(master_addr, master_port, world_size, start_daemon, timeout) yield (store, rank, world_size) # If this configuration is invalidated, there is nothing we can do about it raise RuntimeError("Unable to perform rerendezvous using env:// method") register_rendezvous_handler("file", _file_rendezvous_handler) register_rendezvous_handler("tcp", _tcp_rendezvous_handler) register_rendezvous_handler("env", _env_rendezvous_handler)
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/distributed/rendezvous.py
0.752377
0.282779
rendezvous.py
pypi
import torch.distributed.rpc as rpc import torch.distributed.autograd as dist_autograd from collections import defaultdict from threading import Lock class _LocalOptimizer: # Ideally we would only need to share a lock for instances of # _LocalOptimizer that deal with the same parameters. We are # making a simplifying assumption here that if there is more # than one instance of _LocalOptimizer per worker, they will # be optimizing the same parameters (e.g. each data parallel # trainer will create its own instance of _LocalOptimizer but # they will all optimize the same parameters on each worker) global_lock = Lock() def __init__(self, optim_cls, local_params_rref, *args, **kwargs): self.optim = optim_cls( [rref.local_value() for rref in local_params_rref], *args, **kwargs) def step(self, autograd_ctx_id): all_local_grads = dist_autograd.get_gradients(autograd_ctx_id) with _LocalOptimizer.global_lock: for param, grad in all_local_grads.items(): param.grad = grad self.optim.step() def _new_local_optimizer(optim_cls, local_params_rref, *args, **kwargs): return rpc.RRef( _LocalOptimizer(optim_cls, local_params_rref, *args, **kwargs)) def _local_optimizer_step(local_optim_rref, autograd_ctx_id): local_optim = local_optim_rref.local_value() local_optim.step(autograd_ctx_id) def _wait_for_all(rpc_futs): # TODO: improve error propagation exception = None results = [] for fut in rpc_futs: try: results.append(fut.wait()) except Exception as e: results.append(e) exception = e if exception is not None: raise exception return results class DistributedOptimizer: """ DistributedOptimizer takes remote references to parameters scattered across workers and applies the given optimizer locally for each parameter. This class uses :meth:`~torch.distributed.autograd.get_gradients` in order to retrieve the gradients for specific parameters. 
Concurrent calls to :meth:`~torch.distributed.optim.DistributedOptimizer.step`, either from the same or different clients, will be serialized on each worker -- as each worker's optimizer can only work on one set of gradients at a time. However, there is no guarantee that the full forward-backward-optimizer sequence will execute for one client at a time. This means that the gradients being applied may not correspond to the latest forward pass executed on a given worker. Also, there is no guaranteed ordering across workers. Args: optimizer_class (optim.Optimizer): the class of optimizer to instantiate on each worker. params_rref (list[RRef]): list of RRefs to local or remote parameters to optimize. args: arguments to pass to the optimizer constructor on each worker. kwargs: arguments to pass to the optimizer constructor on each worker. Example:: >>> import torch.distributed.autograd as dist_autograd >>> import torch.distributed.rpc as rpc >>> from torch import optim >>> from torch.distributed.optim import DistributedOptimizer >>> >>> with dist_autograd.context() as context_id: >>> # Forward pass. >>> rref1 = rpc.remote("worker1", torch.add, args=(torch.ones(2), 3)) >>> rref2 = rpc.remote("worker1", torch.add, args=(torch.ones(2), 1)) >>> loss = rref1.to_here() + rref2.to_here() >>> >>> # Backward pass. >>> dist_autograd.backward(context_id, [loss.sum()]) >>> >>> # Optimizer. 
>>> dist_optim = DistributedOptimizer( >>> optim.SGD, >>> [rref1, rref2], >>> lr=0.05, >>> ) >>> dist_optim.step(context_id) """ def __init__(self, optimizer_class, params_rref, *args, **kwargs): per_worker_params_rref = defaultdict(list) for param in params_rref: per_worker_params_rref[param.owner()].append(param) remote_optim_futs = [] for worker, param_rrefs in per_worker_params_rref.items(): remote_optim_rref_fut = rpc.rpc_async( worker, _new_local_optimizer, args=(optimizer_class, param_rrefs) + args, kwargs=kwargs, ) remote_optim_futs.append(remote_optim_rref_fut) self.remote_optimizers = _wait_for_all(remote_optim_futs) def step(self, context_id): """ Performs a single optimization step. This will call :meth:`torch.optim.Optimizer.step` on each worker containing parameters to be optimized, and will block until all workers return. The provided ``context_id`` will be used to retrieve the corresponding :class:`~torch.distributed.autograd.context` that contains the gradients that should be applied to the parameters. Args: context_id: the autograd context id for which we should run the optimizer step. """ dist_autograd._is_valid_context(context_id) rpc_futs = [] for optim in self.remote_optimizers: rpc_futs.append(rpc.rpc_async( optim.owner(), _local_optimizer_step, args=(optim, context_id), )) _wait_for_all(rpc_futs)
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/distributed/optim/optimizer.py
0.770983
0.378861
optimizer.py
pypi
from __future__ import absolute_import, division, print_function, unicode_literals import multiprocessing import multiprocessing.connection import signal import sys import warnings from . import _prctl_pr_set_pdeathsig def _wrap(fn, i, args, error_queue): # prctl(2) is a Linux specific system call. # On other systems the following function call has no effect. # This is set to ensure that non-daemonic child processes can # terminate if their parent terminates before they do. _prctl_pr_set_pdeathsig(signal.SIGINT) try: fn(i, *args) except KeyboardInterrupt: pass # SIGINT; Killed by parent, do nothing except Exception: # Propagate exception to parent process, keeping original traceback import traceback error_queue.put(traceback.format_exc()) sys.exit(1) # Multiprocessing contexts are introduced at Python 3.4 _supports_context = sys.version_info >= (3, 4) def _python_version_check(): if not _supports_context: raise RuntimeError("Requires python 3.4 or higher to use " "torch.multiprocessing.spawn and " "torch.multiprocessing.ProcessContext helper " "to launch multiple processes. If you are using " "this for distributed training and have a lower " "version of python, please use " "torch.distributed.launch instead.") class ProcessContext: def __init__(self, processes, error_queues): _python_version_check() self.error_queues = error_queues self.processes = processes self.sentinels = { process.sentinel: index for index, process in enumerate(processes) } def pids(self): return [int(process.pid) for process in self.processes] def join(self, timeout=None): r""" Tries to join one or more processes in this spawn context. If one of them exited with a non-zero exit status, this function kills the remaining processes and raises an exception with the cause of the first process exiting. Returns ``True`` if all processes have been joined successfully, ``False`` if there are more processes that need to be joined. Arguments: timeout (float): Wait this long before giving up on waiting. 
""" # Ensure this function can be called even when we're done. if len(self.sentinels) == 0: return True # Wait for any process to fail or all of them to succeed. ready = multiprocessing.connection.wait( self.sentinels.keys(), timeout=timeout, ) error_index = None for sentinel in ready: index = self.sentinels.pop(sentinel) process = self.processes[index] process.join() if process.exitcode != 0: error_index = index break # Return if there was no error. if error_index is None: # Return whether or not all processes have been joined. return len(self.sentinels) == 0 # Assume failure. Terminate processes that are still alive. for process in self.processes: if process.is_alive(): process.terminate() process.join() # There won't be an error on the queue if the process crashed. if self.error_queues[error_index].empty(): exitcode = self.processes[error_index].exitcode if exitcode < 0: name = signal.Signals(-exitcode).name raise Exception( "process %d terminated with signal %s" % (error_index, name) ) else: raise Exception( "process %d terminated with exit code %d" % (error_index, exitcode) ) original_trace = self.error_queues[error_index].get() msg = "\n\n-- Process %d terminated with the following error:\n" % error_index msg += original_trace raise Exception(msg) class SpawnContext(ProcessContext): def __init__(self, processes, error_queues): warnings.warn('SpawnContext is renamed to ProcessContext since 1.4 release.') super(SpawnContext, self).__init__(self, processes, error_queues) pass # Note: [start_processes] # mp.start_processes handles both start_method='spawn' and 'fork'. It's supposed to be a # more generalized API than mp.spawn. Currently we only document mp.spawn as it's the # CUDA compatible start_method. However, in environments like Ipython notebooks, 'fork' # works better than 'spawn'. Every helper function we created for mp.spawn is indeed # general enough, and backends like XLA can reuse them in Colab notebooks as well. 
# Currently we only add this API first, we can consider adding it to documentation as # needed in the future. def start_processes(fn, args=(), nprocs=1, join=True, daemon=False, start_method='spawn'): _python_version_check() mp = multiprocessing.get_context(start_method) error_queues = [] processes = [] for i in range(nprocs): error_queue = mp.SimpleQueue() process = mp.Process( target=_wrap, args=(fn, i, args, error_queue), daemon=daemon, ) process.start() error_queues.append(error_queue) processes.append(process) context = ProcessContext(processes, error_queues) if not join: return context # Loop on join until it returns True or raises an exception. while not context.join(): pass def spawn(fn, args=(), nprocs=1, join=True, daemon=False, start_method='spawn'): r"""Spawns ``nprocs`` processes that run ``fn`` with ``args``. If one of the processes exits with a non-zero exit status, the remaining processes are killed and an exception is raised with the cause of termination. In the case an exception was caught in the child process, it is forwarded and its traceback is included in the exception raised in the parent process. Arguments: fn (function): Function is called as the entrypoint of the spawned process. This function must be defined at the top level of a module so it can be pickled and spawned. This is a requirement imposed by multiprocessing. The function is called as ``fn(i, *args)``, where ``i`` is the process index and ``args`` is the passed through tuple of arguments. args (tuple): Arguments passed to ``fn``. nprocs (int): Number of processes to spawn. join (bool): Perform a blocking join on all processes. daemon (bool): The spawned processes' daemon flag. If set to True, daemonic processes will be created. start_method (string): (deprecated) this method will always use ``spawn`` as the start method. To use a different start method use ``start_processes()``. 
Returns: None if ``join`` is ``True``, :class:`~ProcessContext` if ``join`` is ``False`` """ if start_method != 'spawn': msg = ('This method only supports start_method=spawn (got: %s).\n' 'To use a different start_method use:\n\t\t' ' torch.multiprocessing.start_process(...)' % start_method) warnings.warn(msg) return start_processes(fn, args, nprocs, join, daemon, start_method='spawn')
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/multiprocessing/spawn.py
0.562657
0.169028
spawn.py
pypi
import torch import functools import inspect class _DecoratorContextManager: """Allow a context manager to be used as a decorator""" def __call__(self, func): if inspect.isgeneratorfunction(func): return self._wrap_generator(func) @functools.wraps(func) def decorate_context(*args, **kwargs): with self: return func(*args, **kwargs) return decorate_context def _wrap_generator(self, func): """Wrap each generator invocation with the context manager""" @functools.wraps(func) def generator_context(*args, **kwargs): gen = func(*args, **kwargs) while True: try: with self: x = next(gen) yield x except StopIteration: break return generator_context class no_grad(_DecoratorContextManager): r"""Context-manager that disabled gradient calculation. Disabling gradient calculation is useful for inference, when you are sure that you will not call :meth:`Tensor.backward()`. It will reduce memory consumption for computations that would otherwise have `requires_grad=True`. In this mode, the result of every computation will have `requires_grad=False`, even when the inputs have `requires_grad=True`. This mode has no effect when using :class:`~enable_grad` context manager . This context manager is thread local; it will not affect computation in other threads. Also functions as a decorator. (Make sure to instantiate with parenthesis.) Example:: >>> x = torch.tensor([1], requires_grad=True) >>> with torch.no_grad(): ... y = x * 2 >>> y.requires_grad False >>> @torch.no_grad() ... def doubler(x): ... return x * 2 >>> z = doubler(x) >>> z.requires_grad False """ def __enter__(self): self.prev = torch.is_grad_enabled() torch._C.set_grad_enabled(False) def __exit__(self, *args): torch.set_grad_enabled(self.prev) return False class enable_grad(_DecoratorContextManager): r"""Context-manager that enables gradient calculation. Enables gradient calculation, if it has been disabled via :class:`~no_grad` or :class:`~set_grad_enabled`. 
This context manager is thread local; it will not affect computation in other threads. Also functions as a decorator. (Make sure to instantiate with parenthesis.) Example:: >>> x = torch.tensor([1], requires_grad=True) >>> with torch.no_grad(): ... with torch.enable_grad(): ... y = x * 2 >>> y.requires_grad True >>> y.backward() >>> x.grad >>> @torch.enable_grad() ... def doubler(x): ... return x * 2 >>> with torch.no_grad(): ... z = doubler(x) >>> z.requires_grad True """ def __enter__(self): self.prev = torch.is_grad_enabled() torch._C.set_grad_enabled(True) def __exit__(self, *args): torch.set_grad_enabled(self.prev) return False class set_grad_enabled(object): r"""Context-manager that sets gradient calculation to on or off. ``set_grad_enabled`` will enable or disable grads based on its argument :attr:`mode`. It can be used as a context-manager or as a function. When using :class:`~enable_grad` context manager, :class:`~set_grad_enabled(False)` has no effect. This context manager is thread local; it will not affect computation in other threads. Arguments: mode (bool): Flag whether to enable grad (``True``), or disable (``False``). This can be used to conditionally enable gradients. Example:: >>> x = torch.tensor([1], requires_grad=True) >>> is_train = False >>> with torch.set_grad_enabled(is_train): ... y = x * 2 >>> y.requires_grad False >>> torch.set_grad_enabled(True) >>> y = x * 2 >>> y.requires_grad True >>> torch.set_grad_enabled(False) >>> y = x * 2 >>> y.requires_grad False """ def __init__(self, mode): self.prev = torch.is_grad_enabled() torch._C.set_grad_enabled(mode) def __enter__(self): pass def __exit__(self, *args): torch.set_grad_enabled(self.prev) return False
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/autograd/grad_mode.py
0.857709
0.410993
grad_mode.py
pypi
import torch import warnings class detect_anomaly(object): r"""Context-manager that enable anomaly detection for the autograd engine. This does two things: - Running the forward pass with detection enabled will allow the backward pass to print the traceback of the forward operation that created the failing backward function. - Any backward computation that generate "nan" value will raise an error. .. warning:: This mode should be enabled only for debugging as the different tests will slow down your program execution. Example: >>> import torch >>> from torch import autograd >>> class MyFunc(autograd.Function): ... @staticmethod ... def forward(ctx, inp): ... return inp.clone() ... @staticmethod ... def backward(ctx, gO): ... # Error during the backward pass ... raise RuntimeError("Some error in backward") ... return gO.clone() >>> def run_fn(a): ... out = MyFunc.apply(a) ... return out.sum() >>> inp = torch.rand(10, 10, requires_grad=True) >>> out = run_fn(inp) >>> out.backward() Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/your/pytorch/install/torch/tensor.py", line 93, in backward torch.autograd.backward(self, gradient, retain_graph, create_graph) File "/your/pytorch/install/torch/autograd/__init__.py", line 90, in backward allow_unreachable=True) # allow_unreachable flag File "/your/pytorch/install/torch/autograd/function.py", line 76, in apply return self._forward_cls.backward(self, *args) File "<stdin>", line 8, in backward RuntimeError: Some error in backward >>> with autograd.detect_anomaly(): ... inp = torch.rand(10, 10, requires_grad=True) ... out = run_fn(inp) ... 
out.backward() Traceback of forward call that caused the error: File "tmp.py", line 53, in <module> out = run_fn(inp) File "tmp.py", line 44, in run_fn out = MyFunc.apply(a) Traceback (most recent call last): File "<stdin>", line 4, in <module> File "/your/pytorch/install/torch/tensor.py", line 93, in backward torch.autograd.backward(self, gradient, retain_graph, create_graph) File "/your/pytorch/install/torch/autograd/__init__.py", line 90, in backward allow_unreachable=True) # allow_unreachable flag File "/your/pytorch/install/torch/autograd/function.py", line 76, in apply return self._forward_cls.backward(self, *args) File "<stdin>", line 8, in backward RuntimeError: Some error in backward """ def __init__(self): self.prev = torch.is_anomaly_enabled() warnings.warn('Anomaly Detection has been enabled. ' 'This mode will increase the runtime ' 'and should only be enabled for debugging.') def __enter__(self): torch.set_anomaly_enabled(True) def __exit__(self, *args): torch.set_anomaly_enabled(self.prev) return False class set_detect_anomaly(object): r"""Context-manager that sets the anomaly detection for the autograd engine on or off. ``set_detect_anomaly`` will enable or disable the autograd anomaly detection based on its argument :attr:`mode`. It can be used as a context-manager or as a function. See ``detect_anomaly`` above for details of the anomaly detection behaviour. Arguments: mode (bool): Flag whether to enable anomaly detection (``True``), or disable (``False``). """ def __init__(self, mode): self.prev = torch.is_anomaly_enabled() torch.set_anomaly_enabled(mode) def __enter__(self): pass def __exit__(self, *args): torch.set_anomaly_enabled(self.prev) return False
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/autograd/anomaly_mode.py
0.842604
0.465873
anomaly_mode.py
pypi
from functools import reduce def maybe_view(tensor, size, check_same_size=True): if check_same_size and tensor.size() == size: return tensor return tensor.contiguous().view(size) def maybe_unexpand(tensor, old_size, check_same_size=True): if check_same_size and tensor.size() == old_size: return tensor num_unsqueezed = tensor.dim() - len(old_size) expanded_dims = [dim for dim, (expanded, original) in enumerate(zip(tensor.size()[num_unsqueezed:], old_size)) if expanded != original] for _ in range(num_unsqueezed): tensor = tensor.sum(0, keepdim=False) for dim in expanded_dims: tensor = tensor.sum(dim, keepdim=True) return tensor # Check whether the op enable broadcasting, and whether it is supported by ONNX. # If dims1 and dims2 are different, then broadcast is True. # We always assume the combination of dims1 and dims2 is broadcastable. # The following types of broadcasting are supported in ONNX: # 1) Only one element in dims2, such as dims2 = [1, 1] # 2) dims2 is suffix of dims1, such as dims1 = [2, 3, 4], and dims2 = [3, 4] # Details can be found here: https://github.com/onnx/onnx/blob/master/docs/Operators.md#Gemm def check_onnx_broadcast(dims1, dims2): broadcast = False supported = True len1 = len(dims1) len2 = len(dims2) numel1 = reduce(lambda x, y: x * y, dims1) numel2 = reduce(lambda x, y: x * y, dims2) if len1 < len2: broadcast = True if numel2 != 1: supported = False elif len1 > len2: broadcast = True if numel2 != 1 and dims1[len1 - len2:] != dims2: supported = False else: if dims1 != dims2: broadcast = True if numel2 != 1: supported = False if not supported: raise ValueError("Numpy style broadcasting is not supported in ONNX. " "Input dims are: {}, {}".format(dims1, dims2)) return broadcast
/rpi_torch-1.5.0-cp37-cp37m-linux_armv7l.whl/torch/autograd/_functions/utils.py
0.827967
0.66266
utils.py
pypi
import warnings from torchvision import models from torchvision import datasets from torchvision import ops from torchvision import transforms from torchvision import utils from torchvision import io from .extension import _HAS_OPS import torch try: from .version import __version__ # noqa: F401 except ImportError: pass _image_backend = 'PIL' _video_backend = "pyav" def set_image_backend(backend): """ Specifies the package used to load images. Args: backend (string): Name of the image backend. one of {'PIL', 'accimage'}. The :mod:`accimage` package uses the Intel IPP library. It is generally faster than PIL, but does not support as many operations. """ global _image_backend if backend not in ['PIL', 'accimage']: raise ValueError("Invalid backend '{}'. Options are 'PIL' and 'accimage'" .format(backend)) _image_backend = backend def get_image_backend(): """ Gets the name of the package used to load images """ return _image_backend def set_video_backend(backend): """ Specifies the package used to decode videos. Args: backend (string): Name of the video backend. one of {'pyav', 'video_reader'}. The :mod:`pyav` package uses the 3rd party PyAv library. It is a Pythonic binding for the FFmpeg libraries. The :mod:`video_reader` package includes a native C++ implementation on top of FFMPEG libraries, and a python API of TorchScript custom operator. It is generally decoding faster than :mod:`pyav`, but perhaps is less robust. """ global _video_backend if backend not in ["pyav", "video_reader"]: raise ValueError( "Invalid video backend '%s'. Options are 'pyav' and 'video_reader'" % backend ) if backend == "video_reader" and not io._HAS_VIDEO_OPT: warnings.warn("video_reader video backend is not available") else: _video_backend = backend def get_video_backend(): return _video_backend def _is_tracing(): return torch._C._get_tracing_state()
/rpi_torchvision-0.7.0-cp37-cp37m-linux_armv7l.whl/torchvision/__init__.py
0.894427
0.361841
__init__.py
pypi
import numbers import random from torchvision.transforms import ( RandomCrop, RandomResizedCrop, ) from . import _functional_video as F __all__ = [ "RandomCropVideo", "RandomResizedCropVideo", "CenterCropVideo", "NormalizeVideo", "ToTensorVideo", "RandomHorizontalFlipVideo", ] class RandomCropVideo(RandomCrop): def __init__(self, size): if isinstance(size, numbers.Number): self.size = (int(size), int(size)) else: self.size = size def __call__(self, clip): """ Args: clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W) Returns: torch.tensor: randomly cropped/resized video clip. size is (C, T, OH, OW) """ i, j, h, w = self.get_params(clip, self.size) return F.crop(clip, i, j, h, w) def __repr__(self): return self.__class__.__name__ + '(size={0})'.format(self.size) class RandomResizedCropVideo(RandomResizedCrop): def __init__( self, size, scale=(0.08, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0), interpolation_mode="bilinear", ): if isinstance(size, tuple): assert len(size) == 2, "size should be tuple (height, width)" self.size = size else: self.size = (size, size) self.interpolation_mode = interpolation_mode self.scale = scale self.ratio = ratio def __call__(self, clip): """ Args: clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W) Returns: torch.tensor: randomly cropped/resized video clip. size is (C, T, H, W) """ i, j, h, w = self.get_params(clip, self.scale, self.ratio) return F.resized_crop(clip, i, j, h, w, self.size, self.interpolation_mode) def __repr__(self): return self.__class__.__name__ + \ '(size={0}, interpolation_mode={1}, scale={2}, ratio={3})'.format( self.size, self.interpolation_mode, self.scale, self.ratio ) class CenterCropVideo(object): def __init__(self, crop_size): if isinstance(crop_size, numbers.Number): self.crop_size = (int(crop_size), int(crop_size)) else: self.crop_size = crop_size def __call__(self, clip): """ Args: clip (torch.tensor): Video clip to be cropped. 
Size is (C, T, H, W) Returns: torch.tensor: central cropping of video clip. Size is (C, T, crop_size, crop_size) """ return F.center_crop(clip, self.crop_size) def __repr__(self): return self.__class__.__name__ + '(crop_size={0})'.format(self.crop_size) class NormalizeVideo(object): """ Normalize the video clip by mean subtraction and division by standard deviation Args: mean (3-tuple): pixel RGB mean std (3-tuple): pixel RGB standard deviation inplace (boolean): whether do in-place normalization """ def __init__(self, mean, std, inplace=False): self.mean = mean self.std = std self.inplace = inplace def __call__(self, clip): """ Args: clip (torch.tensor): video clip to be normalized. Size is (C, T, H, W) """ return F.normalize(clip, self.mean, self.std, self.inplace) def __repr__(self): return self.__class__.__name__ + '(mean={0}, std={1}, inplace={2})'.format( self.mean, self.std, self.inplace) class ToTensorVideo(object): """ Convert tensor data type from uint8 to float, divide value by 255.0 and permute the dimenions of clip tensor """ def __init__(self): pass def __call__(self, clip): """ Args: clip (torch.tensor, dtype=torch.uint8): Size is (T, H, W, C) Return: clip (torch.tensor, dtype=torch.float): Size is (C, T, H, W) """ return F.to_tensor(clip) def __repr__(self): return self.__class__.__name__ class RandomHorizontalFlipVideo(object): """ Flip the video clip along the horizonal direction with a given probability Args: p (float): probability of the clip being flipped. Default value is 0.5 """ def __init__(self, p=0.5): self.p = p def __call__(self, clip): """ Args: clip (torch.tensor): Size is (C, T, H, W) Return: clip (torch.tensor): Size is (C, T, H, W) """ if random.random() < self.p: clip = F.hflip(clip) return clip def __repr__(self): return self.__class__.__name__ + "(p={0})".format(self.p)
/rpi_torchvision-0.7.0-cp37-cp37m-linux_armv7l.whl/torchvision/transforms/_transforms_video.py
0.912988
0.280719
_transforms_video.py
pypi
import torch import torchvision.transforms.functional as F from torch import Tensor from torch.jit.annotations import Optional, List, BroadcastingList2, Tuple def _is_tensor_a_torch_image(input): return len(input.shape) == 3 def vflip(img): # type: (Tensor) -> Tensor """Vertically flip the given the Image Tensor. Args: img (Tensor): Image Tensor to be flipped in the form [C, H, W]. Returns: Tensor: Vertically flipped image Tensor. """ if not _is_tensor_a_torch_image(img): raise TypeError('tensor is not a torch image.') return img.flip(-2) def hflip(img): # type: (Tensor) -> Tensor """Horizontally flip the given the Image Tensor. Args: img (Tensor): Image Tensor to be flipped in the form [C, H, W]. Returns: Tensor: Horizontally flipped image Tensor. """ if not _is_tensor_a_torch_image(img): raise TypeError('tensor is not a torch image.') return img.flip(-1) def crop(img, top, left, height, width): # type: (Tensor, int, int, int, int) -> Tensor """Crop the given Image Tensor. Args: img (Tensor): Image to be cropped in the form [C, H, W]. (0,0) denotes the top left corner of the image. top (int): Vertical component of the top left corner of the crop box. left (int): Horizontal component of the top left corner of the crop box. height (int): Height of the crop box. width (int): Width of the crop box. Returns: Tensor: Cropped image. """ if not _is_tensor_a_torch_image(img): raise TypeError('tensor is not a torch image.') return img[..., top:top + height, left:left + width] def rgb_to_grayscale(img): # type: (Tensor) -> Tensor """Convert the given RGB Image Tensor to Grayscale. For RGB to Grayscale conversion, ITU-R 601-2 luma transform is performed which is L = R * 0.2989 + G * 0.5870 + B * 0.1140 Args: img (Tensor): Image to be converted to Grayscale in the form [C, H, W]. Returns: Tensor: Grayscale image. 
""" if img.shape[0] != 3: raise TypeError('Input Image does not contain 3 Channels') return (0.2989 * img[0] + 0.5870 * img[1] + 0.1140 * img[2]).to(img.dtype) def adjust_brightness(img, brightness_factor): # type: (Tensor, float) -> Tensor """Adjust brightness of an RGB image. Args: img (Tensor): Image to be adjusted. brightness_factor (float): How much to adjust the brightness. Can be any non negative number. 0 gives a black image, 1 gives the original image while 2 increases the brightness by a factor of 2. Returns: Tensor: Brightness adjusted image. """ if not _is_tensor_a_torch_image(img): raise TypeError('tensor is not a torch image.') return _blend(img, torch.zeros_like(img), brightness_factor) def adjust_contrast(img, contrast_factor): # type: (Tensor, float) -> Tensor """Adjust contrast of an RGB image. Args: img (Tensor): Image to be adjusted. contrast_factor (float): How much to adjust the contrast. Can be any non negative number. 0 gives a solid gray image, 1 gives the original image while 2 increases the contrast by a factor of 2. Returns: Tensor: Contrast adjusted image. """ if not _is_tensor_a_torch_image(img): raise TypeError('tensor is not a torch image.') mean = torch.mean(rgb_to_grayscale(img).to(torch.float)) return _blend(img, mean, contrast_factor) def adjust_saturation(img, saturation_factor): # type: (Tensor, float) -> Tensor """Adjust color saturation of an RGB image. Args: img (Tensor): Image to be adjusted. saturation_factor (float): How much to adjust the saturation. 0 will give a black and white image, 1 will give the original image while 2 will enhance the saturation by a factor of 2. Returns: Tensor: Saturation adjusted image. """ if not _is_tensor_a_torch_image(img): raise TypeError('tensor is not a torch image.') return _blend(img, rgb_to_grayscale(img), saturation_factor) def center_crop(img, output_size): # type: (Tensor, BroadcastingList2[int]) -> Tensor """Crop the Image Tensor and resize it to desired size. 
Args: img (Tensor): Image to be cropped. (0,0) denotes the top left corner of the image. output_size (sequence or int): (height, width) of the crop box. If int, it is used for both directions Returns: Tensor: Cropped image. """ if not _is_tensor_a_torch_image(img): raise TypeError('tensor is not a torch image.') _, image_width, image_height = img.size() crop_height, crop_width = output_size crop_top = int(round((image_height - crop_height) / 2.)) crop_left = int(round((image_width - crop_width) / 2.)) return crop(img, crop_top, crop_left, crop_height, crop_width) def five_crop(img, size): # type: (Tensor, BroadcastingList2[int]) -> List[Tensor] """Crop the given Image Tensor into four corners and the central crop. .. Note:: This transform returns a List of Tensors and there may be a mismatch in the number of inputs and targets your ``Dataset`` returns. Args: size (sequence or int): Desired output size of the crop. If size is an int instead of sequence like (h, w), a square crop (size, size) is made. Returns: List: List (tl, tr, bl, br, center) Corresponding top left, top right, bottom left, bottom right and center crop. """ if not _is_tensor_a_torch_image(img): raise TypeError('tensor is not a torch image.') assert len(size) == 2, "Please provide only two dimensions (h, w) for size." 
_, image_width, image_height = img.size() crop_height, crop_width = size if crop_width > image_width or crop_height > image_height: msg = "Requested crop size {} is bigger than input size {}" raise ValueError(msg.format(size, (image_height, image_width))) tl = crop(img, 0, 0, crop_width, crop_height) tr = crop(img, image_width - crop_width, 0, image_width, crop_height) bl = crop(img, 0, image_height - crop_height, crop_width, image_height) br = crop(img, image_width - crop_width, image_height - crop_height, image_width, image_height) center = center_crop(img, (crop_height, crop_width)) return [tl, tr, bl, br, center] def ten_crop(img, size, vertical_flip=False): # type: (Tensor, BroadcastingList2[int], bool) -> List[Tensor] """Crop the given Image Tensor into four corners and the central crop plus the flipped version of these (horizontal flipping is used by default). .. Note:: This transform returns a List of images and there may be a mismatch in the number of inputs and targets your ``Dataset`` returns. Args: size (sequence or int): Desired output size of the crop. If size is an int instead of sequence like (h, w), a square crop (size, size) is made. vertical_flip (bool): Use vertical flipping instead of horizontal Returns: List: List (tl, tr, bl, br, center, tl_flip, tr_flip, bl_flip, br_flip, center_flip) Corresponding top left, top right, bottom left, bottom right and center crop and same for the flipped image's tensor. """ if not _is_tensor_a_torch_image(img): raise TypeError('tensor is not a torch image.') assert len(size) == 2, "Please provide only two dimensions (h, w) for size." first_five = five_crop(img, size) if vertical_flip: img = vflip(img) else: img = hflip(img) second_five = five_crop(img, size) return first_five + second_five def _blend(img1, img2, ratio): # type: (Tensor, Tensor, float) -> Tensor bound = 1 if img1.dtype in [torch.half, torch.float32, torch.float64] else 255 return (ratio * img1 + (1 - ratio) * img2).clamp(0, bound).to(img1.dtype)
/rpi_torchvision-0.7.0-cp37-cp37m-linux_armv7l.whl/torchvision/transforms/functional_tensor.py
0.962108
0.916633
functional_tensor.py
pypi
import torch def _is_tensor_video_clip(clip): if not torch.is_tensor(clip): raise TypeError("clip should be Tesnor. Got %s" % type(clip)) if not clip.ndimension() == 4: raise ValueError("clip should be 4D. Got %dD" % clip.dim()) return True def crop(clip, i, j, h, w): """ Args: clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W) """ assert len(clip.size()) == 4, "clip should be a 4D tensor" return clip[..., i:i + h, j:j + w] def resize(clip, target_size, interpolation_mode): assert len(target_size) == 2, "target size should be tuple (height, width)" return torch.nn.functional.interpolate( clip, size=target_size, mode=interpolation_mode ) def resized_crop(clip, i, j, h, w, size, interpolation_mode="bilinear"): """ Do spatial cropping and resizing to the video clip Args: clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W) i (int): i in (i,j) i.e coordinates of the upper left corner. j (int): j in (i,j) i.e coordinates of the upper left corner. h (int): Height of the cropped region. w (int): Width of the cropped region. size (tuple(int, int)): height and width of resized clip Returns: clip (torch.tensor): Resized and cropped clip. 
Size is (C, T, H, W) """ assert _is_tensor_video_clip(clip), "clip should be a 4D torch.tensor" clip = crop(clip, i, j, h, w) clip = resize(clip, size, interpolation_mode) return clip def center_crop(clip, crop_size): assert _is_tensor_video_clip(clip), "clip should be a 4D torch.tensor" h, w = clip.size(-2), clip.size(-1) th, tw = crop_size assert h >= th and w >= tw, "height and width must be no smaller than crop_size" i = int(round((h - th) / 2.0)) j = int(round((w - tw) / 2.0)) return crop(clip, i, j, th, tw) def to_tensor(clip): """ Convert tensor data type from uint8 to float, divide value by 255.0 and permute the dimenions of clip tensor Args: clip (torch.tensor, dtype=torch.uint8): Size is (T, H, W, C) Return: clip (torch.tensor, dtype=torch.float): Size is (C, T, H, W) """ _is_tensor_video_clip(clip) if not clip.dtype == torch.uint8: raise TypeError("clip tensor should have data type uint8. Got %s" % str(clip.dtype)) return clip.float().permute(3, 0, 1, 2) / 255.0 def normalize(clip, mean, std, inplace=False): """ Args: clip (torch.tensor): Video clip to be normalized. Size is (C, T, H, W) mean (tuple): pixel RGB mean. Size is (3) std (tuple): pixel standard deviation. Size is (3) Returns: normalized clip (torch.tensor): Size is (C, T, H, W) """ assert _is_tensor_video_clip(clip), "clip should be a 4D torch.tensor" if not inplace: clip = clip.clone() mean = torch.as_tensor(mean, dtype=clip.dtype, device=clip.device) std = torch.as_tensor(std, dtype=clip.dtype, device=clip.device) clip.sub_(mean[:, None, None, None]).div_(std[:, None, None, None]) return clip def hflip(clip): """ Args: clip (torch.tensor): Video clip to be normalized. Size is (C, T, H, W) Returns: flipped clip (torch.tensor): Size is (C, T, H, W) """ assert _is_tensor_video_clip(clip), "clip should be a 4D torch.tensor" return clip.flip((-1))
/rpi_torchvision-0.7.0-cp37-cp37m-linux_armv7l.whl/torchvision/transforms/_functional_video.py
0.91191
0.733667
_functional_video.py
pypi
import torch import torch.nn as nn import torch.nn.init as init from .utils import load_state_dict_from_url __all__ = ['SqueezeNet', 'squeezenet1_0', 'squeezenet1_1'] model_urls = { 'squeezenet1_0': 'https://download.pytorch.org/models/squeezenet1_0-a815701f.pth', 'squeezenet1_1': 'https://download.pytorch.org/models/squeezenet1_1-f364aa15.pth', } class Fire(nn.Module): def __init__(self, inplanes, squeeze_planes, expand1x1_planes, expand3x3_planes): super(Fire, self).__init__() self.inplanes = inplanes self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1) self.squeeze_activation = nn.ReLU(inplace=True) self.expand1x1 = nn.Conv2d(squeeze_planes, expand1x1_planes, kernel_size=1) self.expand1x1_activation = nn.ReLU(inplace=True) self.expand3x3 = nn.Conv2d(squeeze_planes, expand3x3_planes, kernel_size=3, padding=1) self.expand3x3_activation = nn.ReLU(inplace=True) def forward(self, x): x = self.squeeze_activation(self.squeeze(x)) return torch.cat([ self.expand1x1_activation(self.expand1x1(x)), self.expand3x3_activation(self.expand3x3(x)) ], 1) class SqueezeNet(nn.Module): def __init__(self, version='1_0', num_classes=1000): super(SqueezeNet, self).__init__() self.num_classes = num_classes if version == '1_0': self.features = nn.Sequential( nn.Conv2d(3, 96, kernel_size=7, stride=2), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(96, 16, 64, 64), Fire(128, 16, 64, 64), Fire(128, 32, 128, 128), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(256, 32, 128, 128), Fire(256, 48, 192, 192), Fire(384, 48, 192, 192), Fire(384, 64, 256, 256), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(512, 64, 256, 256), ) elif version == '1_1': self.features = nn.Sequential( nn.Conv2d(3, 64, kernel_size=3, stride=2), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(64, 16, 64, 64), Fire(128, 16, 64, 64), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(128, 32, 128, 128), 
Fire(256, 32, 128, 128), nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), Fire(256, 48, 192, 192), Fire(384, 48, 192, 192), Fire(384, 64, 256, 256), Fire(512, 64, 256, 256), ) else: # FIXME: Is this needed? SqueezeNet should only be called from the # FIXME: squeezenet1_x() functions # FIXME: This checking is not done for the other models raise ValueError("Unsupported SqueezeNet version {version}:" "1_0 or 1_1 expected".format(version=version)) # Final convolution is initialized differently from the rest final_conv = nn.Conv2d(512, self.num_classes, kernel_size=1) self.classifier = nn.Sequential( nn.Dropout(p=0.5), final_conv, nn.ReLU(inplace=True), nn.AdaptiveAvgPool2d((1, 1)) ) for m in self.modules(): if isinstance(m, nn.Conv2d): if m is final_conv: init.normal_(m.weight, mean=0.0, std=0.01) else: init.kaiming_uniform_(m.weight) if m.bias is not None: init.constant_(m.bias, 0) def forward(self, x): x = self.features(x) x = self.classifier(x) return torch.flatten(x, 1) def _squeezenet(version, pretrained, progress, **kwargs): model = SqueezeNet(version, **kwargs) if pretrained: arch = 'squeezenet' + version state_dict = load_state_dict_from_url(model_urls[arch], progress=progress) model.load_state_dict(state_dict) return model def squeezenet1_0(pretrained=False, progress=True, **kwargs): r"""SqueezeNet model architecture from the `"SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model size" <https://arxiv.org/abs/1602.07360>`_ paper. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr """ return _squeezenet('1_0', pretrained, progress, **kwargs) def squeezenet1_1(pretrained=False, progress=True, **kwargs): r"""SqueezeNet 1.1 model from the `official SqueezeNet repo <https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1>`_. 
SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters than SqueezeNet 1.0, without sacrificing accuracy. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr """ return _squeezenet('1_1', pretrained, progress, **kwargs)
/rpi_torchvision-0.7.0-cp37-cp37m-linux_armv7l.whl/torchvision/models/squeezenet.py
0.899195
0.450903
squeezenet.py
pypi
from collections import namedtuple import warnings import torch import torch.nn as nn import torch.nn.functional as F from torch.jit.annotations import Optional from torch import Tensor from .utils import load_state_dict_from_url __all__ = ['Inception3', 'inception_v3', 'InceptionOutputs', '_InceptionOutputs'] model_urls = { # Inception v3 ported from TensorFlow 'inception_v3_google': 'https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth', } InceptionOutputs = namedtuple('InceptionOutputs', ['logits', 'aux_logits']) InceptionOutputs.__annotations__ = {'logits': torch.Tensor, 'aux_logits': Optional[torch.Tensor]} # Script annotations failed with _GoogleNetOutputs = namedtuple ... # _InceptionOutputs set here for backwards compat _InceptionOutputs = InceptionOutputs def inception_v3(pretrained=False, progress=True, **kwargs): r"""Inception v3 model architecture from `"Rethinking the Inception Architecture for Computer Vision" <http://arxiv.org/abs/1512.00567>`_. .. note:: **Important**: In contrast to the other models the inception_v3 expects tensors with a size of N x 3 x 299 x 299, so ensure your images are sized accordingly. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr aux_logits (bool): If True, add an auxiliary branch that can improve training. Default: *True* transform_input (bool): If True, preprocesses the input according to the method with which it was trained on ImageNet. 
Default: *False* """ if pretrained: if 'transform_input' not in kwargs: kwargs['transform_input'] = True if 'aux_logits' in kwargs: original_aux_logits = kwargs['aux_logits'] kwargs['aux_logits'] = True else: original_aux_logits = True model = Inception3(**kwargs) state_dict = load_state_dict_from_url(model_urls['inception_v3_google'], progress=progress) model.load_state_dict(state_dict) if not original_aux_logits: model.aux_logits = False del model.AuxLogits return model return Inception3(**kwargs) class Inception3(nn.Module): def __init__(self, num_classes=1000, aux_logits=True, transform_input=False, inception_blocks=None, init_weights=True): super(Inception3, self).__init__() if inception_blocks is None: inception_blocks = [ BasicConv2d, InceptionA, InceptionB, InceptionC, InceptionD, InceptionE, InceptionAux ] assert len(inception_blocks) == 7 conv_block = inception_blocks[0] inception_a = inception_blocks[1] inception_b = inception_blocks[2] inception_c = inception_blocks[3] inception_d = inception_blocks[4] inception_e = inception_blocks[5] inception_aux = inception_blocks[6] self.aux_logits = aux_logits self.transform_input = transform_input self.Conv2d_1a_3x3 = conv_block(3, 32, kernel_size=3, stride=2) self.Conv2d_2a_3x3 = conv_block(32, 32, kernel_size=3) self.Conv2d_2b_3x3 = conv_block(32, 64, kernel_size=3, padding=1) self.Conv2d_3b_1x1 = conv_block(64, 80, kernel_size=1) self.Conv2d_4a_3x3 = conv_block(80, 192, kernel_size=3) self.Mixed_5b = inception_a(192, pool_features=32) self.Mixed_5c = inception_a(256, pool_features=64) self.Mixed_5d = inception_a(288, pool_features=64) self.Mixed_6a = inception_b(288) self.Mixed_6b = inception_c(768, channels_7x7=128) self.Mixed_6c = inception_c(768, channels_7x7=160) self.Mixed_6d = inception_c(768, channels_7x7=160) self.Mixed_6e = inception_c(768, channels_7x7=192) if aux_logits: self.AuxLogits = inception_aux(768, num_classes) self.Mixed_7a = inception_d(768) self.Mixed_7b = inception_e(1280) self.Mixed_7c 
= inception_e(2048) self.fc = nn.Linear(2048, num_classes) if init_weights: for m in self.modules(): if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear): import scipy.stats as stats stddev = m.stddev if hasattr(m, 'stddev') else 0.1 X = stats.truncnorm(-2, 2, scale=stddev) values = torch.as_tensor(X.rvs(m.weight.numel()), dtype=m.weight.dtype) values = values.view(m.weight.size()) with torch.no_grad(): m.weight.copy_(values) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) def _transform_input(self, x): if self.transform_input: x_ch0 = torch.unsqueeze(x[:, 0], 1) * (0.229 / 0.5) + (0.485 - 0.5) / 0.5 x_ch1 = torch.unsqueeze(x[:, 1], 1) * (0.224 / 0.5) + (0.456 - 0.5) / 0.5 x_ch2 = torch.unsqueeze(x[:, 2], 1) * (0.225 / 0.5) + (0.406 - 0.5) / 0.5 x = torch.cat((x_ch0, x_ch1, x_ch2), 1) return x def _forward(self, x): # N x 3 x 299 x 299 x = self.Conv2d_1a_3x3(x) # N x 32 x 149 x 149 x = self.Conv2d_2a_3x3(x) # N x 32 x 147 x 147 x = self.Conv2d_2b_3x3(x) # N x 64 x 147 x 147 x = F.max_pool2d(x, kernel_size=3, stride=2) # N x 64 x 73 x 73 x = self.Conv2d_3b_1x1(x) # N x 80 x 73 x 73 x = self.Conv2d_4a_3x3(x) # N x 192 x 71 x 71 x = F.max_pool2d(x, kernel_size=3, stride=2) # N x 192 x 35 x 35 x = self.Mixed_5b(x) # N x 256 x 35 x 35 x = self.Mixed_5c(x) # N x 288 x 35 x 35 x = self.Mixed_5d(x) # N x 288 x 35 x 35 x = self.Mixed_6a(x) # N x 768 x 17 x 17 x = self.Mixed_6b(x) # N x 768 x 17 x 17 x = self.Mixed_6c(x) # N x 768 x 17 x 17 x = self.Mixed_6d(x) # N x 768 x 17 x 17 x = self.Mixed_6e(x) # N x 768 x 17 x 17 aux_defined = self.training and self.aux_logits if aux_defined: aux = self.AuxLogits(x) else: aux = None # N x 768 x 17 x 17 x = self.Mixed_7a(x) # N x 1280 x 8 x 8 x = self.Mixed_7b(x) # N x 2048 x 8 x 8 x = self.Mixed_7c(x) # N x 2048 x 8 x 8 # Adaptive average pooling x = F.adaptive_avg_pool2d(x, (1, 1)) # N x 2048 x 1 x 1 x = F.dropout(x, training=self.training) # N x 2048 x 1 x 1 x = torch.flatten(x, 1) 
# N x 2048 x = self.fc(x) # N x 1000 (num_classes) return x, aux @torch.jit.unused def eager_outputs(self, x, aux): # type: (Tensor, Optional[Tensor]) -> InceptionOutputs if self.training and self.aux_logits: return InceptionOutputs(x, aux) else: return x def forward(self, x): x = self._transform_input(x) x, aux = self._forward(x) aux_defined = self.training and self.aux_logits if torch.jit.is_scripting(): if not aux_defined: warnings.warn("Scripted Inception3 always returns Inception3 Tuple") return InceptionOutputs(x, aux) else: return self.eager_outputs(x, aux) class InceptionA(nn.Module): def __init__(self, in_channels, pool_features, conv_block=None): super(InceptionA, self).__init__() if conv_block is None: conv_block = BasicConv2d self.branch1x1 = conv_block(in_channels, 64, kernel_size=1) self.branch5x5_1 = conv_block(in_channels, 48, kernel_size=1) self.branch5x5_2 = conv_block(48, 64, kernel_size=5, padding=2) self.branch3x3dbl_1 = conv_block(in_channels, 64, kernel_size=1) self.branch3x3dbl_2 = conv_block(64, 96, kernel_size=3, padding=1) self.branch3x3dbl_3 = conv_block(96, 96, kernel_size=3, padding=1) self.branch_pool = conv_block(in_channels, pool_features, kernel_size=1) def _forward(self, x): branch1x1 = self.branch1x1(x) branch5x5 = self.branch5x5_1(x) branch5x5 = self.branch5x5_2(branch5x5) branch3x3dbl = self.branch3x3dbl_1(x) branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl) branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1) branch_pool = self.branch_pool(branch_pool) outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool] return outputs def forward(self, x): outputs = self._forward(x) return torch.cat(outputs, 1) class InceptionB(nn.Module): def __init__(self, in_channels, conv_block=None): super(InceptionB, self).__init__() if conv_block is None: conv_block = BasicConv2d self.branch3x3 = conv_block(in_channels, 384, kernel_size=3, stride=2) self.branch3x3dbl_1 = 
conv_block(in_channels, 64, kernel_size=1) self.branch3x3dbl_2 = conv_block(64, 96, kernel_size=3, padding=1) self.branch3x3dbl_3 = conv_block(96, 96, kernel_size=3, stride=2) def _forward(self, x): branch3x3 = self.branch3x3(x) branch3x3dbl = self.branch3x3dbl_1(x) branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl) branch_pool = F.max_pool2d(x, kernel_size=3, stride=2) outputs = [branch3x3, branch3x3dbl, branch_pool] return outputs def forward(self, x): outputs = self._forward(x) return torch.cat(outputs, 1) class InceptionC(nn.Module): def __init__(self, in_channels, channels_7x7, conv_block=None): super(InceptionC, self).__init__() if conv_block is None: conv_block = BasicConv2d self.branch1x1 = conv_block(in_channels, 192, kernel_size=1) c7 = channels_7x7 self.branch7x7_1 = conv_block(in_channels, c7, kernel_size=1) self.branch7x7_2 = conv_block(c7, c7, kernel_size=(1, 7), padding=(0, 3)) self.branch7x7_3 = conv_block(c7, 192, kernel_size=(7, 1), padding=(3, 0)) self.branch7x7dbl_1 = conv_block(in_channels, c7, kernel_size=1) self.branch7x7dbl_2 = conv_block(c7, c7, kernel_size=(7, 1), padding=(3, 0)) self.branch7x7dbl_3 = conv_block(c7, c7, kernel_size=(1, 7), padding=(0, 3)) self.branch7x7dbl_4 = conv_block(c7, c7, kernel_size=(7, 1), padding=(3, 0)) self.branch7x7dbl_5 = conv_block(c7, 192, kernel_size=(1, 7), padding=(0, 3)) self.branch_pool = conv_block(in_channels, 192, kernel_size=1) def _forward(self, x): branch1x1 = self.branch1x1(x) branch7x7 = self.branch7x7_1(x) branch7x7 = self.branch7x7_2(branch7x7) branch7x7 = self.branch7x7_3(branch7x7) branch7x7dbl = self.branch7x7dbl_1(x) branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl) branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl) branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl) branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl) branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1) branch_pool = self.branch_pool(branch_pool) outputs = [branch1x1, 
branch7x7, branch7x7dbl, branch_pool] return outputs def forward(self, x): outputs = self._forward(x) return torch.cat(outputs, 1) class InceptionD(nn.Module): def __init__(self, in_channels, conv_block=None): super(InceptionD, self).__init__() if conv_block is None: conv_block = BasicConv2d self.branch3x3_1 = conv_block(in_channels, 192, kernel_size=1) self.branch3x3_2 = conv_block(192, 320, kernel_size=3, stride=2) self.branch7x7x3_1 = conv_block(in_channels, 192, kernel_size=1) self.branch7x7x3_2 = conv_block(192, 192, kernel_size=(1, 7), padding=(0, 3)) self.branch7x7x3_3 = conv_block(192, 192, kernel_size=(7, 1), padding=(3, 0)) self.branch7x7x3_4 = conv_block(192, 192, kernel_size=3, stride=2) def _forward(self, x): branch3x3 = self.branch3x3_1(x) branch3x3 = self.branch3x3_2(branch3x3) branch7x7x3 = self.branch7x7x3_1(x) branch7x7x3 = self.branch7x7x3_2(branch7x7x3) branch7x7x3 = self.branch7x7x3_3(branch7x7x3) branch7x7x3 = self.branch7x7x3_4(branch7x7x3) branch_pool = F.max_pool2d(x, kernel_size=3, stride=2) outputs = [branch3x3, branch7x7x3, branch_pool] return outputs def forward(self, x): outputs = self._forward(x) return torch.cat(outputs, 1) class InceptionE(nn.Module): def __init__(self, in_channels, conv_block=None): super(InceptionE, self).__init__() if conv_block is None: conv_block = BasicConv2d self.branch1x1 = conv_block(in_channels, 320, kernel_size=1) self.branch3x3_1 = conv_block(in_channels, 384, kernel_size=1) self.branch3x3_2a = conv_block(384, 384, kernel_size=(1, 3), padding=(0, 1)) self.branch3x3_2b = conv_block(384, 384, kernel_size=(3, 1), padding=(1, 0)) self.branch3x3dbl_1 = conv_block(in_channels, 448, kernel_size=1) self.branch3x3dbl_2 = conv_block(448, 384, kernel_size=3, padding=1) self.branch3x3dbl_3a = conv_block(384, 384, kernel_size=(1, 3), padding=(0, 1)) self.branch3x3dbl_3b = conv_block(384, 384, kernel_size=(3, 1), padding=(1, 0)) self.branch_pool = conv_block(in_channels, 192, kernel_size=1) def _forward(self, x): 
branch1x1 = self.branch1x1(x) branch3x3 = self.branch3x3_1(x) branch3x3 = [ self.branch3x3_2a(branch3x3), self.branch3x3_2b(branch3x3), ] branch3x3 = torch.cat(branch3x3, 1) branch3x3dbl = self.branch3x3dbl_1(x) branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) branch3x3dbl = [ self.branch3x3dbl_3a(branch3x3dbl), self.branch3x3dbl_3b(branch3x3dbl), ] branch3x3dbl = torch.cat(branch3x3dbl, 1) branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1) branch_pool = self.branch_pool(branch_pool) outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool] return outputs def forward(self, x): outputs = self._forward(x) return torch.cat(outputs, 1) class InceptionAux(nn.Module): def __init__(self, in_channels, num_classes, conv_block=None): super(InceptionAux, self).__init__() if conv_block is None: conv_block = BasicConv2d self.conv0 = conv_block(in_channels, 128, kernel_size=1) self.conv1 = conv_block(128, 768, kernel_size=5) self.conv1.stddev = 0.01 self.fc = nn.Linear(768, num_classes) self.fc.stddev = 0.001 def forward(self, x): # N x 768 x 17 x 17 x = F.avg_pool2d(x, kernel_size=5, stride=3) # N x 768 x 5 x 5 x = self.conv0(x) # N x 128 x 5 x 5 x = self.conv1(x) # N x 768 x 1 x 1 # Adaptive average pooling x = F.adaptive_avg_pool2d(x, (1, 1)) # N x 768 x 1 x 1 x = torch.flatten(x, 1) # N x 768 x = self.fc(x) # N x 1000 return x class BasicConv2d(nn.Module): def __init__(self, in_channels, out_channels, **kwargs): super(BasicConv2d, self).__init__() self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs) self.bn = nn.BatchNorm2d(out_channels, eps=0.001) def forward(self, x): x = self.conv(x) x = self.bn(x) return F.relu(x, inplace=True)
/rpi_torchvision-0.7.0-cp37-cp37m-linux_armv7l.whl/torchvision/models/inception.py
0.948466
0.600716
inception.py
pypi
import re import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint as cp from collections import OrderedDict from .utils import load_state_dict_from_url from torch import Tensor from torch.jit.annotations import List __all__ = ['DenseNet', 'densenet121', 'densenet169', 'densenet201', 'densenet161'] model_urls = { 'densenet121': 'https://download.pytorch.org/models/densenet121-a639ec97.pth', 'densenet169': 'https://download.pytorch.org/models/densenet169-b2777c0a.pth', 'densenet201': 'https://download.pytorch.org/models/densenet201-c1103571.pth', 'densenet161': 'https://download.pytorch.org/models/densenet161-8d451a50.pth', } class _DenseLayer(nn.Module): def __init__(self, num_input_features, growth_rate, bn_size, drop_rate, memory_efficient=False): super(_DenseLayer, self).__init__() self.add_module('norm1', nn.BatchNorm2d(num_input_features)), self.add_module('relu1', nn.ReLU(inplace=True)), self.add_module('conv1', nn.Conv2d(num_input_features, bn_size * growth_rate, kernel_size=1, stride=1, bias=False)), self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate)), self.add_module('relu2', nn.ReLU(inplace=True)), self.add_module('conv2', nn.Conv2d(bn_size * growth_rate, growth_rate, kernel_size=3, stride=1, padding=1, bias=False)), self.drop_rate = float(drop_rate) self.memory_efficient = memory_efficient def bn_function(self, inputs): # type: (List[Tensor]) -> Tensor concated_features = torch.cat(inputs, 1) bottleneck_output = self.conv1(self.relu1(self.norm1(concated_features))) # noqa: T484 return bottleneck_output # todo: rewrite when torchscript supports any def any_requires_grad(self, input): # type: (List[Tensor]) -> bool for tensor in input: if tensor.requires_grad: return True return False @torch.jit.unused # noqa: T484 def call_checkpoint_bottleneck(self, input): # type: (List[Tensor]) -> Tensor def closure(*inputs): return self.bn_function(*inputs) return cp.checkpoint(closure, input) 
@torch.jit._overload_method # noqa: F811 def forward(self, input): # type: (List[Tensor]) -> (Tensor) pass @torch.jit._overload_method # noqa: F811 def forward(self, input): # type: (Tensor) -> (Tensor) pass # torchscript does not yet support *args, so we overload method # allowing it to take either a List[Tensor] or single Tensor def forward(self, input): # noqa: F811 if isinstance(input, Tensor): prev_features = [input] else: prev_features = input if self.memory_efficient and self.any_requires_grad(prev_features): if torch.jit.is_scripting(): raise Exception("Memory Efficient not supported in JIT") bottleneck_output = self.call_checkpoint_bottleneck(prev_features) else: bottleneck_output = self.bn_function(prev_features) new_features = self.conv2(self.relu2(self.norm2(bottleneck_output))) if self.drop_rate > 0: new_features = F.dropout(new_features, p=self.drop_rate, training=self.training) return new_features class _DenseBlock(nn.ModuleDict): _version = 2 def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate, memory_efficient=False): super(_DenseBlock, self).__init__() for i in range(num_layers): layer = _DenseLayer( num_input_features + i * growth_rate, growth_rate=growth_rate, bn_size=bn_size, drop_rate=drop_rate, memory_efficient=memory_efficient, ) self.add_module('denselayer%d' % (i + 1), layer) def forward(self, init_features): features = [init_features] for name, layer in self.items(): new_features = layer(features) features.append(new_features) return torch.cat(features, 1) class _Transition(nn.Sequential): def __init__(self, num_input_features, num_output_features): super(_Transition, self).__init__() self.add_module('norm', nn.BatchNorm2d(num_input_features)) self.add_module('relu', nn.ReLU(inplace=True)) self.add_module('conv', nn.Conv2d(num_input_features, num_output_features, kernel_size=1, stride=1, bias=False)) self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2)) class DenseNet(nn.Module): r"""Densenet-BC 
model class, based on `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ Args: growth_rate (int) - how many filters to add each layer (`k` in paper) block_config (list of 4 ints) - how many layers in each pooling block num_init_features (int) - the number of filters to learn in the first convolution layer bn_size (int) - multiplicative factor for number of bottle neck layers (i.e. bn_size * k features in the bottleneck layer) drop_rate (float) - dropout rate after each dense layer num_classes (int) - number of classification classes memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient, but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_ """ def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16), num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000, memory_efficient=False): super(DenseNet, self).__init__() # First convolution self.features = nn.Sequential(OrderedDict([ ('conv0', nn.Conv2d(3, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)), ('norm0', nn.BatchNorm2d(num_init_features)), ('relu0', nn.ReLU(inplace=True)), ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)), ])) # Each denseblock num_features = num_init_features for i, num_layers in enumerate(block_config): block = _DenseBlock( num_layers=num_layers, num_input_features=num_features, bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate, memory_efficient=memory_efficient ) self.features.add_module('denseblock%d' % (i + 1), block) num_features = num_features + num_layers * growth_rate if i != len(block_config) - 1: trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2) self.features.add_module('transition%d' % (i + 1), trans) num_features = num_features // 2 # Final batch norm self.features.add_module('norm5', nn.BatchNorm2d(num_features)) # Linear layer self.classifier = nn.Linear(num_features, num_classes) # Official 
init from torch repo. for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.constant_(m.bias, 0) def forward(self, x): features = self.features(x) out = F.relu(features, inplace=True) out = F.adaptive_avg_pool2d(out, (1, 1)) out = torch.flatten(out, 1) out = self.classifier(out) return out def _load_state_dict(model, model_url, progress): # '.'s are no longer allowed in module names, but previous _DenseLayer # has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'. # They are also in the checkpoints in model_urls. This pattern is used # to find such keys. pattern = re.compile( r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$') state_dict = load_state_dict_from_url(model_url, progress=progress) for key in list(state_dict.keys()): res = pattern.match(key) if res: new_key = res.group(1) + res.group(2) state_dict[new_key] = state_dict[key] del state_dict[key] model.load_state_dict(state_dict) def _densenet(arch, growth_rate, block_config, num_init_features, pretrained, progress, **kwargs): model = DenseNet(growth_rate, block_config, num_init_features, **kwargs) if pretrained: _load_state_dict(model, model_urls[arch], progress) return model def densenet121(pretrained=False, progress=True, **kwargs): r"""Densenet-121 model from `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient, but slower. Default: *False*. 
See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_ """ return _densenet('densenet121', 32, (6, 12, 24, 16), 64, pretrained, progress, **kwargs) def densenet161(pretrained=False, progress=True, **kwargs): r"""Densenet-161 model from `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient, but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_ """ return _densenet('densenet161', 48, (6, 12, 36, 24), 96, pretrained, progress, **kwargs) def densenet169(pretrained=False, progress=True, **kwargs): r"""Densenet-169 model from `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient, but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_ """ return _densenet('densenet169', 32, (6, 12, 32, 32), 64, pretrained, progress, **kwargs) def densenet201(pretrained=False, progress=True, **kwargs): r"""Densenet-201 model from `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient, but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_ """ return _densenet('densenet201', 32, (6, 12, 48, 32), 64, pretrained, progress, **kwargs)
/rpi_torchvision-0.7.0-cp37-cp37m-linux_armv7l.whl/torchvision/models/densenet.py
0.856317
0.395718
densenet.py
pypi
from torch import nn from .utils import load_state_dict_from_url __all__ = ['MobileNetV2', 'mobilenet_v2'] model_urls = { 'mobilenet_v2': 'https://download.pytorch.org/models/mobilenet_v2-b0353104.pth', } def _make_divisible(v, divisor, min_value=None): """ This function is taken from the original tf repo. It ensures that all layers have a channel number that is divisible by 8 It can be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py :param v: :param divisor: :param min_value: :return: """ if min_value is None: min_value = divisor new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) # Make sure that round down does not go down by more than 10%. if new_v < 0.9 * v: new_v += divisor return new_v class ConvBNReLU(nn.Sequential): def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1): padding = (kernel_size - 1) // 2 super(ConvBNReLU, self).__init__( nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, groups=groups, bias=False), nn.BatchNorm2d(out_planes), nn.ReLU6(inplace=True) ) class InvertedResidual(nn.Module): def __init__(self, inp, oup, stride, expand_ratio): super(InvertedResidual, self).__init__() self.stride = stride assert stride in [1, 2] hidden_dim = int(round(inp * expand_ratio)) self.use_res_connect = self.stride == 1 and inp == oup layers = [] if expand_ratio != 1: # pw layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1)) layers.extend([ # dw ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim), # pw-linear nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False), nn.BatchNorm2d(oup), ]) self.conv = nn.Sequential(*layers) def forward(self, x): if self.use_res_connect: return x + self.conv(x) else: return self.conv(x) class MobileNetV2(nn.Module): def __init__(self, num_classes=1000, width_mult=1.0, inverted_residual_setting=None, round_nearest=8, block=None): """ MobileNet V2 main class Args: num_classes (int): Number of classes width_mult 
(float): Width multiplier - adjusts number of channels in each layer by this amount inverted_residual_setting: Network structure round_nearest (int): Round the number of channels in each layer to be a multiple of this number Set to 1 to turn off rounding block: Module specifying inverted residual building block for mobilenet """ super(MobileNetV2, self).__init__() if block is None: block = InvertedResidual input_channel = 32 last_channel = 1280 if inverted_residual_setting is None: inverted_residual_setting = [ # t, c, n, s [1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2], [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2], [6, 320, 1, 1], ] # only check the first element, assuming user knows t,c,n,s are required if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4: raise ValueError("inverted_residual_setting should be non-empty " "or a 4-element list, got {}".format(inverted_residual_setting)) # building first layer input_channel = _make_divisible(input_channel * width_mult, round_nearest) self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest) features = [ConvBNReLU(3, input_channel, stride=2)] # building inverted residual blocks for t, c, n, s in inverted_residual_setting: output_channel = _make_divisible(c * width_mult, round_nearest) for i in range(n): stride = s if i == 0 else 1 features.append(block(input_channel, output_channel, stride, expand_ratio=t)) input_channel = output_channel # building last several layers features.append(ConvBNReLU(input_channel, self.last_channel, kernel_size=1)) # make it nn.Sequential self.features = nn.Sequential(*features) # building classifier self.classifier = nn.Sequential( nn.Dropout(0.2), nn.Linear(self.last_channel, num_classes), ) # weight initialization for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out') if m.bias is not None: nn.init.zeros_(m.bias) elif isinstance(m, nn.BatchNorm2d): nn.init.ones_(m.weight) 
nn.init.zeros_(m.bias) elif isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0, 0.01) nn.init.zeros_(m.bias) def _forward_impl(self, x): # This exists since TorchScript doesn't support inheritance, so the superclass method # (this one) needs to have a name other than `forward` that can be accessed in a subclass x = self.features(x) # Cannot use "squeeze" as batch-size can be 1 => must use reshape with x.shape[0] x = nn.functional.adaptive_avg_pool2d(x, 1).reshape(x.shape[0], -1) x = self.classifier(x) return x def forward(self, x): return self._forward_impl(x) def mobilenet_v2(pretrained=False, progress=True, **kwargs): """ Constructs a MobileNetV2 architecture from `"MobileNetV2: Inverted Residuals and Linear Bottlenecks" <https://arxiv.org/abs/1801.04381>`_. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr """ model = MobileNetV2(**kwargs) if pretrained: state_dict = load_state_dict_from_url(model_urls['mobilenet_v2'], progress=progress) model.load_state_dict(state_dict) return model
/rpi_torchvision-0.7.0-cp37-cp37m-linux_armv7l.whl/torchvision/models/mobilenet.py
0.947539
0.446917
mobilenet.py
pypi
import warnings
from collections import namedtuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.jit.annotations import Optional, Tuple
from torch import Tensor
from .utils import load_state_dict_from_url

__all__ = ['GoogLeNet', 'googlenet', "GoogLeNetOutputs", "_GoogLeNetOutputs"]

model_urls = {
    # GoogLeNet ported from TensorFlow
    'googlenet': 'https://download.pytorch.org/models/googlenet-1378be20.pth',
}

# Output container: main classifier logits plus the two optional auxiliary
# classifier outputs (only populated in training mode with aux_logits=True).
GoogLeNetOutputs = namedtuple('GoogLeNetOutputs', ['logits', 'aux_logits2', 'aux_logits1'])
GoogLeNetOutputs.__annotations__ = {'logits': Tensor, 'aux_logits2': Optional[Tensor],
                                    'aux_logits1': Optional[Tensor]}

# Script annotations failed with _GoogleNetOutputs = namedtuple ...
# _GoogLeNetOutputs set here for backwards compat
_GoogLeNetOutputs = GoogLeNetOutputs


def googlenet(pretrained=False, progress=True, **kwargs):
    r"""GoogLeNet (Inception v1) model architecture from
    `"Going Deeper with Convolutions" <http://arxiv.org/abs/1409.4842>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        aux_logits (bool): If True, adds two auxiliary branches that can improve training.
            Default: *False* when pretrained is True otherwise *True*
        transform_input (bool): If True, preprocesses the input according to the method with which it
            was trained on ImageNet. Default: *False*
    """
    if pretrained:
        if 'transform_input' not in kwargs:
            kwargs['transform_input'] = True
        if 'aux_logits' not in kwargs:
            kwargs['aux_logits'] = False
        if kwargs['aux_logits']:
            warnings.warn('auxiliary heads in the pretrained googlenet model are NOT pretrained, '
                          'so make sure to train them')
        original_aux_logits = kwargs['aux_logits']
        # The published checkpoint contains the aux-head weights, so the heads
        # must exist while the state dict is loaded; they are stripped below
        # if the caller did not ask for them.
        kwargs['aux_logits'] = True
        kwargs['init_weights'] = False
        model = GoogLeNet(**kwargs)
        state_dict = load_state_dict_from_url(model_urls['googlenet'],
                                              progress=progress)
        model.load_state_dict(state_dict)
        if not original_aux_logits:
            model.aux_logits = False
            model.aux1 = None
            model.aux2 = None
        return model

    return GoogLeNet(**kwargs)


class GoogLeNet(nn.Module):
    """GoogLeNet (Inception v1) network.

    Three Inception stages separated by max-pools, with two optional
    auxiliary classifiers (after inception4a and inception4d) that are only
    evaluated in training mode.
    """
    __constants__ = ['aux_logits', 'transform_input']

    def __init__(self, num_classes=1000, aux_logits=True, transform_input=False, init_weights=True,
                 blocks=None):
        super(GoogLeNet, self).__init__()
        # `blocks` allows callers (e.g. quantized variants) to substitute the
        # conv / inception / aux building blocks while keeping the topology.
        if blocks is None:
            blocks = [BasicConv2d, Inception, InceptionAux]
        assert len(blocks) == 3
        conv_block = blocks[0]
        inception_block = blocks[1]
        inception_aux_block = blocks[2]

        self.aux_logits = aux_logits
        self.transform_input = transform_input

        self.conv1 = conv_block(3, 64, kernel_size=7, stride=2, padding=3)
        self.maxpool1 = nn.MaxPool2d(3, stride=2, ceil_mode=True)
        self.conv2 = conv_block(64, 64, kernel_size=1)
        self.conv3 = conv_block(64, 192, kernel_size=3, padding=1)
        self.maxpool2 = nn.MaxPool2d(3, stride=2, ceil_mode=True)

        # Inception(in_channels, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj)
        self.inception3a = inception_block(192, 64, 96, 128, 16, 32, 32)
        self.inception3b = inception_block(256, 128, 128, 192, 32, 96, 64)
        self.maxpool3 = nn.MaxPool2d(3, stride=2, ceil_mode=True)

        self.inception4a = inception_block(480, 192, 96, 208, 16, 48, 64)
        self.inception4b = inception_block(512, 160, 112, 224, 24, 64, 64)
        self.inception4c = inception_block(512, 128, 128, 256, 24, 64, 64)
        self.inception4d = inception_block(512, 112, 144, 288, 32, 64, 64)
        self.inception4e = inception_block(528, 256, 160, 320, 32, 128, 128)
        self.maxpool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)

        self.inception5a = inception_block(832, 256, 160, 320, 32, 128, 128)
        self.inception5b = inception_block(832, 384, 192, 384, 48, 128, 128)

        if aux_logits:
            self.aux1 = inception_aux_block(512, num_classes)
            self.aux2 = inception_aux_block(528, num_classes)
        else:
            # Kept as explicit None attributes so TorchScript sees a stable
            # module interface either way.
            self.aux1 = None
            self.aux2 = None

        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.dropout = nn.Dropout(0.2)
        self.fc = nn.Linear(1024, num_classes)

        if init_weights:
            self._initialize_weights()

    def _initialize_weights(self):
        # Truncated-normal init (stddev 0.01, clipped at 2 sigma) to match the
        # TensorFlow reference; scipy is imported lazily so it is only a
        # dependency when weights are actually (re)initialized.
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
                import scipy.stats as stats
                X = stats.truncnorm(-2, 2, scale=0.01)
                values = torch.as_tensor(X.rvs(m.weight.numel()), dtype=m.weight.dtype)
                values = values.view(m.weight.size())
                with torch.no_grad():
                    m.weight.copy_(values)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def _transform_input(self, x):
        # type: (Tensor) -> Tensor
        # Re-normalizes inputs from the standard ImageNet mean/std to the
        # [-1, 1]-style normalization this checkpoint was trained with.
        if self.transform_input:
            x_ch0 = torch.unsqueeze(x[:, 0], 1) * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
            x_ch1 = torch.unsqueeze(x[:, 1], 1) * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
            x_ch2 = torch.unsqueeze(x[:, 2], 1) * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
            x = torch.cat((x_ch0, x_ch1, x_ch2), 1)
        return x

    def _forward(self, x):
        # type: (Tensor) -> Tuple[Tensor, Optional[Tensor], Optional[Tensor]]
        # N x 3 x 224 x 224
        x = self.conv1(x)
        # N x 64 x 112 x 112
        x = self.maxpool1(x)
        # N x 64 x 56 x 56
        x = self.conv2(x)
        # N x 64 x 56 x 56
        x = self.conv3(x)
        # N x 192 x 56 x 56
        x = self.maxpool2(x)

        # N x 192 x 28 x 28
        x = self.inception3a(x)
        # N x 256 x 28 x 28
        x = self.inception3b(x)
        # N x 480 x 28 x 28
        x = self.maxpool3(x)
        # N x 480 x 14 x 14
        x = self.inception4a(x)
        # N x 512 x 14 x 14
        # Aux heads only fire during training; the annotate() keeps the
        # Optional[Tensor] type explicit for TorchScript.
        aux1 = torch.jit.annotate(Optional[Tensor], None)
        if self.aux1 is not None:
            if self.training:
                aux1 = self.aux1(x)

        x = self.inception4b(x)
        # N x 512 x 14 x 14
        x = self.inception4c(x)
        # N x 512 x 14 x 14
        x = self.inception4d(x)
        # N x 528 x 14 x 14
        aux2 = torch.jit.annotate(Optional[Tensor], None)
        if self.aux2 is not None:
            if self.training:
                aux2 = self.aux2(x)

        x = self.inception4e(x)
        # N x 832 x 14 x 14
        x = self.maxpool4(x)
        # N x 832 x 7 x 7
        x = self.inception5a(x)
        # N x 832 x 7 x 7
        x = self.inception5b(x)
        # N x 1024 x 7 x 7

        x = self.avgpool(x)
        # N x 1024 x 1 x 1
        x = torch.flatten(x, 1)
        # N x 1024
        x = self.dropout(x)
        x = self.fc(x)
        # N x 1000 (num_classes)
        return x, aux2, aux1

    @torch.jit.unused
    def eager_outputs(self, x, aux2, aux1):
        # type: (Tensor, Optional[Tensor], Optional[Tensor]) -> GoogLeNetOutputs
        # In eager mode, return the namedtuple only when aux outputs exist;
        # plain inference returns just the logits tensor.
        if self.training and self.aux_logits:
            return _GoogLeNetOutputs(x, aux2, aux1)
        else:
            return x

    def forward(self, x):
        # type: (Tensor) -> GoogLeNetOutputs
        x = self._transform_input(x)
        # NOTE(review): _forward returns (x, aux2, aux1) but is unpacked here
        # as (x, aux1, aux2), and the two are swapped back when building the
        # output below — so the aux values may land in each other's namedtuple
        # fields. Verify the intended aux ordering against upstream before
        # relying on aux_logits1/aux_logits2 individually.
        x, aux1, aux2 = self._forward(x)
        aux_defined = self.training and self.aux_logits
        if torch.jit.is_scripting():
            if not aux_defined:
                warnings.warn("Scripted GoogleNet always returns GoogleNetOutputs Tuple")
            return GoogLeNetOutputs(x, aux2, aux1)
        else:
            return self.eager_outputs(x, aux2, aux1)


class Inception(nn.Module):
    """Inception block: four parallel branches (1x1, 1x1->3x3, 1x1->3x3,
    pool->1x1) concatenated along the channel dimension."""

    def __init__(self, in_channels, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj,
                 conv_block=None):
        super(Inception, self).__init__()
        if conv_block is None:
            conv_block = BasicConv2d
        self.branch1 = conv_block(in_channels, ch1x1, kernel_size=1)

        self.branch2 = nn.Sequential(
            conv_block(in_channels, ch3x3red, kernel_size=1),
            conv_block(ch3x3red, ch3x3, kernel_size=3, padding=1)
        )

        self.branch3 = nn.Sequential(
            conv_block(in_channels, ch5x5red, kernel_size=1),
            # Here, kernel_size=3 instead of kernel_size=5 is a known bug.
            # Please see https://github.com/pytorch/vision/issues/906 for details.
            conv_block(ch5x5red, ch5x5, kernel_size=3, padding=1)
        )

        self.branch4 = nn.Sequential(
            nn.MaxPool2d(kernel_size=3, stride=1, padding=1, ceil_mode=True),
            conv_block(in_channels, pool_proj, kernel_size=1)
        )

    def _forward(self, x):
        branch1 = self.branch1(x)
        branch2 = self.branch2(x)
        branch3 = self.branch3(x)
        branch4 = self.branch4(x)

        outputs = [branch1, branch2, branch3, branch4]
        return outputs

    def forward(self, x):
        outputs = self._forward(x)
        # Channel-wise concatenation of all four branch outputs.
        return torch.cat(outputs, 1)


class InceptionAux(nn.Module):
    """Auxiliary classifier head attached to an intermediate feature map."""

    def __init__(self, in_channels, num_classes, conv_block=None):
        super(InceptionAux, self).__init__()
        if conv_block is None:
            conv_block = BasicConv2d
        self.conv = conv_block(in_channels, 128, kernel_size=1)

        # 128 channels * 4 * 4 spatial = 2048 inputs after the pooling below.
        self.fc1 = nn.Linear(2048, 1024)
        self.fc2 = nn.Linear(1024, num_classes)

    def forward(self, x):
        # aux1: N x 512 x 14 x 14, aux2: N x 528 x 14 x 14
        x = F.adaptive_avg_pool2d(x, (4, 4))
        # aux1: N x 512 x 4 x 4, aux2: N x 528 x 4 x 4
        x = self.conv(x)
        # N x 128 x 4 x 4
        x = torch.flatten(x, 1)
        # N x 2048
        x = F.relu(self.fc1(x), inplace=True)
        # N x 1024
        x = F.dropout(x, 0.7, training=self.training)
        # N x 1024
        x = self.fc2(x)
        # N x 1000 (num_classes)
        return x


class BasicConv2d(nn.Module):
    """Conv2d (no bias) + BatchNorm + ReLU, the basic unit of this network."""

    def __init__(self, in_channels, out_channels, **kwargs):
        super(BasicConv2d, self).__init__()
        # bias=False because the following BatchNorm has its own shift term.
        self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
        self.bn = nn.BatchNorm2d(out_channels, eps=0.001)

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        return F.relu(x, inplace=True)
/rpi_torchvision-0.7.0-cp37-cp37m-linux_armv7l.whl/torchvision/models/googlenet.py
0.906511
0.339828
googlenet.py
pypi
import torch
import torch.nn as nn
from .utils import load_state_dict_from_url


__all__ = [
    'ShuffleNetV2', 'shufflenet_v2_x0_5', 'shufflenet_v2_x1_0',
    'shufflenet_v2_x1_5', 'shufflenet_v2_x2_0'
]

model_urls = {
    'shufflenetv2_x0.5': 'https://download.pytorch.org/models/shufflenetv2_x0.5-f707e7126e.pth',
    'shufflenetv2_x1.0': 'https://download.pytorch.org/models/shufflenetv2_x1-5666bf0f80.pth',
    # No published checkpoints for the 1.5x / 2.0x variants.
    'shufflenetv2_x1.5': None,
    'shufflenetv2_x2.0': None,
}


def channel_shuffle(x, groups):
    # type: (torch.Tensor, int) -> torch.Tensor
    """Interleave channels across `groups` so information mixes between the
    two block branches after each concatenation."""
    batchsize, num_channels, height, width = x.data.size()
    channels_per_group = num_channels // groups

    # reshape: split channel dim into (groups, channels_per_group)
    x = x.view(batchsize, groups,
               channels_per_group, height, width)

    # transpose group and per-group axes, then make memory contiguous again
    x = torch.transpose(x, 1, 2).contiguous()

    # flatten back to (N, C, H, W) with channels now interleaved
    x = x.view(batchsize, -1, height, width)

    return x


class InvertedResidual(nn.Module):
    """ShuffleNetV2 building block.

    stride == 1: input is split channel-wise; one half passes through
    unchanged, the other through branch2, then the halves are concatenated
    and shuffled. stride > 1: both branches process the full input
    (spatial downsampling), doubling the channel count.
    """

    def __init__(self, inp, oup, stride):
        super(InvertedResidual, self).__init__()

        if not (1 <= stride <= 3):
            raise ValueError('illegal stride value')
        self.stride = stride

        branch_features = oup // 2
        # For stride 1 the identity half must match branch2's output width,
        # which requires inp == oup // 2 * 2.
        assert (self.stride != 1) or (inp == branch_features << 1)

        if self.stride > 1:
            # Downsampling branch: depthwise 3x3 (stride s) then pointwise 1x1.
            self.branch1 = nn.Sequential(
                self.depthwise_conv(inp, inp, kernel_size=3, stride=self.stride, padding=1),
                nn.BatchNorm2d(inp),
                nn.Conv2d(inp, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
                nn.BatchNorm2d(branch_features),
                nn.ReLU(inplace=True),
            )
        else:
            # Identity path for stride 1 (kept as an empty Sequential so the
            # module structure is uniform across strides).
            self.branch1 = nn.Sequential()

        # Main branch: 1x1 -> depthwise 3x3 -> 1x1.
        self.branch2 = nn.Sequential(
            nn.Conv2d(inp if (self.stride > 1) else branch_features,
                      branch_features, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(branch_features),
            nn.ReLU(inplace=True),
            self.depthwise_conv(branch_features, branch_features, kernel_size=3,
                                stride=self.stride, padding=1),
            nn.BatchNorm2d(branch_features),
            nn.Conv2d(branch_features, branch_features, kernel_size=1, stride=1, padding=0,
                      bias=False),
            nn.BatchNorm2d(branch_features),
            nn.ReLU(inplace=True),
        )

    @staticmethod
    def depthwise_conv(i, o, kernel_size, stride=1, padding=0, bias=False):
        # groups=i makes this a depthwise convolution (one filter per channel).
        return nn.Conv2d(i, o, kernel_size, stride, padding, bias=bias, groups=i)

    def forward(self, x):
        if self.stride == 1:
            # Channel split: first half is the shortcut, second half is processed.
            x1, x2 = x.chunk(2, dim=1)
            out = torch.cat((x1, self.branch2(x2)), dim=1)
        else:
            out = torch.cat((self.branch1(x), self.branch2(x)), dim=1)

        # Shuffle across the two concatenated halves.
        out = channel_shuffle(out, 2)

        return out


class ShuffleNetV2(nn.Module):
    """ShuffleNetV2 classifier: stem conv + maxpool, three stages of
    InvertedResidual blocks, a final 1x1 conv, global average pooling, and a
    linear head.

    Args:
        stages_repeats: number of blocks in stages 2-4 (list of 3 ints)
        stages_out_channels: output channels of stem + stages 2-4 + conv5
            (list of 5 ints)
        num_classes: size of the classification head
        inverted_residual: block class, substitutable for testing/quantization
    """

    def __init__(self, stages_repeats, stages_out_channels,
                 num_classes=1000, inverted_residual=InvertedResidual):
        super(ShuffleNetV2, self).__init__()

        if len(stages_repeats) != 3:
            raise ValueError('expected stages_repeats as list of 3 positive ints')
        if len(stages_out_channels) != 5:
            raise ValueError('expected stages_out_channels as list of 5 positive ints')
        self._stage_out_channels = stages_out_channels

        input_channels = 3
        output_channels = self._stage_out_channels[0]
        self.conv1 = nn.Sequential(
            nn.Conv2d(input_channels, output_channels, 3, 2, 1, bias=False),
            nn.BatchNorm2d(output_channels),
            nn.ReLU(inplace=True),
        )
        input_channels = output_channels

        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        # Each stage starts with one stride-2 (downsampling) block followed by
        # `repeats - 1` stride-1 blocks.
        stage_names = ['stage{}'.format(i) for i in [2, 3, 4]]
        for name, repeats, output_channels in zip(
                stage_names, stages_repeats, self._stage_out_channels[1:]):
            seq = [inverted_residual(input_channels, output_channels, 2)]
            for i in range(repeats - 1):
                seq.append(inverted_residual(output_channels, output_channels, 1))
            setattr(self, name, nn.Sequential(*seq))
            input_channels = output_channels

        output_channels = self._stage_out_channels[-1]
        self.conv5 = nn.Sequential(
            nn.Conv2d(input_channels, output_channels, 1, 1, 0, bias=False),
            nn.BatchNorm2d(output_channels),
            nn.ReLU(inplace=True),
        )

        self.fc = nn.Linear(output_channels, num_classes)

    def _forward_impl(self, x):
        # See note [TorchScript super()]
        x = self.conv1(x)
        x = self.maxpool(x)
        x = self.stage2(x)
        x = self.stage3(x)
        x = self.stage4(x)
        x = self.conv5(x)
        x = x.mean([2, 3])  # globalpool
        x = self.fc(x)
        return x

    def forward(self, x):
        return self._forward_impl(x)


def _shufflenetv2(arch, pretrained, progress, *args, **kwargs):
    # Shared factory: builds the model and optionally loads the checkpoint
    # registered under `arch` (None means no checkpoint is published).
    model = ShuffleNetV2(*args, **kwargs)

    if pretrained:
        model_url = model_urls[arch]
        if model_url is None:
            raise NotImplementedError('pretrained {} is not supported as of now'.format(arch))
        else:
            state_dict = load_state_dict_from_url(model_url, progress=progress)
            model.load_state_dict(state_dict)

    return model


def shufflenet_v2_x0_5(pretrained=False, progress=True, **kwargs):
    """
    Constructs a ShuffleNetV2 with 0.5x output channels, as described in
    `"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design"
    <https://arxiv.org/abs/1807.11164>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _shufflenetv2('shufflenetv2_x0.5', pretrained, progress,
                         [4, 8, 4], [24, 48, 96, 192, 1024], **kwargs)


def shufflenet_v2_x1_0(pretrained=False, progress=True, **kwargs):
    """
    Constructs a ShuffleNetV2 with 1.0x output channels, as described in
    `"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design"
    <https://arxiv.org/abs/1807.11164>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _shufflenetv2('shufflenetv2_x1.0', pretrained, progress,
                         [4, 8, 4], [24, 116, 232, 464, 1024], **kwargs)


def shufflenet_v2_x1_5(pretrained=False, progress=True, **kwargs):
    """
    Constructs a ShuffleNetV2 with 1.5x output channels, as described in
    `"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design"
    <https://arxiv.org/abs/1807.11164>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _shufflenetv2('shufflenetv2_x1.5', pretrained, progress,
                         [4, 8, 4], [24, 176, 352, 704, 1024], **kwargs)


def shufflenet_v2_x2_0(pretrained=False, progress=True, **kwargs):
    """
    Constructs a ShuffleNetV2 with 2.0x output channels, as described in
    `"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design"
    <https://arxiv.org/abs/1807.11164>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _shufflenetv2('shufflenetv2_x2.0', pretrained, progress,
                         [4, 8, 4], [24, 244, 488, 976, 2048], **kwargs)
/rpi_torchvision-0.7.0-cp37-cp37m-linux_armv7l.whl/torchvision/models/shufflenetv2.py
0.897858
0.466846
shufflenetv2.py
pypi
import torch
import torch.nn as nn
from .utils import load_state_dict_from_url


__all__ = ['AlexNet', 'alexnet']


model_urls = {
    'alexnet': 'https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth',
}


class AlexNet(nn.Module):
    """AlexNet ("one weird trick" variant) image classifier.

    Five convolutional stages (`features`), an adaptive average pool to a
    fixed 6x6 map, and a three-layer fully connected head (`classifier`).
    """

    def __init__(self, num_classes=1000):
        super(AlexNet, self).__init__()
        # Convolutional trunk. Layer order (and hence Sequential indices)
        # must stay stable so the published pretrained state-dict keys match.
        conv_stack = [
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        ]
        self.features = nn.Sequential(*conv_stack)
        # Fixed-size pooling makes the head independent of input resolution.
        self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
        head = [
            nn.Dropout(),
            nn.Linear(256 * 6 * 6, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, num_classes),
        ]
        self.classifier = nn.Sequential(*head)

    def forward(self, x):
        feats = self.features(x)
        pooled = self.avgpool(feats)
        flat = torch.flatten(pooled, 1)
        return self.classifier(flat)


def alexnet(pretrained=False, progress=True, **kwargs):
    r"""AlexNet model architecture from the
    `"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    model = AlexNet(**kwargs)
    if not pretrained:
        return model
    state_dict = load_state_dict_from_url(model_urls['alexnet'],
                                          progress=progress)
    model.load_state_dict(state_dict)
    return model
/rpi_torchvision-0.7.0-cp37-cp37m-linux_armv7l.whl/torchvision/models/alexnet.py
0.93759
0.364466
alexnet.py
pypi
import warnings

import torch
import torch.nn as nn

from .utils import load_state_dict_from_url

__all__ = ['MNASNet', 'mnasnet0_5', 'mnasnet0_75', 'mnasnet1_0', 'mnasnet1_3']

_MODEL_URLS = {
    "mnasnet0_5":
    "https://download.pytorch.org/models/mnasnet0.5_top1_67.823-3ffadce67e.pth",
    "mnasnet0_75": None,
    "mnasnet1_0":
    "https://download.pytorch.org/models/mnasnet1.0_top1_73.512-f206786ef8.pth",
    "mnasnet1_3": None
}

# Paper suggests 0.9997 momentum, for TensorFlow. Equivalent PyTorch momentum is
# 1.0 - tensorflow.
_BN_MOMENTUM = 1 - 0.9997


class _InvertedResidual(nn.Module):
    """MobileNetV2-style inverted residual: expand 1x1 -> depthwise kxk ->
    project 1x1, with a skip connection when shape permits."""

    def __init__(self, in_ch, out_ch, kernel_size, stride, expansion_factor,
                 bn_momentum=0.1):
        super(_InvertedResidual, self).__init__()
        assert stride in [1, 2]
        assert kernel_size in [3, 5]
        mid_ch = in_ch * expansion_factor
        # Residual add is only valid when the block preserves shape.
        self.apply_residual = (in_ch == out_ch and stride == 1)
        self.layers = nn.Sequential(
            # Pointwise
            nn.Conv2d(in_ch, mid_ch, 1, bias=False),
            nn.BatchNorm2d(mid_ch, momentum=bn_momentum),
            nn.ReLU(inplace=True),
            # Depthwise
            nn.Conv2d(mid_ch, mid_ch, kernel_size, padding=kernel_size // 2,
                      stride=stride, groups=mid_ch, bias=False),
            nn.BatchNorm2d(mid_ch, momentum=bn_momentum),
            nn.ReLU(inplace=True),
            # Linear pointwise. Note that there's no activation.
            nn.Conv2d(mid_ch, out_ch, 1, bias=False),
            nn.BatchNorm2d(out_ch, momentum=bn_momentum))

    def forward(self, input):
        if self.apply_residual:
            return self.layers(input) + input
        else:
            return self.layers(input)


def _stack(in_ch, out_ch, kernel_size, stride, exp_factor, repeats,
           bn_momentum):
    """ Creates a stack of inverted residuals. """
    assert repeats >= 1
    # First one has no skip, because feature map size changes.
    first = _InvertedResidual(in_ch, out_ch, kernel_size, stride, exp_factor,
                              bn_momentum=bn_momentum)
    remaining = []
    for _ in range(1, repeats):
        remaining.append(
            _InvertedResidual(out_ch, out_ch, kernel_size, 1, exp_factor,
                              bn_momentum=bn_momentum))
    return nn.Sequential(first, *remaining)


def _round_to_multiple_of(val, divisor, round_up_bias=0.9):
    """ Asymmetric rounding to make `val` divisible by `divisor`. With default
    bias, will round up, unless the number is no more than 10% greater than the
    smaller divisible value, i.e. (83, 8) -> 80, but (84, 8) -> 88. """
    assert 0.0 < round_up_bias < 1.0
    new_val = max(divisor, int(val + divisor / 2) // divisor * divisor)
    return new_val if new_val >= round_up_bias * val else new_val + divisor


def _get_depths(alpha):
    """ Scales tensor depths as in reference MobileNet code, prefers rounding
    up rather than down. """
    depths = [32, 16, 24, 40, 80, 96, 192, 320]
    return [_round_to_multiple_of(depth * alpha, 8) for depth in depths]


class MNASNet(torch.nn.Module):
    """ MNASNet, as described in https://arxiv.org/pdf/1807.11626.pdf. This
    implements the B1 variant of the model.
    >>> model = MNASNet(1000, 1.0)
    >>> x = torch.rand(1, 3, 224, 224)
    >>> y = model(x)
    >>> y.dim()
    1
    >>> y.nelement()
    1000
    """
    # Version 2 adds depth scaling in the initial stages of the network.
    _version = 2

    def __init__(self, alpha, num_classes=1000, dropout=0.2):
        super(MNASNet, self).__init__()
        assert alpha > 0.0
        self.alpha = alpha
        self.num_classes = num_classes
        depths = _get_depths(alpha)
        layers = [
            # First layer: regular conv.
            nn.Conv2d(3, depths[0], 3, padding=1, stride=2, bias=False),
            nn.BatchNorm2d(depths[0], momentum=_BN_MOMENTUM),
            nn.ReLU(inplace=True),
            # Depthwise separable, no skip.
            nn.Conv2d(depths[0], depths[0], 3, padding=1, stride=1,
                      groups=depths[0], bias=False),
            nn.BatchNorm2d(depths[0], momentum=_BN_MOMENTUM),
            nn.ReLU(inplace=True),
            nn.Conv2d(depths[0], depths[1], 1, padding=0, stride=1, bias=False),
            nn.BatchNorm2d(depths[1], momentum=_BN_MOMENTUM),
            # MNASNet blocks: stacks of inverted residuals.
            _stack(depths[1], depths[2], 3, 2, 3, 3, _BN_MOMENTUM),
            _stack(depths[2], depths[3], 5, 2, 3, 3, _BN_MOMENTUM),
            _stack(depths[3], depths[4], 5, 2, 6, 3, _BN_MOMENTUM),
            _stack(depths[4], depths[5], 3, 1, 6, 2, _BN_MOMENTUM),
            _stack(depths[5], depths[6], 5, 2, 6, 4, _BN_MOMENTUM),
            _stack(depths[6], depths[7], 3, 1, 6, 1, _BN_MOMENTUM),
            # Final mapping to classifier input.
            nn.Conv2d(depths[7], 1280, 1, padding=0, stride=1, bias=False),
            nn.BatchNorm2d(1280, momentum=_BN_MOMENTUM),
            nn.ReLU(inplace=True),
        ]
        self.layers = nn.Sequential(*layers)
        self.classifier = nn.Sequential(nn.Dropout(p=dropout, inplace=True),
                                        nn.Linear(1280, num_classes))
        self._initialize_weights()

    def forward(self, x):
        x = self.layers(x)
        # Equivalent to global avgpool and removing H and W dimensions.
        x = x.mean([2, 3])
        return self.classifier(x)

    def _initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out",
                                        nonlinearity="relu")
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.kaiming_uniform_(m.weight, mode="fan_out",
                                         nonlinearity="sigmoid")
                nn.init.zeros_(m.bias)

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        # Backward compatibility shim: v1 checkpoints used a fixed-size stem
        # regardless of alpha. When loading such a checkpoint into a model
        # with alpha != 1.0, rebuild the stem to match the v1 layout so the
        # parameters line up.
        version = local_metadata.get("version", None)
        assert version in [1, 2]

        if version == 1 and not self.alpha == 1.0:
            # In the initial version of the model (v1), stem was fixed-size.
            # All other layer configurations were the same. This will patch
            # the model so that it's identical to v1. Model with alpha 1.0 is
            # unaffected.
            depths = _get_depths(self.alpha)
            v1_stem = [
                nn.Conv2d(3, 32, 3, padding=1, stride=2, bias=False),
                nn.BatchNorm2d(32, momentum=_BN_MOMENTUM),
                nn.ReLU(inplace=True),
                nn.Conv2d(32, 32, 3, padding=1, stride=1, groups=32,
                          bias=False),
                nn.BatchNorm2d(32, momentum=_BN_MOMENTUM),
                nn.ReLU(inplace=True),
                nn.Conv2d(32, 16, 1, padding=0, stride=1, bias=False),
                nn.BatchNorm2d(16, momentum=_BN_MOMENTUM),
                _stack(16, depths[2], 3, 2, 3, 3, _BN_MOMENTUM),
            ]
            for idx, layer in enumerate(v1_stem):
                self.layers[idx] = layer

            # The model is now identical to v1, and must be saved as such.
            self._version = 1
            warnings.warn(
                "A new version of MNASNet model has been implemented. "
                "Your checkpoint was saved using the previous version. "
                "This checkpoint will load and work as before, but "
                "you may want to upgrade by training a newer model or "
                "transfer learning from an updated ImageNet checkpoint.",
                UserWarning)

        super(MNASNet, self)._load_from_state_dict(
            state_dict, prefix, local_metadata, strict, missing_keys,
            unexpected_keys, error_msgs)


def _load_pretrained(model_name, model, progress):
    # Loads the registered checkpoint for `model_name` into `model`;
    # raises when no checkpoint is published for that variant.
    if model_name not in _MODEL_URLS or _MODEL_URLS[model_name] is None:
        raise ValueError(
            "No checkpoint is available for model type {}".format(model_name))
    checkpoint_url = _MODEL_URLS[model_name]
    model.load_state_dict(
        load_state_dict_from_url(checkpoint_url, progress=progress))


def mnasnet0_5(pretrained=False, progress=True, **kwargs):
    """MNASNet with depth multiplier of 0.5 from
    `"MnasNet: Platform-Aware Neural Architecture Search for Mobile"
    <https://arxiv.org/pdf/1807.11626.pdf>`_.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    model = MNASNet(0.5, **kwargs)
    if pretrained:
        _load_pretrained("mnasnet0_5", model, progress)
    return model


def mnasnet0_75(pretrained=False, progress=True, **kwargs):
    """MNASNet with depth multiplier of 0.75 from
    `"MnasNet: Platform-Aware Neural Architecture Search for Mobile"
    <https://arxiv.org/pdf/1807.11626.pdf>`_.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    model = MNASNet(0.75, **kwargs)
    if pretrained:
        _load_pretrained("mnasnet0_75", model, progress)
    return model


def mnasnet1_0(pretrained=False, progress=True, **kwargs):
    """MNASNet with depth multiplier of 1.0 from
    `"MnasNet: Platform-Aware Neural Architecture Search for Mobile"
    <https://arxiv.org/pdf/1807.11626.pdf>`_.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    model = MNASNet(1.0, **kwargs)
    if pretrained:
        _load_pretrained("mnasnet1_0", model, progress)
    return model


def mnasnet1_3(pretrained=False, progress=True, **kwargs):
    """MNASNet with depth multiplier of 1.3 from
    `"MnasNet: Platform-Aware Neural Architecture Search for Mobile"
    <https://arxiv.org/pdf/1807.11626.pdf>`_.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    model = MNASNet(1.3, **kwargs)
    if pretrained:
        _load_pretrained("mnasnet1_3", model, progress)
    return model
/rpi_torchvision-0.7.0-cp37-cp37m-linux_armv7l.whl/torchvision/models/mnasnet.py
0.927986
0.558026
mnasnet.py
pypi
import torch
import torch.nn as nn
from .utils import load_state_dict_from_url


__all__ = [
    'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
    'vgg19_bn', 'vgg19',
]


model_urls = {
    'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
    'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
    'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
    'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
    'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
    'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',
    'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
    'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
}


class VGG(nn.Module):
    """VGG classifier: a conv feature extractor (built by :func:`make_layers`
    from one of the `cfgs` configurations), adaptive 7x7 pooling, and a
    three-layer fully connected head.

    Args:
        features: the convolutional trunk (an ``nn.Sequential``)
        num_classes: size of the classification head
        init_weights: apply Kaiming/normal init; disabled when loading
            pretrained weights
    """

    def __init__(self, features, num_classes=1000, init_weights=True):
        super(VGG, self).__init__()
        self.features = features
        # Fixed-size pooling makes the head independent of input resolution.
        self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
        self.classifier = nn.Sequential(
            nn.Linear(512 * 7 * 7, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, num_classes),
        )
        if init_weights:
            self._initialize_weights()

    def forward(self, x):
        x = self.features(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.classifier(x)
        return x

    def _initialize_weights(self):
        # Kaiming init for convs (ReLU gain), constant init for BN, small
        # normal init for linear layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)


def make_layers(cfg, batch_norm=False):
    """Build the conv trunk from a config list: ints are 3x3 conv output
    widths, 'M' inserts a 2x2 max-pool. The resulting layer order determines
    the pretrained state-dict keys, so it must not change."""
    layers = []
    in_channels = 3
    for v in cfg:
        if v == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
            if batch_norm:
                layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
            else:
                layers += [conv2d, nn.ReLU(inplace=True)]
            in_channels = v
    return nn.Sequential(*layers)


# Layer configurations: A=11, B=13, D=16, E=19 weight layers.
cfgs = {
    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M',
          512, 512, 512, 512, 'M'],
}


def _vgg(arch, cfg, batch_norm, pretrained, progress, **kwargs):
    # Shared factory for all public vgg* constructors; skips random init
    # when pretrained weights will overwrite everything anyway.
    if pretrained:
        kwargs['init_weights'] = False
    model = VGG(make_layers(cfgs[cfg], batch_norm=batch_norm), **kwargs)
    if pretrained:
        state_dict = load_state_dict_from_url(model_urls[arch],
                                              progress=progress)
        model.load_state_dict(state_dict)
    return model


def vgg11(pretrained=False, progress=True, **kwargs):
    r"""VGG 11-layer model (configuration "A") from
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg('vgg11', 'A', False, pretrained, progress, **kwargs)


def vgg11_bn(pretrained=False, progress=True, **kwargs):
    r"""VGG 11-layer model (configuration "A") with batch normalization
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg('vgg11_bn', 'A', True, pretrained, progress, **kwargs)


def vgg13(pretrained=False, progress=True, **kwargs):
    r"""VGG 13-layer model (configuration "B")
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg('vgg13', 'B', False, pretrained, progress, **kwargs)


def vgg13_bn(pretrained=False, progress=True, **kwargs):
    r"""VGG 13-layer model (configuration "B") with batch normalization
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg('vgg13_bn', 'B', True, pretrained, progress, **kwargs)


def vgg16(pretrained=False, progress=True, **kwargs):
    r"""VGG 16-layer model (configuration "D")
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg('vgg16', 'D', False, pretrained, progress, **kwargs)


def vgg16_bn(pretrained=False, progress=True, **kwargs):
    r"""VGG 16-layer model (configuration "D") with batch normalization
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg('vgg16_bn', 'D', True, pretrained, progress, **kwargs)


def vgg19(pretrained=False, progress=True, **kwargs):
    r"""VGG 19-layer model (configuration "E")
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg('vgg19', 'E', False, pretrained, progress, **kwargs)


def vgg19_bn(pretrained=False, progress=True, **kwargs):
    r"""VGG 19-layer model (configuration 'E') with batch normalization
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg('vgg19_bn', 'E', True, pretrained, progress, **kwargs)
/rpi_torchvision-0.7.0-cp37-cp37m-linux_armv7l.whl/torchvision/models/vgg.py
0.930229
0.500488
vgg.py
pypi
from collections import OrderedDict

import torch
from torch import nn
from torch.jit.annotations import Dict


class IntermediateLayerGetter(nn.ModuleDict):
    """
    Module wrapper that returns intermediate layers from a model

    It has a strong assumption that the modules have been registered
    into the model in the same order as they are used.
    This means that one should **not** reuse the same nn.Module
    twice in the forward if you want this to work.

    Additionally, it is only able to query submodules that are directly
    assigned to the model. So if `model` is passed, `model.feature1` can
    be returned, but not `model.feature1.layer2`.

    Arguments:
        model (nn.Module): model on which we will extract the features
        return_layers (Dict[name, new_name]): a dict containing the names
            of the modules for which the activations will be returned as
            the key of the dict, and the value of the dict is the name
            of the returned activation (which the user can specify).

    Examples::

        >>> m = torchvision.models.resnet18(pretrained=True)
        >>> # extract layer1 and layer3, giving as names `feat1` and feat2`
        >>> new_m = torchvision.models._utils.IntermediateLayerGetter(m,
        >>>     {'layer1': 'feat1', 'layer3': 'feat2'})
        >>> out = new_m(torch.rand(1, 3, 224, 224))
        >>> print([(k, v.shape) for k, v in out.items()])
        >>>     [('feat1', torch.Size([1, 64, 56, 56])),
        >>>      ('feat2', torch.Size([1, 256, 14, 14]))]
    """
    _version = 2
    # TorchScript needs the attribute type declared up front.
    __annotations__ = {
        "return_layers": Dict[str, str],
    }

    def __init__(self, model, return_layers):
        # Every requested layer must be a *direct* child of the model.
        if not set(return_layers).issubset([name for name, _ in model.named_children()]):
            raise ValueError("return_layers are not present in model")

        # Keep the caller's dict untouched; mutate a stringified working copy.
        orig_return_layers = return_layers
        return_layers = {str(k): str(v) for k, v in return_layers.items()}
        layers = OrderedDict()
        for name, module in model.named_children():
            layers[name] = module
            if name in return_layers:
                del return_layers[name]
            # Stop copying children once the last requested layer is included —
            # later submodules are never needed for the forward pass.
            if not return_layers:
                break

        # Register the retained children in original order (order matters for
        # both execution and state_dict keys).
        super(IntermediateLayerGetter, self).__init__(layers)
        self.return_layers = orig_return_layers

    def forward(self, x):
        # Run children sequentially, recording the outputs of requested layers
        # under their user-chosen names.
        out = OrderedDict()
        for name, module in self.items():
            x = module(x)
            if name in self.return_layers:
                out_name = self.return_layers[name]
                out[out_name] = x
        return out
/rpi_torchvision-0.7.0-cp37-cp37m-linux_armv7l.whl/torchvision/models/_utils.py
0.952508
0.748444
_utils.py
pypi
import torch
from torch import nn
from torch.nn import functional as F

from ._utils import _SimpleSegmentationModel


__all__ = ["DeepLabV3"]


class DeepLabV3(_SimpleSegmentationModel):
    """
    Implements DeepLabV3 model from
    `"Rethinking Atrous Convolution for Semantic Image Segmentation"
    <https://arxiv.org/abs/1706.05587>`_.

    Arguments:
        backbone (nn.Module): the network used to compute the features for the model.
            The backbone should return an OrderedDict[Tensor], with the key being
            "out" for the last feature map used, and "aux" if an auxiliary classifier
            is used.
        classifier (nn.Module): module that takes the "out" element returned from
            the backbone and returns a dense prediction.
        aux_classifier (nn.Module, optional): auxiliary classifier used during training
    """
    # All behavior lives in _SimpleSegmentationModel; this class only names it.
    pass


class DeepLabHead(nn.Sequential):
    # Segmentation head: ASPP context module, a 3x3 conv refinement stage,
    # then a 1x1 conv projecting to per-class logits.
    def __init__(self, in_channels, num_classes):
        super(DeepLabHead, self).__init__(
            ASPP(in_channels, [12, 24, 36]),
            nn.Conv2d(256, 256, 3, padding=1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.Conv2d(256, num_classes, 1)
        )


class ASPPConv(nn.Sequential):
    # One atrous (dilated) 3x3 conv branch of ASPP; padding == dilation keeps
    # the spatial size unchanged.
    def __init__(self, in_channels, out_channels, dilation):
        modules = [
            nn.Conv2d(in_channels, out_channels, 3, padding=dilation, dilation=dilation, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU()
        ]
        super(ASPPConv, self).__init__(*modules)


class ASPPPooling(nn.Sequential):
    # Image-level branch: global average pool -> 1x1 conv -> upsample back
    # to the input resolution so it can be concatenated with the other branches.
    def __init__(self, in_channels, out_channels):
        super(ASPPPooling, self).__init__(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(in_channels, out_channels, 1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU())

    def forward(self, x):
        size = x.shape[-2:]
        for mod in self:
            x = mod(x)
        # Restore original H x W before concatenation with the conv branches.
        return F.interpolate(x, size=size, mode='bilinear', align_corners=False)


class ASPP(nn.Module):
    # Atrous Spatial Pyramid Pooling: a 1x1 branch, three dilated 3x3 branches
    # at the given rates, and an image-level pooling branch, fused by a 1x1
    # projection with dropout.
    def __init__(self, in_channels, atrous_rates):
        super(ASPP, self).__init__()
        out_channels = 256
        modules = []
        # 1x1 conv branch (no dilation).
        modules.append(nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU()))

        # Exactly three atrous rates are expected (e.g. [12, 24, 36]).
        rate1, rate2, rate3 = tuple(atrous_rates)
        modules.append(ASPPConv(in_channels, out_channels, rate1))
        modules.append(ASPPConv(in_channels, out_channels, rate2))
        modules.append(ASPPConv(in_channels, out_channels, rate3))
        modules.append(ASPPPooling(in_channels, out_channels))

        self.convs = nn.ModuleList(modules)

        # 5 branches * out_channels inputs fused down to out_channels.
        self.project = nn.Sequential(
            nn.Conv2d(5 * out_channels, out_channels, 1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
            nn.Dropout(0.5))

    def forward(self, x):
        res = []
        for conv in self.convs:
            res.append(conv(x))
        # Concatenate all branch outputs along the channel dimension.
        res = torch.cat(res, dim=1)
        return self.project(res)
/rpi_torchvision-0.7.0-cp37-cp37m-linux_armv7l.whl/torchvision/models/segmentation/deeplabv3.py
0.947769
0.550728
deeplabv3.py
pypi
from .._utils import IntermediateLayerGetter
from ..utils import load_state_dict_from_url
from .. import resnet
from .deeplabv3 import DeepLabHead, DeepLabV3
from .fcn import FCN, FCNHead


__all__ = ['fcn_resnet50', 'fcn_resnet101', 'deeplabv3_resnet50', 'deeplabv3_resnet101']


model_urls = {
    'fcn_resnet50_coco': 'https://download.pytorch.org/models/fcn_resnet50_coco-1167a1af.pth',
    'fcn_resnet101_coco': 'https://download.pytorch.org/models/fcn_resnet101_coco-7ecb50ca.pth',
    'deeplabv3_resnet50_coco': 'https://download.pytorch.org/models/deeplabv3_resnet50_coco-cd0a2569.pth',
    'deeplabv3_resnet101_coco': 'https://download.pytorch.org/models/deeplabv3_resnet101_coco-586e9e4e.pth',
}


def _segm_resnet(name, backbone_name, num_classes, aux, pretrained_backbone=True):
    # Assemble a segmentation model: a dilated ResNet backbone feeding the
    # requested head ('fcn' or 'deeplabv3'), optionally with an aux classifier.
    # Dilation replaces striding in the last two stages so the output stride
    # stays at 8 for dense prediction.
    backbone = resnet.__dict__[backbone_name](
        pretrained=pretrained_backbone,
        replace_stride_with_dilation=[False, True, True])

    # Expose layer4 as "out" (and layer3 as "aux" when requested).
    return_layers = {'layer4': 'out'}
    if aux:
        return_layers['layer3'] = 'aux'
    backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)

    aux_classifier = None
    if aux:
        # layer3 of a ResNet-50/101 backbone produces 1024 channels.
        inplanes = 1024
        aux_classifier = FCNHead(inplanes, num_classes)

    model_map = {
        'deeplabv3': (DeepLabHead, DeepLabV3),
        'fcn': (FCNHead, FCN),
    }
    # layer4 produces 2048 channels.
    inplanes = 2048
    classifier = model_map[name][0](inplanes, num_classes)
    base_model = model_map[name][1]

    model = base_model(backbone, classifier, aux_classifier)
    return model


def _load_model(arch_type, backbone, pretrained, progress, num_classes, aux_loss, **kwargs):
    # Build the model and optionally load COCO-pretrained weights.
    # The published checkpoints were trained with an aux head, so force it on.
    if pretrained:
        aux_loss = True
    model = _segm_resnet(arch_type, backbone, num_classes, aux_loss, **kwargs)
    if pretrained:
        arch = arch_type + '_' + backbone + '_coco'
        model_url = model_urls[arch]
        if model_url is None:
            raise NotImplementedError('pretrained {} is not supported as of now'.format(arch))
        else:
            state_dict = load_state_dict_from_url(model_url, progress=progress)
            model.load_state_dict(state_dict)
    return model


def fcn_resnet50(pretrained=False, progress=True,
                 num_classes=21, aux_loss=None, **kwargs):
    """Constructs a Fully-Convolutional Network model with a ResNet-50 backbone.

    Args:
        pretrained (bool): If True, returns a model pre-trained on COCO train2017 which
            contains the same classes as Pascal VOC
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _load_model('fcn', 'resnet50', pretrained, progress, num_classes, aux_loss, **kwargs)


def fcn_resnet101(pretrained=False, progress=True,
                  num_classes=21, aux_loss=None, **kwargs):
    """Constructs a Fully-Convolutional Network model with a ResNet-101 backbone.

    Args:
        pretrained (bool): If True, returns a model pre-trained on COCO train2017 which
            contains the same classes as Pascal VOC
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _load_model('fcn', 'resnet101', pretrained, progress, num_classes, aux_loss, **kwargs)


def deeplabv3_resnet50(pretrained=False, progress=True,
                       num_classes=21, aux_loss=None, **kwargs):
    """Constructs a DeepLabV3 model with a ResNet-50 backbone.

    Args:
        pretrained (bool): If True, returns a model pre-trained on COCO train2017 which
            contains the same classes as Pascal VOC
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _load_model('deeplabv3', 'resnet50', pretrained, progress, num_classes, aux_loss, **kwargs)


def deeplabv3_resnet101(pretrained=False, progress=True,
                        num_classes=21, aux_loss=None, **kwargs):
    """Constructs a DeepLabV3 model with a ResNet-101 backbone.

    Args:
        pretrained (bool): If True, returns a model pre-trained on COCO train2017 which
            contains the same classes as Pascal VOC
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _load_model('deeplabv3', 'resnet101', pretrained, progress, num_classes, aux_loss, **kwargs)
/rpi_torchvision-0.7.0-cp37-cp37m-linux_armv7l.whl/torchvision/models/segmentation/segmentation.py
0.779322
0.341994
segmentation.py
pypi
import torch
import torch.nn as nn

from ..utils import load_state_dict_from_url


__all__ = ['r3d_18', 'mc3_18', 'r2plus1d_18']

model_urls = {
    'r3d_18': 'https://download.pytorch.org/models/r3d_18-b3b3357e.pth',
    'mc3_18': 'https://download.pytorch.org/models/mc3_18-a90a0ba3.pth',
    'r2plus1d_18': 'https://download.pytorch.org/models/r2plus1d_18-91a641e6.pth',
}


class Conv3DSimple(nn.Conv3d):
    # Plain 3x3x3 spatiotemporal convolution (the "R3D" building block).
    # midplanes is accepted but unused so all conv builders share one signature.
    def __init__(self,
                 in_planes,
                 out_planes,
                 midplanes=None,
                 stride=1,
                 padding=1):

        super(Conv3DSimple, self).__init__(
            in_channels=in_planes,
            out_channels=out_planes,
            kernel_size=(3, 3, 3),
            stride=stride,
            padding=padding,
            bias=False)

    @staticmethod
    def get_downsample_stride(stride):
        # Downsampling applies to time and both spatial dims.
        return (stride, stride, stride)


class Conv2Plus1D(nn.Sequential):
    # Factorized "(2+1)D" convolution: a 1x3x3 spatial conv followed by a
    # 3x1x1 temporal conv, with BN+ReLU between them. midplanes balances the
    # parameter count against a full 3x3x3 conv.
    def __init__(self,
                 in_planes,
                 out_planes,
                 midplanes,
                 stride=1,
                 padding=1):
        super(Conv2Plus1D, self).__init__(
            nn.Conv3d(in_planes, midplanes, kernel_size=(1, 3, 3),
                      stride=(1, stride, stride), padding=(0, padding, padding),
                      bias=False),
            nn.BatchNorm3d(midplanes),
            nn.ReLU(inplace=True),
            nn.Conv3d(midplanes, out_planes, kernel_size=(3, 1, 1),
                      stride=(stride, 1, 1), padding=(padding, 0, 0),
                      bias=False))

    @staticmethod
    def get_downsample_stride(stride):
        return (stride, stride, stride)


class Conv3DNoTemporal(nn.Conv3d):
    # Spatial-only 1x3x3 convolution; time dimension is never strided.
    # Used by the "mixed convolution" (MC3) variant.
    def __init__(self,
                 in_planes,
                 out_planes,
                 midplanes=None,
                 stride=1,
                 padding=1):

        super(Conv3DNoTemporal, self).__init__(
            in_channels=in_planes,
            out_channels=out_planes,
            kernel_size=(1, 3, 3),
            stride=(1, stride, stride),
            padding=(0, padding, padding),
            bias=False)

    @staticmethod
    def get_downsample_stride(stride):
        # Keep the temporal resolution: stride 1 in time.
        return (1, stride, stride)


class BasicBlock(nn.Module):
    # Two-conv residual block (ResNet-18/34 style) parameterized by the
    # conv builder (R3D / MC3 / R(2+1)D).

    expansion = 1

    def __init__(self, inplanes, planes, conv_builder, stride=1, downsample=None):
        # midplanes follows the R(2+1)D paper so the factorized block has
        # roughly the same parameter count as a full 3D conv.
        midplanes = (inplanes * planes * 3 * 3 * 3) // (inplanes * 3 * 3 + 3 * planes)

        super(BasicBlock, self).__init__()
        self.conv1 = nn.Sequential(
            conv_builder(inplanes, planes, midplanes, stride),
            nn.BatchNorm3d(planes),
            nn.ReLU(inplace=True)
        )
        self.conv2 = nn.Sequential(
            conv_builder(planes, planes, midplanes),
            nn.BatchNorm3d(planes)
        )
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.conv2(out)
        if self.downsample is not None:
            # Project the identity so shapes match the main branch.
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out


class Bottleneck(nn.Module):
    # 1x1x1 reduce -> builder conv -> 1x1x1 expand residual block
    # (ResNet-50+ style), with expansion 4.

    expansion = 4

    def __init__(self, inplanes, planes, conv_builder, stride=1, downsample=None):

        super(Bottleneck, self).__init__()
        midplanes = (inplanes * planes * 3 * 3 * 3) // (inplanes * 3 * 3 + 3 * planes)

        # 1x1x1
        self.conv1 = nn.Sequential(
            nn.Conv3d(inplanes, planes, kernel_size=1, bias=False),
            nn.BatchNorm3d(planes),
            nn.ReLU(inplace=True)
        )
        # Second kernel
        self.conv2 = nn.Sequential(
            conv_builder(planes, planes, midplanes, stride),
            nn.BatchNorm3d(planes),
            nn.ReLU(inplace=True)
        )

        # 1x1x1
        self.conv3 = nn.Sequential(
            nn.Conv3d(planes, planes * self.expansion, kernel_size=1, bias=False),
            nn.BatchNorm3d(planes * self.expansion)
        )
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.conv2(out)
        out = self.conv3(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out


class BasicStem(nn.Sequential):
    """The default conv-batchnorm-relu stem
    """
    def __init__(self):
        # 3x7x7 conv: spatial stride 2, temporal stride 1.
        super(BasicStem, self).__init__(
            nn.Conv3d(3, 64, kernel_size=(3, 7, 7), stride=(1, 2, 2),
                      padding=(1, 3, 3), bias=False),
            nn.BatchNorm3d(64),
            nn.ReLU(inplace=True))


class R2Plus1dStem(nn.Sequential):
    """R(2+1)D stem is different than the default one as it uses separated 3D convolution
    """
    def __init__(self):
        # Spatial 1x7x7 conv then temporal 3x1x1 conv, mirroring the
        # factorization used in the R(2+1)D blocks.
        super(R2Plus1dStem, self).__init__(
            nn.Conv3d(3, 45, kernel_size=(1, 7, 7),
                      stride=(1, 2, 2), padding=(0, 3, 3),
                      bias=False),
            nn.BatchNorm3d(45),
            nn.ReLU(inplace=True),
            nn.Conv3d(45, 64, kernel_size=(3, 1, 1),
                      stride=(1, 1, 1), padding=(1, 0, 0),
                      bias=False),
            nn.BatchNorm3d(64),
            nn.ReLU(inplace=True))


class VideoResNet(nn.Module):

    def __init__(self, block, conv_makers, layers,
                 stem, num_classes=400,
                 zero_init_residual=False):
        """Generic resnet video generator.

        Args:
            block (nn.Module): resnet building block
            conv_makers (list(functions)): generator function for each layer
            layers (List[int]): number of blocks per layer
            stem (nn.Module, optional): Resnet stem, if None, defaults to conv-bn-relu. Defaults to None.
            num_classes (int, optional): Dimension of the final FC layer. Defaults to 400.
            zero_init_residual (bool, optional): Zero init bottleneck residual BN. Defaults to False.
        """
        super(VideoResNet, self).__init__()
        self.inplanes = 64

        self.stem = stem()

        self.layer1 = self._make_layer(block, conv_makers[0], 64, layers[0], stride=1)
        self.layer2 = self._make_layer(block, conv_makers[1], 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, conv_makers[2], 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, conv_makers[3], 512, layers[3], stride=2)

        self.avgpool = nn.AdaptiveAvgPool3d((1, 1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        # init weights
        self._initialize_weights()

        if zero_init_residual:
            # Zeroing the last BN in each residual branch makes the block act
            # as identity at init, which can help early training.
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)

    def forward(self, x):
        x = self.stem(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        # Flatten the layer to fc
        x = x.flatten(1)
        x = self.fc(x)

        return x

    def _make_layer(self, block, conv_builder, planes, blocks, stride=1):
        downsample = None

        # A projection shortcut is needed when spatial/temporal resolution or
        # channel count changes across the layer boundary.
        if stride != 1 or self.inplanes != planes * block.expansion:
            ds_stride = conv_builder.get_downsample_stride(stride)
            downsample = nn.Sequential(
                nn.Conv3d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=ds_stride, bias=False),
                nn.BatchNorm3d(planes * block.expansion)
            )
        layers = []
        layers.append(block(self.inplanes, planes, conv_builder, stride, downsample))

        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, conv_builder))

        return nn.Sequential(*layers)

    def _initialize_weights(self):
        # Kaiming init for convs, unit BN, small-normal FC — standard video
        # ResNet initialization.
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out',
                                        nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm3d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)


def _video_resnet(arch, pretrained=False, progress=True, **kwargs):
    # Shared constructor: build the network, then optionally load
    # Kinetics-400 pretrained weights for the named architecture.
    model = VideoResNet(**kwargs)

    if pretrained:
        state_dict = load_state_dict_from_url(model_urls[arch],
                                              progress=progress)
        model.load_state_dict(state_dict)
    return model


def r3d_18(pretrained=False, progress=True, **kwargs):
    """Construct 18 layer Resnet3D model as in
    https://arxiv.org/abs/1711.11248

    Args:
        pretrained (bool): If True, returns a model pre-trained on Kinetics-400
        progress (bool): If True, displays a progress bar of the download to stderr

    Returns:
        nn.Module: R3D-18 network
    """

    return _video_resnet('r3d_18',
                         pretrained, progress,
                         block=BasicBlock,
                         conv_makers=[Conv3DSimple] * 4,
                         layers=[2, 2, 2, 2],
                         stem=BasicStem, **kwargs)


def mc3_18(pretrained=False, progress=True, **kwargs):
    """Constructor for 18 layer Mixed Convolution network as in
    https://arxiv.org/abs/1711.11248

    Args:
        pretrained (bool): If True, returns a model pre-trained on Kinetics-400
        progress (bool): If True, displays a progress bar of the download to stderr

    Returns:
        nn.Module: MC3 Network definition
    """
    # First stage uses full 3D convs; the remaining three are spatial-only.
    return _video_resnet('mc3_18',
                         pretrained, progress,
                         block=BasicBlock,
                         conv_makers=[Conv3DSimple] + [Conv3DNoTemporal] * 3,
                         layers=[2, 2, 2, 2],
                         stem=BasicStem, **kwargs)


def r2plus1d_18(pretrained=False, progress=True, **kwargs):
    """Constructor for the 18 layer deep R(2+1)D network as in
    https://arxiv.org/abs/1711.11248

    Args:
        pretrained (bool): If True, returns a model pre-trained on Kinetics-400
        progress (bool): If True, displays a progress bar of the download to stderr

    Returns:
        nn.Module: R(2+1)D-18 network
    """
    return _video_resnet('r2plus1d_18',
                         pretrained, progress,
                         block=BasicBlock,
                         conv_makers=[Conv2Plus1D] * 4,
                         layers=[2, 2, 2, 2],
                         stem=R2Plus1dStem, **kwargs)
/rpi_torchvision-0.7.0-cp37-cp37m-linux_armv7l.whl/torchvision/models/video/resnet.py
0.947284
0.546315
resnet.py
pypi
import warnings
from collections import namedtuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models import inception as inception_module
from torchvision.models.inception import InceptionOutputs
from torch.jit.annotations import Optional
from torchvision.models.utils import load_state_dict_from_url
from .utils import _replace_relu, quantize_model


__all__ = [
    "QuantizableInception3",
    "inception_v3",
]


quant_model_urls = {
    # fp32 weights ported from TensorFlow, quantized in PyTorch
    "inception_v3_google_fbgemm":
        "https://download.pytorch.org/models/quantized/inception_v3_google_fbgemm-71447a44.pth"
}


def inception_v3(pretrained=False, progress=True, quantize=False, **kwargs):
    r"""Inception v3 model architecture from
    `"Rethinking the Inception Architecture for Computer Vision" <http://arxiv.org/abs/1512.00567>`_.

    .. note::
        **Important**: In contrast to the other models the inception_v3 expects tensors with a size of
        N x 3 x 299 x 299, so ensure your images are sized accordingly.

    Note that quantize = True returns a quantized model with 8 bit
    weights. Quantized models only support inference and run on CPUs.
    GPU inference is not yet supported

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        aux_logits (bool): If True, add an auxiliary branch that can improve training.
            Default: *True*
        transform_input (bool): If True, preprocesses the input according to the method with which it
            was trained on ImageNet. Default: *False*
    """
    if pretrained:
        if "transform_input" not in kwargs:
            kwargs["transform_input"] = True
        # Checkpoints were saved with the aux head present, so build with it
        # and remember whether the caller actually wants it.
        if "aux_logits" in kwargs:
            original_aux_logits = kwargs["aux_logits"]
            kwargs["aux_logits"] = True
        else:
            original_aux_logits = False

    model = QuantizableInception3(**kwargs)
    # Quantization requires non-inplace ReLUs.
    _replace_relu(model)

    if quantize:
        # TODO use pretrained as a string to specify the backend
        backend = 'fbgemm'
        quantize_model(model, backend)
    else:
        assert pretrained in [True, False]

    if pretrained:
        if quantize:
            # Quantized checkpoint has no aux head, so drop it *before* loading.
            if not original_aux_logits:
                model.aux_logits = False
                del model.AuxLogits
            model_url = quant_model_urls['inception_v3_google' + '_' + backend]
        else:
            model_url = inception_module.model_urls['inception_v3_google']

        state_dict = load_state_dict_from_url(model_url,
                                              progress=progress)

        model.load_state_dict(state_dict)

        if not quantize:
            # Float checkpoint includes aux weights; drop the head *after* loading.
            if not original_aux_logits:
                model.aux_logits = False
                del model.AuxLogits
    return model


class QuantizableBasicConv2d(inception_module.BasicConv2d):
    # Conv+BN with an explicit (fusable) ReLU module instead of F.relu.
    def __init__(self, *args, **kwargs):
        super(QuantizableBasicConv2d, self).__init__(*args, **kwargs)
        self.relu = nn.ReLU()

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        x = self.relu(x)
        return x

    def fuse_model(self):
        # Fold conv+bn+relu into a single module for quantized inference.
        torch.quantization.fuse_modules(self, ["conv", "bn", "relu"], inplace=True)


class QuantizableInceptionA(inception_module.InceptionA):
    def __init__(self, *args, **kwargs):
        super(QuantizableInceptionA, self).__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs)
        # FloatFunctional so torch.cat is observable/quantizable.
        self.myop = nn.quantized.FloatFunctional()

    def forward(self, x):
        outputs = self._forward(x)
        return self.myop.cat(outputs, 1)


class QuantizableInceptionB(inception_module.InceptionB):
    def __init__(self, *args, **kwargs):
        super(QuantizableInceptionB, self).__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs)
        self.myop = nn.quantized.FloatFunctional()

    def forward(self, x):
        outputs = self._forward(x)
        return self.myop.cat(outputs, 1)


class QuantizableInceptionC(inception_module.InceptionC):
    def __init__(self, *args, **kwargs):
        super(QuantizableInceptionC, self).__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs)
        self.myop = nn.quantized.FloatFunctional()

    def forward(self, x):
        outputs = self._forward(x)
        return self.myop.cat(outputs, 1)


class QuantizableInceptionD(inception_module.InceptionD):
    def __init__(self, *args, **kwargs):
        super(QuantizableInceptionD, self).__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs)
        self.myop = nn.quantized.FloatFunctional()

    def forward(self, x):
        outputs = self._forward(x)
        return self.myop.cat(outputs, 1)


class QuantizableInceptionE(inception_module.InceptionE):
    def __init__(self, *args, **kwargs):
        super(QuantizableInceptionE, self).__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs)
        # One FloatFunctional per cat site — each needs its own observer.
        self.myop1 = nn.quantized.FloatFunctional()
        self.myop2 = nn.quantized.FloatFunctional()
        self.myop3 = nn.quantized.FloatFunctional()

    def _forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch3x3 = self.branch3x3_1(x)
        branch3x3 = [self.branch3x3_2a(branch3x3), self.branch3x3_2b(branch3x3)]
        branch3x3 = self.myop1.cat(branch3x3, 1)

        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = [
            self.branch3x3dbl_3a(branch3x3dbl),
            self.branch3x3dbl_3b(branch3x3dbl),
        ]
        branch3x3dbl = self.myop2.cat(branch3x3dbl, 1)

        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
        return outputs

    def forward(self, x):
        outputs = self._forward(x)
        return self.myop3.cat(outputs, 1)


class QuantizableInceptionAux(inception_module.InceptionAux):
    # Only swaps the conv block; forward is inherited.
    def __init__(self, *args, **kwargs):
        super(QuantizableInceptionAux, self).__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs)


class QuantizableInception3(inception_module.Inception3):
    def __init__(self, num_classes=1000, aux_logits=True, transform_input=False):
        super(QuantizableInception3, self).__init__(
            num_classes=num_classes,
            aux_logits=aux_logits,
            transform_input=transform_input,
            inception_blocks=[
                QuantizableBasicConv2d,
                QuantizableInceptionA,
                QuantizableInceptionB,
                QuantizableInceptionC,
                QuantizableInceptionD,
                QuantizableInceptionE,
                QuantizableInceptionAux
            ]
        )
        # Stubs mark the float<->quantized boundary of the network.
        self.quant = torch.quantization.QuantStub()
        self.dequant = torch.quantization.DeQuantStub()

    def forward(self, x):
        x = self._transform_input(x)
        x = self.quant(x)
        x, aux = self._forward(x)
        x = self.dequant(x)
        aux_defined = self.training and self.aux_logits
        if torch.jit.is_scripting():
            if not aux_defined:
                warnings.warn("Scripted QuantizableInception3 always returns QuantizableInception3 Tuple")
            return InceptionOutputs(x, aux)
        else:
            return self.eager_outputs(x, aux)

    def fuse_model(self):
        r"""Fuse conv/bn/relu modules in inception model

        Fuse conv+bn+relu/ conv+relu/conv+bn modules to prepare for quantization.
        Model is modified in place.  Note that this operation does not change numerics
        and the model after modification is in floating point
        """

        for m in self.modules():
            if type(m) == QuantizableBasicConv2d:
                m.fuse_model()
/rpi_torchvision-0.7.0-cp37-cp37m-linux_armv7l.whl/torchvision/models/quantization/inception.py
0.834407
0.566139
inception.py
pypi
from torch import nn
from torchvision.models.utils import load_state_dict_from_url
from torchvision.models.mobilenet import InvertedResidual, ConvBNReLU, MobileNetV2, model_urls
from torch.quantization import QuantStub, DeQuantStub, fuse_modules
from .utils import _replace_relu, quantize_model


__all__ = ['QuantizableMobileNetV2', 'mobilenet_v2']

quant_model_urls = {
    'mobilenet_v2_qnnpack':
        'https://download.pytorch.org/models/quantized/mobilenet_v2_qnnpack_37f702c5.pth'
}


class QuantizableInvertedResidual(InvertedResidual):
    def __init__(self, *args, **kwargs):
        super(QuantizableInvertedResidual, self).__init__(*args, **kwargs)
        # FloatFunctional makes the residual add observable/quantizable.
        self.skip_add = nn.quantized.FloatFunctional()

    def forward(self, x):
        if self.use_res_connect:
            return self.skip_add.add(x, self.conv(x))
        else:
            return self.conv(x)

    def fuse_model(self):
        # Fuse each Conv2d with the BatchNorm that immediately follows it.
        for idx in range(len(self.conv)):
            if type(self.conv[idx]) == nn.Conv2d:
                fuse_modules(self.conv, [str(idx), str(idx + 1)], inplace=True)


class QuantizableMobileNetV2(MobileNetV2):
    def __init__(self, *args, **kwargs):
        """
        MobileNet V2 main class

        Args:
           Inherits args from floating point MobileNetV2
        """
        super(QuantizableMobileNetV2, self).__init__(*args, **kwargs)
        # Stubs mark the float<->quantized boundary of the network.
        self.quant = QuantStub()
        self.dequant = DeQuantStub()

    def forward(self, x):
        x = self.quant(x)
        x = self._forward_impl(x)
        x = self.dequant(x)
        return x

    def fuse_model(self):
        # Fuse conv+bn+relu triples and delegate residual blocks to their own
        # fuse logic; modifies the (float) model in place.
        for m in self.modules():
            if type(m) == ConvBNReLU:
                fuse_modules(m, ['0', '1', '2'], inplace=True)
            if type(m) == QuantizableInvertedResidual:
                m.fuse_model()


def mobilenet_v2(pretrained=False, progress=True, quantize=False, **kwargs):
    """
    Constructs a MobileNetV2 architecture from
    `"MobileNetV2: Inverted Residuals and Linear Bottlenecks"
    <https://arxiv.org/abs/1801.04381>`_.

    Note that quantize = True returns a quantized model with 8 bit
    weights. Quantized models only support inference and run on CPUs.
    GPU inference is not yet supported

    Args:
     pretrained (bool): If True, returns a model pre-trained on ImageNet.
     progress (bool): If True, displays a progress bar of the download to stderr
     quantize(bool): If True, returns a quantized model, else returns a float model
    """
    model = QuantizableMobileNetV2(block=QuantizableInvertedResidual, **kwargs)
    # Quantization requires non-inplace ReLUs.
    _replace_relu(model)

    if quantize:
        # TODO use pretrained as a string to specify the backend
        backend = 'qnnpack'
        quantize_model(model, backend)
    else:
        assert pretrained in [True, False]

    if pretrained:
        # Pick the quantized or float checkpoint to match the built model.
        if quantize:
            model_url = quant_model_urls['mobilenet_v2_' + backend]
        else:
            model_url = model_urls['mobilenet_v2']

        state_dict = load_state_dict_from_url(model_url,
                                              progress=progress)

        model.load_state_dict(state_dict)
    return model
/rpi_torchvision-0.7.0-cp37-cp37m-linux_armv7l.whl/torchvision/models/quantization/mobilenet.py
0.857067
0.331201
mobilenet.py
pypi
import warnings
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.jit.annotations import Optional
from torchvision.models.utils import load_state_dict_from_url
from torchvision.models.googlenet import (
    GoogLeNetOutputs, BasicConv2d, Inception, InceptionAux, GoogLeNet, model_urls)

from .utils import _replace_relu, quantize_model


__all__ = ['QuantizableGoogLeNet', 'googlenet']

quant_model_urls = {
    # fp32 GoogLeNet ported from TensorFlow, with weights quantized in PyTorch
    'googlenet_fbgemm': 'https://download.pytorch.org/models/quantized/googlenet_fbgemm-c00238cf.pth',
}


def googlenet(pretrained=False, progress=True, quantize=False, **kwargs):
    r"""GoogLeNet (Inception v1) model architecture from
    `"Going Deeper with Convolutions" <http://arxiv.org/abs/1409.4842>`_.

    Note that quantize = True returns a quantized model with 8 bit
    weights. Quantized models only support inference and run on CPUs.
    GPU inference is not yet supported

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        aux_logits (bool): If True, adds two auxiliary branches that can improve training.
            Default: *False* when pretrained is True otherwise *True*
        transform_input (bool): If True, preprocesses the input according to the method with which it
            was trained on ImageNet. Default: *False*
    """
    if pretrained:
        if 'transform_input' not in kwargs:
            kwargs['transform_input'] = True
        if 'aux_logits' not in kwargs:
            kwargs['aux_logits'] = False
        if kwargs['aux_logits']:
            warnings.warn('auxiliary heads in the pretrained googlenet model are NOT pretrained, '
                          'so make sure to train them')
        # Checkpoints were saved with aux heads present: build with them,
        # remember whether the caller wants them, and skip random init since
        # weights are loaded below.
        original_aux_logits = kwargs['aux_logits']
        kwargs['aux_logits'] = True
        kwargs['init_weights'] = False

    model = QuantizableGoogLeNet(**kwargs)
    # Quantization requires non-inplace ReLUs.
    _replace_relu(model)

    if quantize:
        # TODO use pretrained as a string to specify the backend
        backend = 'fbgemm'
        quantize_model(model, backend)
    else:
        assert pretrained in [True, False]

    if pretrained:
        if quantize:
            model_url = quant_model_urls['googlenet' + '_' + backend]
        else:
            model_url = model_urls['googlenet']

        state_dict = load_state_dict_from_url(model_url,
                                              progress=progress)

        model.load_state_dict(state_dict)

        # Drop the aux heads after loading unless the caller asked for them.
        if not original_aux_logits:
            model.aux_logits = False
            model.aux1 = None
            model.aux2 = None
    return model


class QuantizableBasicConv2d(BasicConv2d):
    # Conv+BN with an explicit (fusable) ReLU module instead of F.relu.
    def __init__(self, *args, **kwargs):
        super(QuantizableBasicConv2d, self).__init__(*args, **kwargs)
        self.relu = nn.ReLU()

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        x = self.relu(x)
        return x

    def fuse_model(self):
        # Fold conv+bn+relu into one module for quantized inference.
        torch.quantization.fuse_modules(self, ["conv", "bn", "relu"], inplace=True)


class QuantizableInception(Inception):
    def __init__(self, *args, **kwargs):
        super(QuantizableInception, self).__init__(
            conv_block=QuantizableBasicConv2d, *args, **kwargs)
        # FloatFunctional so the branch concat is observable/quantizable.
        self.cat = nn.quantized.FloatFunctional()

    def forward(self, x):
        outputs = self._forward(x)
        return self.cat.cat(outputs, 1)


class QuantizableInceptionAux(InceptionAux):
    def __init__(self, *args, **kwargs):
        super(QuantizableInceptionAux, self).__init__(
            conv_block=QuantizableBasicConv2d, *args, **kwargs)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(0.7)

    def forward(self, x):
        # aux1: N x 512 x 14 x 14, aux2: N x 528 x 14 x 14
        x = F.adaptive_avg_pool2d(x, (4, 4))
        # aux1: N x 512 x 4 x 4, aux2: N x 528 x 4 x 4
        x = self.conv(x)
        # N x 128 x 4 x 4
        x = torch.flatten(x, 1)
        # N x 2048
        x = self.relu(self.fc1(x))
        # N x 1024
        x = self.dropout(x)
        # N x 1024
        x = self.fc2(x)
        # N x 1000 (num_classes)
        return x


class QuantizableGoogLeNet(GoogLeNet):
    def __init__(self, *args, **kwargs):
        super(QuantizableGoogLeNet, self).__init__(
            blocks=[QuantizableBasicConv2d, QuantizableInception, QuantizableInceptionAux],
            *args,
            **kwargs
        )
        # Stubs mark the float<->quantized boundary of the network.
        self.quant = torch.quantization.QuantStub()
        self.dequant = torch.quantization.DeQuantStub()

    def forward(self, x):
        x = self._transform_input(x)
        x = self.quant(x)
        x, aux1, aux2 = self._forward(x)
        x = self.dequant(x)
        aux_defined = self.training and self.aux_logits
        if torch.jit.is_scripting():
            if not aux_defined:
                warnings.warn("Scripted QuantizableGoogleNet always returns GoogleNetOutputs Tuple")
            return GoogLeNetOutputs(x, aux2, aux1)
        else:
            return self.eager_outputs(x, aux2, aux1)

    def fuse_model(self):
        r"""Fuse conv/bn/relu modules in googlenet model

        Fuse conv+bn+relu/ conv+relu/conv+bn modules to prepare for quantization.
        Model is modified in place.  Note that this operation does not change numerics
        and the model after modification is in floating point
        """

        for m in self.modules():
            if type(m) == QuantizableBasicConv2d:
                m.fuse_model()
/rpi_torchvision-0.7.0-cp37-cp37m-linux_armv7l.whl/torchvision/models/quantization/googlenet.py
0.784319
0.500183
googlenet.py
pypi
import torch
import torch.nn as nn
from torchvision.models.utils import load_state_dict_from_url
import torchvision.models.shufflenetv2
import sys
from .utils import _replace_relu, quantize_model

# Alias to the float (non-quantized) shufflenetv2 module so its building
# blocks (InvertedResidual, channel_shuffle, model_urls) can be reused here.
shufflenetv2 = sys.modules['torchvision.models.shufflenetv2']

__all__ = [
    'QuantizableShuffleNetV2', 'shufflenet_v2_x0_5', 'shufflenet_v2_x1_0',
    'shufflenet_v2_x1_5', 'shufflenet_v2_x2_0'
]

# URLs of pre-trained *quantized* weights, keyed by '<arch>_<backend>'.
# None means no quantized checkpoint has been published for that variant.
quant_model_urls = {
    'shufflenetv2_x0.5_fbgemm': None,
    'shufflenetv2_x1.0_fbgemm':
        'https://download.pytorch.org/models/quantized/shufflenetv2_x1_fbgemm-db332c57.pth',
    'shufflenetv2_x1.5_fbgemm': None,
    'shufflenetv2_x2.0_fbgemm': None,
}


class QuantizableInvertedResidual(shufflenetv2.InvertedResidual):
    """InvertedResidual block whose concatenation goes through
    nn.quantized.FloatFunctional so that quantization observers can be
    attached to the cat op."""

    def __init__(self, *args, **kwargs):
        super(QuantizableInvertedResidual, self).__init__(*args, **kwargs)
        # FloatFunctional wraps torch.cat so it is quantizable.
        self.cat = nn.quantized.FloatFunctional()

    def forward(self, x):
        if self.stride == 1:
            # Stride-1 block: split channels, transform one half, re-join.
            x1, x2 = x.chunk(2, dim=1)
            out = self.cat.cat((x1, self.branch2(x2)), dim=1)
        else:
            # Downsampling block: both branches see the full input.
            out = self.cat.cat((self.branch1(x), self.branch2(x)), dim=1)
        out = shufflenetv2.channel_shuffle(out, 2)
        return out


class QuantizableShuffleNetV2(shufflenetv2.ShuffleNetV2):
    """ShuffleNetV2 with quant/dequant stubs around the forward pass so the
    whole network can be statically quantized."""

    def __init__(self, *args, **kwargs):
        super(QuantizableShuffleNetV2, self).__init__(*args, inverted_residual=QuantizableInvertedResidual, **kwargs)
        self.quant = torch.quantization.QuantStub()
        self.dequant = torch.quantization.DeQuantStub()

    def forward(self, x):
        x = self.quant(x)
        x = self._forward_impl(x)
        x = self.dequant(x)
        return x

    def fuse_model(self):
        r"""Fuse conv/bn/relu modules in shufflenetv2 model

        Fuse conv+bn+relu/ conv+relu/conv+bn modules to prepare for quantization.
        Model is modified in place.
        Note that this operation does not change numerics
        and the model after modification is in floating point
        """
        # conv1 / conv5 are Sequential(Conv2d, BatchNorm2d, ReLU): fuse all 3.
        for name, m in self._modules.items():
            if name in ["conv1", "conv5"]:
                torch.quantization.fuse_modules(m, [["0", "1", "2"]], inplace=True)
        for m in self.modules():
            if type(m) == QuantizableInvertedResidual:
                # branch1 is empty for stride-1 blocks; only fuse when present.
                if len(m.branch1._modules.items()) > 0:
                    torch.quantization.fuse_modules(
                        m.branch1, [["0", "1"], ["2", "3", "4"]], inplace=True
                    )
                torch.quantization.fuse_modules(
                    m.branch2,
                    [["0", "1", "2"], ["3", "4"], ["5", "6", "7"]],
                    inplace=True,
                )


def _shufflenetv2(arch, pretrained, progress, quantize, *args, **kwargs):
    """Build a QuantizableShuffleNetV2, optionally quantize it and load
    pretrained (float or quantized) weights for ``arch``."""
    model = QuantizableShuffleNetV2(*args, **kwargs)
    _replace_relu(model)

    if quantize:
        # TODO use pretrained as a string to specify the backend
        backend = 'fbgemm'
        quantize_model(model, backend)
    else:
        assert pretrained in [True, False]

    if pretrained:
        if quantize:
            model_url = quant_model_urls[arch + '_' + backend]
        else:
            model_url = shufflenetv2.model_urls[arch]

        state_dict = load_state_dict_from_url(model_url,
                                              progress=progress)

        model.load_state_dict(state_dict)
    return model


def shufflenet_v2_x0_5(pretrained=False, progress=True, quantize=False, **kwargs):
    """
    Constructs a ShuffleNetV2 with 0.5x output channels, as described in
    `"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design"
    <https://arxiv.org/abs/1807.11164>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _shufflenetv2('shufflenetv2_x0.5', pretrained, progress, quantize,
                         [4, 8, 4], [24, 48, 96, 192, 1024], **kwargs)


def shufflenet_v2_x1_0(pretrained=False, progress=True, quantize=False, **kwargs):
    """
    Constructs a ShuffleNetV2 with 1.0x output channels, as described in
    `"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design"
    <https://arxiv.org/abs/1807.11164>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _shufflenetv2('shufflenetv2_x1.0', pretrained, progress, quantize,
                         [4, 8, 4], [24, 116, 232, 464, 1024], **kwargs)


def shufflenet_v2_x1_5(pretrained=False, progress=True, quantize=False, **kwargs):
    """
    Constructs a ShuffleNetV2 with 1.5x output channels, as described in
    `"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design"
    <https://arxiv.org/abs/1807.11164>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _shufflenetv2('shufflenetv2_x1.5', pretrained, progress, quantize,
                         [4, 8, 4], [24, 176, 352, 704, 1024], **kwargs)


def shufflenet_v2_x2_0(pretrained=False, progress=True, quantize=False, **kwargs):
    """
    Constructs a ShuffleNetV2 with 2.0x output channels, as described in
    `"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design"
    <https://arxiv.org/abs/1807.11164>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _shufflenetv2('shufflenetv2_x2.0', pretrained, progress, quantize,
                         [4, 8, 4], [24, 244, 488, 976, 2048], **kwargs)
/rpi_torchvision-0.7.0-cp37-cp37m-linux_armv7l.whl/torchvision/models/quantization/shufflenetv2.py
0.642881
0.333707
shufflenetv2.py
pypi
from collections import OrderedDict
from torch import nn
from torchvision.ops.feature_pyramid_network import FeaturePyramidNetwork, LastLevelMaxPool

from torchvision.ops import misc as misc_nn_ops
from .._utils import IntermediateLayerGetter
from .. import resnet


class BackboneWithFPN(nn.Module):
    """
    Adds a FPN on top of a model.

    Internally, it uses torchvision.models._utils.IntermediateLayerGetter to
    extract a submodel that returns the feature maps specified in return_layers.
    The same limitations of IntermediatLayerGetter apply here.

    Arguments:
        backbone (nn.Module)
        return_layers (Dict[name, new_name]): a dict containing the names
            of the modules for which the activations will be returned as
            the key of the dict, and the value of the dict is the name
            of the returned activation (which the user can specify).
        in_channels_list (List[int]): number of channels for each feature map
            that is returned, in the order they are present in the OrderedDict
        out_channels (int): number of channels in the FPN.

    Attributes:
        out_channels (int): the number of channels in the FPN
    """
    def __init__(self, backbone, return_layers, in_channels_list, out_channels):
        super(BackboneWithFPN, self).__init__()
        # body extracts the intermediate feature maps named in return_layers.
        self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
        # LastLevelMaxPool adds an extra coarsest pyramid level on top.
        self.fpn = FeaturePyramidNetwork(
            in_channels_list=in_channels_list,
            out_channels=out_channels,
            extra_blocks=LastLevelMaxPool(),
        )
        self.out_channels = out_channels

    def forward(self, x):
        # Returns an OrderedDict of FPN feature maps, one per pyramid level.
        x = self.body(x)
        x = self.fpn(x)
        return x


def resnet_fpn_backbone(backbone_name, pretrained, norm_layer=misc_nn_ops.FrozenBatchNorm2d):
    """Build a ResNet-FPN backbone for detection models.

    Arguments:
        backbone_name (str): resnet architecture name, e.g. 'resnet50'
        pretrained (bool): load ImageNet-pretrained weights into the resnet
        norm_layer (callable): normalization layer; defaults to
            FrozenBatchNorm2d (batch statistics are frozen, the usual choice
            for detection fine-tuning)

    Returns:
        BackboneWithFPN with out_channels == 256
    """
    backbone = resnet.__dict__[backbone_name](
        pretrained=pretrained,
        norm_layer=norm_layer)
    # freeze layers
    # Only layer2/3/4 are trainable; stem and layer1 stay frozen.
    for name, parameter in backbone.named_parameters():
        if 'layer2' not in name and 'layer3' not in name and 'layer4' not in name:
            parameter.requires_grad_(False)

    return_layers = {'layer1': '0', 'layer2': '1', 'layer3': '2', 'layer4': '3'}

    # inplanes is the channel count after layer4; each earlier stage has half
    # the channels of the next, hence the // 8 and the doubling sequence.
    in_channels_stage2 = backbone.inplanes // 8
    in_channels_list = [
        in_channels_stage2,
        in_channels_stage2 * 2,
        in_channels_stage2 * 4,
        in_channels_stage2 * 8,
    ]
    out_channels = 256
    return BackboneWithFPN(backbone, return_layers, in_channels_list, out_channels)
/rpi_torchvision-0.7.0-cp37-cp37m-linux_armv7l.whl/torchvision/models/detection/backbone_utils.py
0.949844
0.441312
backbone_utils.py
pypi
import random
import math
import torch
from torch import nn, Tensor
import torchvision
from torch.jit.annotations import List, Tuple, Dict, Optional
from torchvision.ops import misc as misc_nn_ops
from .image_list import ImageList
from .roi_heads import paste_masks_in_image


@torch.jit.unused
def _resize_image_and_masks_onnx(image, self_min_size, self_max_size, target):
    # type: (Tensor, float, float, Optional[Dict[str, Tensor]]) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]
    # ONNX-traceable variant of _resize_image_and_masks: shapes must stay
    # tensors (shape_as_tensor) instead of Python ints so tracing records
    # dynamic ops rather than constants.
    from torch.onnx import operators
    im_shape = operators.shape_as_tensor(image)[-2:]
    min_size = torch.min(im_shape).to(dtype=torch.float32)
    max_size = torch.max(im_shape).to(dtype=torch.float32)
    scale_factor = self_min_size / min_size

    # Cap the scale so the longer side never exceeds self_max_size.
    if max_size * scale_factor > self_max_size:
        scale_factor = self_max_size / max_size
    image = torch.nn.functional.interpolate(
        image[None], scale_factor=scale_factor, mode='bilinear',
        align_corners=False)[0]

    if target is None:
        return image, target

    if "masks" in target:
        mask = target["masks"]
        mask = misc_nn_ops.interpolate(mask[None].float(), scale_factor=scale_factor)[0].byte()
        target["masks"] = mask
    return image, target


def _resize_image_and_masks(image, self_min_size, self_max_size, target):
    # type: (Tensor, float, float, Optional[Dict[str, Tensor]]) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]
    # Resize so the shorter side is self_min_size, unless that would push the
    # longer side past self_max_size, in which case the longer side wins.
    # Masks in `target` are resized with the same scale factor.
    im_shape = torch.tensor(image.shape[-2:])
    min_size = float(torch.min(im_shape))
    max_size = float(torch.max(im_shape))
    scale_factor = self_min_size / min_size
    if max_size * scale_factor > self_max_size:
        scale_factor = self_max_size / max_size
    image = torch.nn.functional.interpolate(
        image[None], scale_factor=scale_factor, mode='bilinear',
        align_corners=False)[0]

    if target is None:
        return image, target

    if "masks" in target:
        mask = target["masks"]
        mask = misc_nn_ops.interpolate(mask[None].float(), scale_factor=scale_factor)[0].byte()
        target["masks"] = mask
    return image, target


class GeneralizedRCNNTransform(nn.Module):
    """
    Performs input / target transformation before feeding the data to a GeneralizedRCNN
    model.

    The transformations it perform are:
        - input normalization (mean subtraction and std division)
        - input / target resizing to match min_size / max_size

    It returns a ImageList for the inputs, and a List[Dict[Tensor]] for the targets
    """

    def __init__(self, min_size, max_size, image_mean, image_std):
        super(GeneralizedRCNNTransform, self).__init__()
        if not isinstance(min_size, (list, tuple)):
            min_size = (min_size,)
        # min_size is a tuple of candidate scales; one is sampled per image
        # during training, the last is used at inference (see resize()).
        self.min_size = min_size
        self.max_size = max_size
        self.image_mean = image_mean
        self.image_std = image_std

    def forward(self, images, targets=None):
        # type: (List[Tensor], Optional[List[Dict[str, Tensor]]])
        # Normalize and resize each image (and its target), then pad them into
        # a single batched tensor wrapped in an ImageList that remembers the
        # per-image (unpadded) sizes.
        images = [img for img in images]
        for i in range(len(images)):
            image = images[i]
            target_index = targets[i] if targets is not None else None
            if image.dim() != 3:
                raise ValueError("images is expected to be a list of 3d tensors "
                                 "of shape [C, H, W], got {}".format(image.shape))
            image = self.normalize(image)
            image, target_index = self.resize(image, target_index)
            images[i] = image
            if targets is not None and target_index is not None:
                targets[i] = target_index

        image_sizes = [img.shape[-2:] for img in images]
        images = self.batch_images(images)
        image_sizes_list = torch.jit.annotate(List[Tuple[int, int]], [])
        for image_size in image_sizes:
            assert len(image_size) == 2
            image_sizes_list.append((image_size[0], image_size[1]))

        image_list = ImageList(images, image_sizes_list)
        return image_list, targets

    def normalize(self, image):
        # Per-channel (x - mean) / std using broadcasting over H and W.
        dtype, device = image.dtype, image.device
        mean = torch.as_tensor(self.image_mean, dtype=dtype, device=device)
        std = torch.as_tensor(self.image_std, dtype=dtype, device=device)
        return (image - mean[:, None, None]) / std[:, None, None]

    def torch_choice(self, l):
        # type: (List[int])
        """
        Implements `random.choice` via torch ops so it can be compiled with
        TorchScript. Remove if https://github.com/pytorch/pytorch/issues/25803
        is fixed.
        """
        index = int(torch.empty(1).uniform_(0., float(len(l))).item())
        return l[index]

    def resize(self, image, target):
        # type: (Tensor, Optional[Dict[str, Tensor]])
        # Resize image (and masks) to the chosen scale, then rescale boxes and
        # keypoints in `target` to match the new image size.
        h, w = image.shape[-2:]
        if self.training:
            # Multi-scale training: pick a random min_size per image.
            size = float(self.torch_choice(self.min_size))
        else:
            # FIXME assume for now that testing uses the largest scale
            size = float(self.min_size[-1])
        if torchvision._is_tracing():
            image, target = _resize_image_and_masks_onnx(image, size, float(self.max_size), target)
        else:
            image, target = _resize_image_and_masks(image, size, float(self.max_size), target)

        if target is None:
            return image, target

        bbox = target["boxes"]
        bbox = resize_boxes(bbox, (h, w), image.shape[-2:])
        target["boxes"] = bbox

        if "keypoints" in target:
            keypoints = target["keypoints"]
            keypoints = resize_keypoints(keypoints, (h, w), image.shape[-2:])
            target["keypoints"] = keypoints
        return image, target

    # _onnx_batch_images() is an implementation of
    # batch_images() that is supported by ONNX tracing.
    @torch.jit.unused
    def _onnx_batch_images(self, images, size_divisible=32):
        # type: (List[Tensor], int) -> Tensor
        max_size = []
        for i in range(images[0].dim()):
            max_size_i = torch.max(torch.stack([img.shape[i] for img in images]).to(torch.float32)).to(torch.int64)
            max_size.append(max_size_i)
        stride = size_divisible
        # Round H and W up to a multiple of stride so the backbone's
        # downsampling divides evenly.
        max_size[1] = (torch.ceil((max_size[1].to(torch.float32)) / stride) * stride).to(torch.int64)
        max_size[2] = (torch.ceil((max_size[2].to(torch.float32)) / stride) * stride).to(torch.int64)
        max_size = tuple(max_size)

        # work around for
        # pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
        # which is not yet supported in onnx
        padded_imgs = []
        for img in images:
            padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))]
            padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0]))
            padded_imgs.append(padded_img)

        return torch.stack(padded_imgs)

    def max_by_axis(self, the_list):
        # type: (List[List[int]]) -> List[int]
        # Element-wise maximum across a list of equal-length int lists.
        maxes = the_list[0]
        for sublist in the_list[1:]:
            for index, item in enumerate(sublist):
                maxes[index] = max(maxes[index], item)
        return maxes

    def batch_images(self, images, size_divisible=32):
        # type: (List[Tensor], int)
        # Zero-pad every image to the common (stride-aligned) max size and
        # stack them into one [N, C, H, W] tensor.
        if torchvision._is_tracing():
            # batch_images() does not export well to ONNX
            # call _onnx_batch_images() instead
            return self._onnx_batch_images(images, size_divisible)

        max_size = self.max_by_axis([list(img.shape) for img in images])
        stride = float(size_divisible)
        max_size = list(max_size)
        max_size[1] = int(math.ceil(float(max_size[1]) / stride) * stride)
        max_size[2] = int(math.ceil(float(max_size[2]) / stride) * stride)

        batch_shape = [len(images)] + max_size
        batched_imgs = images[0].new_full(batch_shape, 0)
        for img, pad_img in zip(images, batched_imgs):
            pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)

        return batched_imgs

    def postprocess(self, result, image_shapes, original_image_sizes):
        # type: (List[Dict[str, Tensor]], List[Tuple[int, int]], List[Tuple[int, int]])
        # Map predictions from the resized image space back to the original
        # image coordinates. No-op during training (losses need resized space).
        if self.training:
            return result
        for i, (pred, im_s, o_im_s) in enumerate(zip(result, image_shapes, original_image_sizes)):
            boxes = pred["boxes"]
            boxes = resize_boxes(boxes, im_s, o_im_s)
            result[i]["boxes"] = boxes
            if "masks" in pred:
                masks = pred["masks"]
                masks = paste_masks_in_image(masks, boxes, o_im_s)
                result[i]["masks"] = masks
            if "keypoints" in pred:
                keypoints = pred["keypoints"]
                keypoints = resize_keypoints(keypoints, im_s, o_im_s)
                result[i]["keypoints"] = keypoints
        return result

    def __repr__(self):
        format_string = self.__class__.__name__ + '('
        _indent = '\n    '
        format_string += "{0}Normalize(mean={1}, std={2})".format(_indent, self.image_mean, self.image_std)
        format_string += "{0}Resize(min_size={1}, max_size={2}, mode='bilinear')".format(_indent, self.min_size,
                                                                                        self.max_size)
        format_string += '\n)'
        return format_string


def resize_keypoints(keypoints, original_size, new_size):
    # type: (Tensor, List[int], List[int])
    # Scale keypoint x/y coordinates by the per-axis resize ratios; the third
    # column (visibility) is left untouched.
    ratios = [
        torch.tensor(s, dtype=torch.float32, device=keypoints.device) /
        torch.tensor(s_orig, dtype=torch.float32, device=keypoints.device)
        for s, s_orig in zip(new_size, original_size)
    ]
    ratio_h, ratio_w = ratios
    resized_data = keypoints.clone()
    if torch._C._get_tracing_state():
        # In-place ops trace poorly; rebuild the tensor out-of-place for ONNX.
        resized_data_0 = resized_data[:, :, 0] * ratio_w
        resized_data_1 = resized_data[:, :, 1] * ratio_h
        resized_data = torch.stack((resized_data_0, resized_data_1, resized_data[:, :, 2]), dim=2)
    else:
        resized_data[..., 0] *= ratio_w
        resized_data[..., 1] *= ratio_h
    return resized_data


def resize_boxes(boxes, original_size, new_size):
    # type: (Tensor, List[int], List[int])
    # Scale xyxy boxes from original_size (h, w) to new_size (h, w).
    ratios = [
        torch.tensor(s, dtype=torch.float32, device=boxes.device) /
        torch.tensor(s_orig, dtype=torch.float32, device=boxes.device)
        for s, s_orig in zip(new_size, original_size)
    ]
    ratio_height, ratio_width = ratios
    xmin, ymin, xmax, ymax = boxes.unbind(1)

    xmin = xmin * ratio_width
    xmax = xmax * ratio_width
    ymin = ymin * ratio_height
    ymax = ymax * ratio_height
    return torch.stack((xmin, ymin, xmax, ymax), dim=1)
/rpi_torchvision-0.7.0-cp37-cp37m-linux_armv7l.whl/torchvision/models/detection/transform.py
0.887926
0.544801
transform.py
pypi
import math
import torch
from torch.jit.annotations import List, Tuple
from torch import Tensor
import torchvision


# TODO: https://github.com/pytorch/pytorch/issues/26727
def zeros_like(tensor, dtype):
    # type: (Tensor, int) -> Tensor
    # TorchScript-friendly zeros_like that preserves layout/device/pinning.
    return torch.zeros_like(tensor, dtype=dtype, layout=tensor.layout,
                            device=tensor.device, pin_memory=tensor.is_pinned())


@torch.jit.script
class BalancedPositiveNegativeSampler(object):
    """
    This class samples batches, ensuring that they contain a fixed proportion of positives
    """

    def __init__(self, batch_size_per_image, positive_fraction):
        # type: (int, float)
        """
        Arguments:
            batch_size_per_image (int): number of elements to be selected per image
            positive_fraction (float): percentace of positive elements per batch
        """
        self.batch_size_per_image = batch_size_per_image
        self.positive_fraction = positive_fraction

    def __call__(self, matched_idxs):
        # type: (List[Tensor])
        """
        Arguments:
            matched idxs: list of tensors containing -1, 0 or positive values.
                Each tensor corresponds to a specific image.
                -1 values are ignored, 0 are considered as negatives and > 0 as
                positives.

        Returns:
            pos_idx (list[tensor])
            neg_idx (list[tensor])

        Returns two lists of binary masks for each image.
        The first list contains the positive elements that were selected,
        and the second list the negative example.
        """
        pos_idx = []
        neg_idx = []
        for matched_idxs_per_image in matched_idxs:
            positive = torch.nonzero(matched_idxs_per_image >= 1).squeeze(1)
            negative = torch.nonzero(matched_idxs_per_image == 0).squeeze(1)

            num_pos = int(self.batch_size_per_image * self.positive_fraction)
            # protect against not enough positive examples
            num_pos = min(positive.numel(), num_pos)
            # Negatives fill whatever budget the positives did not use.
            num_neg = self.batch_size_per_image - num_pos
            # protect against not enough negative examples
            num_neg = min(negative.numel(), num_neg)

            # randomly select positive and negative examples
            perm1 = torch.randperm(positive.numel(), device=positive.device)[:num_pos]
            perm2 = torch.randperm(negative.numel(), device=negative.device)[:num_neg]

            pos_idx_per_image = positive[perm1]
            neg_idx_per_image = negative[perm2]

            # create binary mask from indices
            pos_idx_per_image_mask = zeros_like(
                matched_idxs_per_image, dtype=torch.uint8
            )
            neg_idx_per_image_mask = zeros_like(
                matched_idxs_per_image, dtype=torch.uint8
            )

            pos_idx_per_image_mask[pos_idx_per_image] = torch.tensor(1, dtype=torch.uint8)
            neg_idx_per_image_mask[neg_idx_per_image] = torch.tensor(1, dtype=torch.uint8)

            pos_idx.append(pos_idx_per_image_mask)
            neg_idx.append(neg_idx_per_image_mask)

        return pos_idx, neg_idx


@torch.jit.script
def encode_boxes(reference_boxes, proposals, weights):
    # type: (torch.Tensor, torch.Tensor, torch.Tensor) -> torch.Tensor
    """
    Encode a set of proposals with respect to some
    reference boxes

    Arguments:
        reference_boxes (Tensor): reference boxes
        proposals (Tensor): boxes to be encoded
    """

    # perform some unpacking to make it JIT-fusion friendly
    wx = weights[0]
    wy = weights[1]
    ww = weights[2]
    wh = weights[3]

    proposals_x1 = proposals[:, 0].unsqueeze(1)
    proposals_y1 = proposals[:, 1].unsqueeze(1)
    proposals_x2 = proposals[:, 2].unsqueeze(1)
    proposals_y2 = proposals[:, 3].unsqueeze(1)

    reference_boxes_x1 = reference_boxes[:, 0].unsqueeze(1)
    reference_boxes_y1 = reference_boxes[:, 1].unsqueeze(1)
    reference_boxes_x2 = reference_boxes[:, 2].unsqueeze(1)
    reference_boxes_y2 = reference_boxes[:, 3].unsqueeze(1)

    # implementation starts here
    # Standard R-CNN box parameterization: weighted deltas of center and
    # log-scale of width/height, ground truth relative to proposal.
    ex_widths = proposals_x2 - proposals_x1
    ex_heights = proposals_y2 - proposals_y1
    ex_ctr_x = proposals_x1 + 0.5 * ex_widths
    ex_ctr_y = proposals_y1 + 0.5 * ex_heights

    gt_widths = reference_boxes_x2 - reference_boxes_x1
    gt_heights = reference_boxes_y2 - reference_boxes_y1
    gt_ctr_x = reference_boxes_x1 + 0.5 * gt_widths
    gt_ctr_y = reference_boxes_y1 + 0.5 * gt_heights

    targets_dx = wx * (gt_ctr_x - ex_ctr_x) / ex_widths
    targets_dy = wy * (gt_ctr_y - ex_ctr_y) / ex_heights
    targets_dw = ww * torch.log(gt_widths / ex_widths)
    targets_dh = wh * torch.log(gt_heights / ex_heights)

    targets = torch.cat((targets_dx, targets_dy, targets_dw, targets_dh), dim=1)
    return targets


@torch.jit.script
class BoxCoder(object):
    """
    This class encodes and decodes a set of bounding boxes into
    the representation used for training the regressors.
    """

    def __init__(self, weights, bbox_xform_clip=math.log(1000. / 16)):
        # type: (Tuple[float, float, float, float], float)
        """
        Arguments:
            weights (4-element tuple)
            bbox_xform_clip (float)
        """
        self.weights = weights
        self.bbox_xform_clip = bbox_xform_clip

    def encode(self, reference_boxes, proposals):
        # type: (List[Tensor], List[Tensor])
        # Concatenate per-image box lists, encode in one shot, then split the
        # result back to the original per-image chunks.
        boxes_per_image = [len(b) for b in reference_boxes]
        reference_boxes = torch.cat(reference_boxes, dim=0)
        proposals = torch.cat(proposals, dim=0)
        targets = self.encode_single(reference_boxes, proposals)
        return targets.split(boxes_per_image, 0)

    def encode_single(self, reference_boxes, proposals):
        """
        Encode a set of proposals with respect to some
        reference boxes

        Arguments:
            reference_boxes (Tensor): reference boxes
            proposals (Tensor): boxes to be encoded
        """
        dtype = reference_boxes.dtype
        device = reference_boxes.device
        weights = torch.as_tensor(self.weights, dtype=dtype, device=device)
        targets = encode_boxes(reference_boxes, proposals, weights)

        return targets

    def decode(self, rel_codes, boxes):
        # type: (Tensor, List[Tensor])
        assert isinstance(boxes, (list, tuple))
        assert isinstance(rel_codes, torch.Tensor)
        boxes_per_image = [b.size(0) for b in boxes]
        concat_boxes = torch.cat(boxes, dim=0)
        box_sum = 0
        for val in boxes_per_image:
            box_sum += val
        pred_boxes = self.decode_single(
            rel_codes.reshape(box_sum, -1), concat_boxes
        )
        # One decoded box per (anchor, class) pair: [total_boxes, classes, 4].
        return pred_boxes.reshape(box_sum, -1, 4)

    def decode_single(self, rel_codes, boxes):
        """
        From a set of original boxes and encoded relative box offsets,
        get the decoded boxes.

        Arguments:
            rel_codes (Tensor): encoded boxes
            boxes (Tensor): reference boxes.
        """

        boxes = boxes.to(rel_codes.dtype)

        widths = boxes[:, 2] - boxes[:, 0]
        heights = boxes[:, 3] - boxes[:, 1]
        ctr_x = boxes[:, 0] + 0.5 * widths
        ctr_y = boxes[:, 1] + 0.5 * heights

        wx, wy, ww, wh = self.weights
        # Strided slices handle multi-class codes laid out as [dx,dy,dw,dh]*K.
        dx = rel_codes[:, 0::4] / wx
        dy = rel_codes[:, 1::4] / wy
        dw = rel_codes[:, 2::4] / ww
        dh = rel_codes[:, 3::4] / wh

        # Prevent sending too large values into torch.exp()
        dw = torch.clamp(dw, max=self.bbox_xform_clip)
        dh = torch.clamp(dh, max=self.bbox_xform_clip)

        pred_ctr_x = dx * widths[:, None] + ctr_x[:, None]
        pred_ctr_y = dy * heights[:, None] + ctr_y[:, None]
        pred_w = torch.exp(dw) * widths[:, None]
        pred_h = torch.exp(dh) * heights[:, None]

        pred_boxes1 = pred_ctr_x - torch.tensor(0.5, dtype=pred_ctr_x.dtype, device=pred_w.device) * pred_w
        pred_boxes2 = pred_ctr_y - torch.tensor(0.5, dtype=pred_ctr_y.dtype, device=pred_h.device) * pred_h
        pred_boxes3 = pred_ctr_x + torch.tensor(0.5, dtype=pred_ctr_x.dtype, device=pred_w.device) * pred_w
        pred_boxes4 = pred_ctr_y + torch.tensor(0.5, dtype=pred_ctr_y.dtype, device=pred_h.device) * pred_h
        pred_boxes = torch.stack((pred_boxes1, pred_boxes2, pred_boxes3, pred_boxes4), dim=2).flatten(1)
        return pred_boxes


@torch.jit.script
class Matcher(object):
    """
    This class assigns to each predicted "element" (e.g., a box) a ground-truth
    element. Each predicted element will have exactly zero or one matches; each
    ground-truth element may be assigned to zero or more predicted elements.

    Matching is based on the MxN match_quality_matrix, that characterizes how well
    each (ground-truth, predicted)-pair match. For example, if the elements are
    boxes, the matrix may contain box IoU overlap values.

    The matcher returns a tensor of size N containing the index of the ground-truth
    element m that matches to prediction n. If there is no match, a negative value
    is returned.
    """

    BELOW_LOW_THRESHOLD = -1
    BETWEEN_THRESHOLDS = -2

    __annotations__ = {
        'BELOW_LOW_THRESHOLD': int,
        'BETWEEN_THRESHOLDS': int,
    }

    def __init__(self, high_threshold, low_threshold, allow_low_quality_matches=False):
        # type: (float, float, bool)
        """
        Args:
            high_threshold (float): quality values greater than or equal to
                this value are candidate matches.
            low_threshold (float): a lower quality threshold used to stratify
                matches into three levels:
                1) matches >= high_threshold
                2) BETWEEN_THRESHOLDS matches in [low_threshold, high_threshold)
                3) BELOW_LOW_THRESHOLD matches in [0, low_threshold)
            allow_low_quality_matches (bool): if True, produce additional matches
                for predictions that have only low-quality match candidates. See
                set_low_quality_matches_ for more details.
        """
        self.BELOW_LOW_THRESHOLD = -1
        self.BETWEEN_THRESHOLDS = -2
        assert low_threshold <= high_threshold
        self.high_threshold = high_threshold
        self.low_threshold = low_threshold
        self.allow_low_quality_matches = allow_low_quality_matches

    def __call__(self, match_quality_matrix):
        """
        Args:
            match_quality_matrix (Tensor[float]): an MxN tensor, containing the
            pairwise quality between M ground-truth elements and N predicted elements.

        Returns:
            matches (Tensor[int64]): an N tensor where N[i] is a matched gt in
            [0, M - 1] or a negative value indicating that prediction i could not
            be matched.
        """
        if match_quality_matrix.numel() == 0:
            # empty targets or proposals not supported during training
            if match_quality_matrix.shape[0] == 0:
                raise ValueError(
                    "No ground-truth boxes available for one of the images "
                    "during training")
            else:
                raise ValueError(
                    "No proposal boxes available for one of the images "
                    "during training")

        # match_quality_matrix is M (gt) x N (predicted)
        # Max over gt elements (dim 0) to find best gt candidate for each prediction
        matched_vals, matches = match_quality_matrix.max(dim=0)
        if self.allow_low_quality_matches:
            # Keep a pristine copy before thresholds overwrite entries below.
            all_matches = matches.clone()
        else:
            all_matches = None

        # Assign candidate matches with low quality to negative (unassigned) values
        below_low_threshold = matched_vals < self.low_threshold
        between_thresholds = (matched_vals >= self.low_threshold) & (
            matched_vals < self.high_threshold
        )
        matches[below_low_threshold] = torch.tensor(self.BELOW_LOW_THRESHOLD)
        matches[between_thresholds] = torch.tensor(self.BETWEEN_THRESHOLDS)

        if self.allow_low_quality_matches:
            assert all_matches is not None
            self.set_low_quality_matches_(matches, all_matches, match_quality_matrix)

        return matches

    def set_low_quality_matches_(self, matches, all_matches, match_quality_matrix):
        """
        Produce additional matches for predictions that have only low-quality matches.
        Specifically, for each ground-truth find the set of predictions that have
        maximum overlap with it (including ties); for each prediction in that set, if
        it is unmatched, then match it to the ground-truth with which it has the highest
        quality value.
        """
        # For each gt, find the prediction with which it has highest quality
        highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1)
        # Find highest quality match available, even if it is low, including ties
        gt_pred_pairs_of_highest_quality = torch.nonzero(
            match_quality_matrix == highest_quality_foreach_gt[:, None]
        )
        # Example gt_pred_pairs_of_highest_quality:
        #   tensor([[    0, 39796],
        #           [    1, 32055],
        #           [    1, 32070],
        #           [    2, 39190],
        #           [    2, 40255],
        #           [    3, 40390],
        #           [    3, 41455],
        #           [    4, 45470],
        #           [    5, 45325],
        #           [    5, 46390]])
        # Each row is a (gt index, prediction index)
        # Note how gt items 1, 2, 3, and 5 each have two ties

        pred_inds_to_update = gt_pred_pairs_of_highest_quality[:, 1]
        # Restore the pre-threshold assignments for these predictions.
        matches[pred_inds_to_update] = all_matches[pred_inds_to_update]


def smooth_l1_loss(input, target, beta: float = 1. / 9, size_average: bool = True):
    """
    very similar to the smooth_l1_loss from pytorch, but with
    the extra beta parameter
    """
    n = torch.abs(input - target)
    # Quadratic for |x| < beta, linear beyond — continuous at the boundary.
    cond = n < beta
    loss = torch.where(cond, 0.5 * n ** 2 / beta, n - 0.5 * beta)
    if size_average:
        return loss.mean()
    return loss.sum()
/rpi_torchvision-0.7.0-cp37-cp37m-linux_armv7l.whl/torchvision/models/detection/_utils.py
0.736306
0.674443
_utils.py
pypi
from collections import OrderedDict
import torch
from torch import nn
import warnings
from torch.jit.annotations import Tuple, List, Dict, Optional
from torch import Tensor


class GeneralizedRCNN(nn.Module):
    """
    Main class for Generalized R-CNN.

    Arguments:
        backbone (nn.Module):
        rpn (nn.Module):
        roi_heads (nn.Module): takes the features + the proposals from the RPN and computes
            detections / masks from it.
        transform (nn.Module): performs the data transformation from the inputs to feed into
            the model
    """

    def __init__(self, backbone, rpn, roi_heads, transform):
        super(GeneralizedRCNN, self).__init__()
        self.transform = transform
        self.backbone = backbone
        self.rpn = rpn
        self.roi_heads = roi_heads
        # used only on torchscript mode
        self._has_warned = False

    @torch.jit.unused
    def eager_outputs(self, losses, detections):
        # type: (Dict[str, Tensor], List[Dict[str, Tensor]]) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]]
        # Eager mode returns either losses (training) or detections (eval),
        # unlike scripting which always returns the tuple.
        if self.training:
            return losses

        return detections

    def forward(self, images, targets=None):
        # type: (List[Tensor], Optional[List[Dict[str, Tensor]]])
        """
        Arguments:
            images (list[Tensor]): images to be processed
            targets (list[Dict[Tensor]]): ground-truth boxes present in the image (optional)

        Returns:
            result (list[BoxList] or dict[Tensor]): the output from the model.
                During training, it returns a dict[Tensor] which contains the losses.
                During testing, it returns list[BoxList] contains additional fields
                like `scores`, `labels` and `mask` (for Mask R-CNN models).

        """
        if self.training and targets is None:
            raise ValueError("In training mode, targets should be passed")
        # Record original sizes before transform resizes, so detections can be
        # mapped back to input coordinates in postprocess().
        original_image_sizes = torch.jit.annotate(List[Tuple[int, int]], [])
        for img in images:
            val = img.shape[-2:]
            assert len(val) == 2
            original_image_sizes.append((val[0], val[1]))

        images, targets = self.transform(images, targets)
        features = self.backbone(images.tensors)
        if isinstance(features, torch.Tensor):
            # Backbones without FPN return a single tensor; wrap it so the
            # downstream heads always see an ordered dict of feature maps.
            features = OrderedDict([('0', features)])
        proposals, proposal_losses = self.rpn(images, features, targets)
        detections, detector_losses = self.roi_heads(features, proposals, images.image_sizes, targets)
        detections = self.transform.postprocess(detections, images.image_sizes, original_image_sizes)

        losses = {}
        losses.update(detector_losses)
        losses.update(proposal_losses)

        if torch.jit.is_scripting():
            if not self._has_warned:
                warnings.warn("RCNN always returns a (Losses, Detections) tuple in scripting")
                self._has_warned = True
            return (losses, detections)
        else:
            return self.eager_outputs(losses, detections)
/rpi_torchvision-0.7.0-cp37-cp37m-linux_armv7l.whl/torchvision/models/detection/generalized_rcnn.py
0.962612
0.558086
generalized_rcnn.py
pypi
import torch
from torch import nn, Tensor
from torch.nn.modules.utils import _pair
from torch.jit.annotations import List

from ._utils import convert_boxes_to_roi_format, check_roi_boxes_shape


def ps_roi_align(input, boxes, output_size, spatial_scale=1.0, sampling_ratio=-1):
    # type: (Tensor, Tensor, int, float, int) -> Tensor
    """
    Performs Position-Sensitive Region of Interest (RoI) Align operator
    mentioned in Light-Head R-CNN.

    Arguments:
        input (Tensor[N, C, H, W]): input tensor
        boxes (Tensor[K, 5] or List[Tensor[L, 4]]): the box coordinates in (x1, y1, x2, y2)
            format where the regions will be taken from. If a single Tensor is passed,
            then the first column should contain the batch index. If a list of Tensors
            is passed, then each Tensor will correspond to the boxes for an element i
            in a batch
        output_size (int or Tuple[int, int]): the size of the output after the cropping
            is performed, as (height, width)
        spatial_scale (float): a scaling factor that maps the input coordinates to
            the box coordinates. Default: 1.0
        sampling_ratio (int): number of sampling points in the interpolation grid
            used to compute the output value of each pooled output bin. If > 0
            then exactly sampling_ratio x sampling_ratio grid points are used.
            If <= 0, then an adaptive number of grid points are used (computed as
            ceil(roi_width / pooled_w), and likewise for height). Default: -1

    Returns:
        output (Tensor[K, C, output_size[0], output_size[1]])
    """
    check_roi_boxes_shape(boxes)
    rois = boxes
    pooled_h, pooled_w = _pair(output_size)
    if not isinstance(rois, torch.Tensor):
        # List of per-image boxes: convert to a single tensor whose first
        # column carries the batch index.
        rois = convert_boxes_to_roi_format(rois)
    # The native op also returns the argmax-style channel mapping; only the
    # pooled output is exposed here.
    pooled, _ = torch.ops.torchvision.ps_roi_align(input, rois, spatial_scale,
                                                   pooled_h, pooled_w,
                                                   sampling_ratio)
    return pooled


class PSRoIAlign(nn.Module):
    """
    See ps_roi_align
    """
    def __init__(self, output_size, spatial_scale, sampling_ratio):
        super(PSRoIAlign, self).__init__()
        self.output_size = output_size
        self.spatial_scale = spatial_scale
        self.sampling_ratio = sampling_ratio

    def forward(self, input, rois):
        return ps_roi_align(input, rois, self.output_size, self.spatial_scale,
                            self.sampling_ratio)

    def __repr__(self):
        return ('{}(output_size={}, spatial_scale={}, sampling_ratio={})'
                .format(self.__class__.__name__, self.output_size,
                        self.spatial_scale, self.sampling_ratio))
/rpi_torchvision-0.7.0-cp37-cp37m-linux_armv7l.whl/torchvision/ops/ps_roi_align.py
0.902629
0.773772
ps_roi_align.py
pypi
import torch
from torch import nn, Tensor
from torch.nn.modules.utils import _pair
from torch.jit.annotations import List

from ._utils import convert_boxes_to_roi_format, check_roi_boxes_shape


def ps_roi_pool(input, boxes, output_size, spatial_scale=1.0):
    # type: (Tensor, Tensor, int, float) -> Tensor
    """
    Performs Position-Sensitive Region of Interest (RoI) Pool operator
    described in R-FCN.

    Arguments:
        input (Tensor[N, C, H, W]): input tensor
        boxes (Tensor[K, 5] or List[Tensor[L, 4]]): the box coordinates in (x1, y1, x2, y2)
            format where the regions will be taken from. If a single Tensor is passed,
            then the first column should contain the batch index. If a list of Tensors
            is passed, then each Tensor will correspond to the boxes for an element i
            in a batch
        output_size (int or Tuple[int, int]): the size of the output after the cropping
            is performed, as (height, width)
        spatial_scale (float): a scaling factor that maps the input coordinates to
            the box coordinates. Default: 1.0

    Returns:
        output (Tensor[K, C, output_size[0], output_size[1]])
    """
    check_roi_boxes_shape(boxes)
    rois = boxes
    pooled_h, pooled_w = _pair(output_size)
    if not isinstance(rois, torch.Tensor):
        # List of per-image boxes: convert to a single tensor whose first
        # column carries the batch index.
        rois = convert_boxes_to_roi_format(rois)
    # The native op also returns a channel-mapping tensor; discard it.
    pooled, _ = torch.ops.torchvision.ps_roi_pool(input, rois, spatial_scale,
                                                  pooled_h, pooled_w)
    return pooled


class PSRoIPool(nn.Module):
    """
    See ps_roi_pool
    """
    def __init__(self, output_size, spatial_scale):
        super(PSRoIPool, self).__init__()
        self.output_size = output_size
        self.spatial_scale = spatial_scale

    def forward(self, input, rois):
        return ps_roi_pool(input, rois, self.output_size, self.spatial_scale)

    def __repr__(self):
        return ('{}(output_size={}, spatial_scale={})'
                .format(self.__class__.__name__, self.output_size,
                        self.spatial_scale))
/rpi_torchvision-0.7.0-cp37-cp37m-linux_armv7l.whl/torchvision/ops/ps_roi_pool.py
0.902583
0.756627
ps_roi_pool.py
pypi
import sys

import torch

# Opset in which the torchvision custom ops below are expressible.
_onnx_opset_version = 11


def _register_custom_op():
    # Imported lazily so that merely importing torchvision does not pull in
    # the ONNX export machinery.
    from torch.onnx.symbolic_helper import parse_args, scalar_type_to_onnx, scalar_type_to_pytorch_type, \
        cast_pytorch_to_onnx
    from torch.onnx.symbolic_opset9 import select, unsqueeze, squeeze, _cast_Long, reshape

    @parse_args('v', 'v', 'f')
    def symbolic_multi_label_nms(g, boxes, scores, iou_threshold):
        # ONNX NonMaxSuppression works on batched, per-class inputs, so add
        # the missing batch and class dimensions first.
        boxes = unsqueeze(g, boxes, 0)
        scores = unsqueeze(g, unsqueeze(g, scores, 0), 0)
        max_output_per_class = g.op('Constant', value_t=torch.tensor([sys.maxsize], dtype=torch.long))
        iou_threshold = g.op('Constant', value_t=torch.tensor([iou_threshold], dtype=torch.float))
        nms_out = g.op('NonMaxSuppression', boxes, scores, max_output_per_class, iou_threshold)
        # NonMaxSuppression yields (batch, class, box_index) triples; keep
        # only the box-index column.
        return squeeze(g, select(g, nms_out, 1,
                                 g.op('Constant', value_t=torch.tensor([2], dtype=torch.long))), 1)

    @parse_args('v', 'v', 'f', 'i', 'i', 'i', 'i')
    def roi_align(g, input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio, aligned):
        if aligned:
            raise RuntimeError('Unsupported: ONNX export of roi_align with aligned')
        # Column 0 of rois is the batch index; the remaining four columns are
        # the box coordinates, which is what ONNX RoiAlign expects.
        batch_indices = _cast_Long(
            g,
            squeeze(g, select(g, rois, 1,
                              g.op('Constant', value_t=torch.tensor([0], dtype=torch.long))), 1),
            False)
        rois = select(g, rois, 1, g.op('Constant', value_t=torch.tensor([1, 2, 3, 4], dtype=torch.long)))
        return g.op('RoiAlign', input, rois, batch_indices,
                    spatial_scale_f=spatial_scale,
                    output_height_i=pooled_height,
                    output_width_i=pooled_width,
                    sampling_ratio_i=sampling_ratio)

    @parse_args('v', 'v', 'f', 'i', 'i')
    def roi_pool(g, input, rois, spatial_scale, pooled_height, pooled_width):
        roi_pool = g.op('MaxRoiPool', input, rois,
                        pooled_shape_i=(pooled_height, pooled_width),
                        spatial_scale_f=spatial_scale)
        # torchvision::roi_pool has two outputs (output, argmax); the ONNX op
        # only produces the pooled output.
        return roi_pool, None

    @parse_args('v', 'is')
    def new_empty_tensor_op(g, input, shape):
        dtype = input.type().scalarType()
        if dtype is None:
            # Fall back to float when the scalar type cannot be inferred.
            dtype = 'Float'
        dtype = scalar_type_to_onnx.index(cast_pytorch_to_onnx[dtype])
        shape = g.op("Constant", value_t=torch.tensor(shape))
        return g.op("ConstantOfShape", shape,
                    value_t=torch.tensor([0], dtype=scalar_type_to_pytorch_type[dtype]))

    from torch.onnx import register_custom_op_symbolic
    register_custom_op_symbolic('torchvision::nms', symbolic_multi_label_nms, _onnx_opset_version)
    register_custom_op_symbolic('torchvision::roi_align', roi_align, _onnx_opset_version)
    register_custom_op_symbolic('torchvision::roi_pool', roi_pool, _onnx_opset_version)
    register_custom_op_symbolic('torchvision::_new_empty_tensor_op', new_empty_tensor_op, _onnx_opset_version)
/rpi_torchvision-0.7.0-cp37-cp37m-linux_armv7l.whl/torchvision/ops/_register_onnx_ops.py
0.560974
0.336713
_register_onnx_ops.py
pypi
import torch
from torch import nn, Tensor
from torch.nn.modules.utils import _pair
from torch.jit.annotations import List, BroadcastingList2

from ._utils import convert_boxes_to_roi_format, check_roi_boxes_shape


def roi_pool(input, boxes, output_size, spatial_scale=1.0):
    # type: (Tensor, Tensor, BroadcastingList2[int], float) -> Tensor
    """
    Performs Region of Interest (RoI) Pool operator described in Fast R-CNN

    Arguments:
        input (Tensor[N, C, H, W]): input tensor
        boxes (Tensor[K, 5] or List[Tensor[L, 4]]): the box coordinates in (x1, y1, x2, y2)
            format where the regions will be taken from. If a single Tensor is passed,
            then the first column should contain the batch index. If a list of Tensors
            is passed, then each Tensor will correspond to the boxes for an element i
            in a batch
        output_size (int or Tuple[int, int]): the size of the output after the cropping
            is performed, as (height, width)
        spatial_scale (float): a scaling factor that maps the input coordinates to
            the box coordinates. Default: 1.0

    Returns:
        output (Tensor[K, C, output_size[0], output_size[1]])
    """
    check_roi_boxes_shape(boxes)
    rois = boxes
    pooled_h, pooled_w = _pair(output_size)
    if not isinstance(rois, torch.Tensor):
        # List of per-image boxes: convert to a single tensor whose first
        # column carries the batch index.
        rois = convert_boxes_to_roi_format(rois)
    # The native op also returns the argmax indices; discard them.
    pooled, _ = torch.ops.torchvision.roi_pool(input, rois, spatial_scale,
                                               pooled_h, pooled_w)
    return pooled


class RoIPool(nn.Module):
    """
    See roi_pool
    """
    def __init__(self, output_size, spatial_scale):
        super(RoIPool, self).__init__()
        self.output_size = output_size
        self.spatial_scale = spatial_scale

    def forward(self, input, rois):
        return roi_pool(input, rois, self.output_size, self.spatial_scale)

    def __repr__(self):
        return ('{}(output_size={}, spatial_scale={})'
                .format(self.__class__.__name__, self.output_size,
                        self.spatial_scale))
/rpi_torchvision-0.7.0-cp37-cp37m-linux_armv7l.whl/torchvision/ops/roi_pool.py
0.891923
0.762336
roi_pool.py
pypi
import math

import torch
from torch import nn, Tensor
from torch.nn import init
from torch.nn.parameter import Parameter
from torch.nn.modules.utils import _pair
from torch.jit.annotations import Optional, Tuple


def deform_conv2d(input, offset, weight, bias=None, stride=(1, 1), padding=(0, 0),
                  dilation=(1, 1)):
    # type: (Tensor, Tensor, Tensor, Optional[Tensor], Tuple[int, int], Tuple[int, int], Tuple[int, int]) -> Tensor
    """
    Performs Deformable Convolution, described in Deformable Convolutional Networks

    Arguments:
        input (Tensor[batch_size, in_channels, in_height, in_width]): input tensor
        offset (Tensor[batch_size, 2 * offset_groups * kernel_height * kernel_width,
            out_height, out_width]): offsets to be applied for each position in the
            convolution kernel.
        weight (Tensor[out_channels, in_channels // groups, kernel_height, kernel_width]):
            convolution weights, split into groups of size (in_channels // groups)
        bias (Tensor[out_channels]): optional bias of shape (out_channels,). Default: None
        stride (int or Tuple[int, int]): distance between convolution centers. Default: 1
        padding (int or Tuple[int, int]): height/width of padding of zeroes around
            each image. Default: 0
        dilation (int or Tuple[int, int]): the spacing between kernel elements. Default: 1

    Returns:
        output (Tensor[batch_sz, out_channels, out_h, out_w]): result of convolution

    Examples::
        >>> input = torch.rand(1, 3, 10, 10)
        >>> kh, kw = 3, 3
        >>> weight = torch.rand(5, 3, kh, kw)
        >>> # offset should have the same spatial size as the output
        >>> # of the convolution. In this case, for an input of 10, stride of 1
        >>> # and kernel size of 3, without padding, the output size is 8
        >>> offset = torch.rand(1, 2 * kh * kw, 8, 8)
        >>> out = deform_conv2d(input, offset, weight)
        >>> print(out.shape)
        >>> # returns
        >>>  torch.Size([1, 5, 8, 8])
    """
    out_channels = weight.shape[0]
    if bias is None:
        # The native kernel always expects a bias tensor; substitute zeros.
        bias = torch.zeros(out_channels, device=input.device, dtype=input.dtype)

    stride_h, stride_w = _pair(stride)
    pad_h, pad_w = _pair(padding)
    dil_h, dil_w = _pair(dilation)
    weights_h, weights_w = weight.shape[-2:]
    _, n_in_channels, in_h, in_w = input.shape

    # Each offset group supplies one (y, x) offset pair per kernel position,
    # hence the factor of 2 in the channel count.
    n_offset_grps = offset.shape[1] // (2 * weights_h * weights_w)
    n_weight_grps = n_in_channels // weight.shape[1]

    if n_offset_grps == 0:
        raise RuntimeError(
            "the shape of the offset tensor at dimension 1 is not valid. It should "
            "be a multiple of 2 * weight.size[2] * weight.size[3].\n"
            "Got offset.shape[1]={}, while 2 * weight.size[2] * weight.size[3]={}".format(
                offset.shape[1], 2 * weights_h * weights_w))

    return torch.ops.torchvision.deform_conv2d(
        input,
        weight,
        offset,
        bias,
        stride_h, stride_w,
        pad_h, pad_w,
        dil_h, dil_w,
        n_weight_grps,
        n_offset_grps)


class DeformConv2d(nn.Module):
    """
    See deform_conv2d
    """
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0,
                 dilation=1, groups=1, bias=True):
        super(DeformConv2d, self).__init__()

        if in_channels % groups != 0:
            raise ValueError('in_channels must be divisible by groups')
        if out_channels % groups != 0:
            raise ValueError('out_channels must be divisible by groups')

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = _pair(kernel_size)
        self.stride = _pair(stride)
        self.padding = _pair(padding)
        self.dilation = _pair(dilation)
        self.groups = groups

        self.weight = Parameter(torch.empty(out_channels, in_channels // groups,
                                            self.kernel_size[0], self.kernel_size[1]))

        if bias:
            self.bias = Parameter(torch.empty(out_channels))
        else:
            self.register_parameter('bias', None)

        self.reset_parameters()

    def reset_parameters(self):
        # Same initialization scheme as nn.Conv2d.
        init.kaiming_uniform_(self.weight, a=math.sqrt(5))

        if self.bias is not None:
            fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
            bound = 1 / math.sqrt(fan_in)
            init.uniform_(self.bias, -bound, bound)

    def forward(self, input, offset):
        """
        Arguments:
            input (Tensor[batch_size, in_channels, in_height, in_width]): input tensor
            offset (Tensor[batch_size, 2 * offset_groups * kernel_height * kernel_width,
                out_height, out_width]): offsets to be applied for each position in the
                convolution kernel.
        """
        return deform_conv2d(input, offset, self.weight, self.bias, stride=self.stride,
                             padding=self.padding, dilation=self.dilation)

    def __repr__(self):
        # Only mention non-default settings, mirroring nn.Conv2d's repr.
        s = self.__class__.__name__ + '('
        s += '{in_channels}'
        s += ', {out_channels}'
        s += ', kernel_size={kernel_size}'
        s += ', stride={stride}'
        s += ', padding={padding}' if self.padding != (0, 0) else ''
        s += ', dilation={dilation}' if self.dilation != (1, 1) else ''
        s += ', groups={groups}' if self.groups != 1 else ''
        s += ', bias=False' if self.bias is None else ''
        s += ')'
        return s.format(**self.__dict__)
/rpi_torchvision-0.7.0-cp37-cp37m-linux_armv7l.whl/torchvision/ops/deform_conv.py
0.93847
0.707177
deform_conv.py
pypi
import torch
from torch import nn, Tensor
from torch.nn.modules.utils import _pair
from torch.jit.annotations import List, BroadcastingList2

from ._utils import convert_boxes_to_roi_format, check_roi_boxes_shape


def roi_align(input, boxes, output_size, spatial_scale=1.0, sampling_ratio=-1, aligned=False):
    # type: (Tensor, Tensor, BroadcastingList2[int], float, int, bool) -> Tensor
    """
    Performs Region of Interest (RoI) Align operator described in Mask R-CNN

    Arguments:
        input (Tensor[N, C, H, W]): input tensor
        boxes (Tensor[K, 5] or List[Tensor[L, 4]]): the box coordinates in (x1, y1, x2, y2)
            format where the regions will be taken from. If a single Tensor is passed,
            then the first column should contain the batch index. If a list of Tensors
            is passed, then each Tensor will correspond to the boxes for an element i
            in a batch
        output_size (int or Tuple[int, int]): the size of the output after the cropping
            is performed, as (height, width)
        spatial_scale (float): a scaling factor that maps the input coordinates to
            the box coordinates. Default: 1.0
        sampling_ratio (int): number of sampling points in the interpolation grid
            used to compute the output value of each pooled output bin. If > 0,
            then exactly sampling_ratio x sampling_ratio grid points are used. If
            <= 0, then an adaptive number of grid points are used (computed as
            ceil(roi_width / pooled_w), and likewise for height). Default: -1
        aligned (bool): If False, use the legacy implementation. If True, shift the
            sampling points by -0.5 so they align more exactly with the pixel
            centers of two neighboring pixel indices. This is the version used
            in Detectron2.

    Returns:
        output (Tensor[K, C, output_size[0], output_size[1]])
    """
    check_roi_boxes_shape(boxes)
    rois = boxes
    pooled_h, pooled_w = _pair(output_size)
    if not isinstance(rois, torch.Tensor):
        # List of per-image boxes: convert to a single tensor whose first
        # column carries the batch index.
        rois = convert_boxes_to_roi_format(rois)
    return torch.ops.torchvision.roi_align(input, rois, spatial_scale,
                                           pooled_h, pooled_w,
                                           sampling_ratio, aligned)


class RoIAlign(nn.Module):
    """
    See roi_align
    """
    def __init__(self, output_size, spatial_scale, sampling_ratio, aligned=False):
        super(RoIAlign, self).__init__()
        self.output_size = output_size
        self.spatial_scale = spatial_scale
        self.sampling_ratio = sampling_ratio
        self.aligned = aligned

    def forward(self, input, rois):
        return roi_align(input, rois, self.output_size, self.spatial_scale,
                         self.sampling_ratio, self.aligned)

    def __repr__(self):
        return ('{}(output_size={}, spatial_scale={}, sampling_ratio={}, aligned={})'
                .format(self.__class__.__name__, self.output_size,
                        self.spatial_scale, self.sampling_ratio, self.aligned))
/rpi_torchvision-0.7.0-cp37-cp37m-linux_armv7l.whl/torchvision/ops/roi_align.py
0.90928
0.773302
roi_align.py
pypi