after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def lazy_covariance_matrix(self):
"""
The covariance_matrix, represented as a LazyTensor
"""
return super().lazy_covariance_matrix
|
def lazy_covariance_matrix(self):
"""
The covariance_matrix, represented as a LazyTensor
"""
if self.islazy:
return self._covar
else:
return lazify(super().covariance_matrix)
|
https://github.com/cornellius-gp/gpytorch/issues/905
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-c955f39ee560> in <module>
4 with torch.no_grad():
5 for x_batch, y_batch in test_loader:
----> 6 preds = model(x_batch)
7 means = torch.cat([means, preds.mean.cpu()])
8 means = means[1:]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/models/abstract_variational_gp.py in __call__(self, inputs, **kwargs)
20 inputs = inputs.unsqueeze(-1)
21
---> 22 return self.variational_strategy(inputs)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in __call__(self, x)
228 self._memoize_cache = dict()
229
--> 230 return super(VariationalStrategy, self).__call__(x)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
20
21 def __call__(self, *inputs, **kwargs):
---> 22 outputs = self.forward(*inputs, **kwargs)
23 if isinstance(outputs, list):
24 return [_validate_module_outputs(output) for output in outputs]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in forward(self, x)
214 induc_induc_covar.inv_matmul(induc_data_covar)
215 )
--> 216 data_covariance = data_data_covar + neg_induc_data_data_covar
217 predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
218
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/lazy/sum_lazy_tensor.py in __add__(self, other)
75 return SumLazyTensor(*(list(self.lazy_tensors) + [other]))
76 else:
---> 77 raise AttributeError("other must be a LazyTensor")
78
79 def diag(self):
AttributeError: other must be a LazyTensor
|
AttributeError
|
def add_diag(self, added_diag):
shape = _mul_broadcast_shape(self._diag.shape, added_diag.shape)
return DiagLazyTensor(self._diag.expand(shape) + added_diag.expand(shape))
|
def add_diag(self, added_diag):
return DiagLazyTensor(self._diag + added_diag.expand_as(self._diag))
|
https://github.com/cornellius-gp/gpytorch/issues/905
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-c955f39ee560> in <module>
4 with torch.no_grad():
5 for x_batch, y_batch in test_loader:
----> 6 preds = model(x_batch)
7 means = torch.cat([means, preds.mean.cpu()])
8 means = means[1:]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/models/abstract_variational_gp.py in __call__(self, inputs, **kwargs)
20 inputs = inputs.unsqueeze(-1)
21
---> 22 return self.variational_strategy(inputs)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in __call__(self, x)
228 self._memoize_cache = dict()
229
--> 230 return super(VariationalStrategy, self).__call__(x)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
20
21 def __call__(self, *inputs, **kwargs):
---> 22 outputs = self.forward(*inputs, **kwargs)
23 if isinstance(outputs, list):
24 return [_validate_module_outputs(output) for output in outputs]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in forward(self, x)
214 induc_induc_covar.inv_matmul(induc_data_covar)
215 )
--> 216 data_covariance = data_data_covar + neg_induc_data_data_covar
217 predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
218
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/lazy/sum_lazy_tensor.py in __add__(self, other)
75 return SumLazyTensor(*(list(self.lazy_tensors) + [other]))
76 else:
---> 77 raise AttributeError("other must be a LazyTensor")
78
79 def diag(self):
AttributeError: other must be a LazyTensor
|
AttributeError
|
def _size(self):
return _matmul_broadcast_shape(
self.left_lazy_tensor.shape, self.right_lazy_tensor.shape
)
|
def _size(self):
return torch.Size(
(
*self.left_lazy_tensor.batch_shape,
self.left_lazy_tensor.size(-2),
self.right_lazy_tensor.size(-1),
)
)
|
https://github.com/cornellius-gp/gpytorch/issues/905
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-c955f39ee560> in <module>
4 with torch.no_grad():
5 for x_batch, y_batch in test_loader:
----> 6 preds = model(x_batch)
7 means = torch.cat([means, preds.mean.cpu()])
8 means = means[1:]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/models/abstract_variational_gp.py in __call__(self, inputs, **kwargs)
20 inputs = inputs.unsqueeze(-1)
21
---> 22 return self.variational_strategy(inputs)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in __call__(self, x)
228 self._memoize_cache = dict()
229
--> 230 return super(VariationalStrategy, self).__call__(x)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
20
21 def __call__(self, *inputs, **kwargs):
---> 22 outputs = self.forward(*inputs, **kwargs)
23 if isinstance(outputs, list):
24 return [_validate_module_outputs(output) for output in outputs]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in forward(self, x)
214 induc_induc_covar.inv_matmul(induc_data_covar)
215 )
--> 216 data_covariance = data_data_covar + neg_induc_data_data_covar
217 predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
218
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/lazy/sum_lazy_tensor.py in __add__(self, other)
75 return SumLazyTensor(*(list(self.lazy_tensors) + [other]))
76 else:
---> 77 raise AttributeError("other must be a LazyTensor")
78
79 def diag(self):
AttributeError: other must be a LazyTensor
|
AttributeError
|
def _size(self):
return _mul_broadcast_shape(*[lt.shape for lt in self.lazy_tensors])
|
def _size(self):
return self.lazy_tensors[0].size()
|
https://github.com/cornellius-gp/gpytorch/issues/905
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-c955f39ee560> in <module>
4 with torch.no_grad():
5 for x_batch, y_batch in test_loader:
----> 6 preds = model(x_batch)
7 means = torch.cat([means, preds.mean.cpu()])
8 means = means[1:]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/models/abstract_variational_gp.py in __call__(self, inputs, **kwargs)
20 inputs = inputs.unsqueeze(-1)
21
---> 22 return self.variational_strategy(inputs)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in __call__(self, x)
228 self._memoize_cache = dict()
229
--> 230 return super(VariationalStrategy, self).__call__(x)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
20
21 def __call__(self, *inputs, **kwargs):
---> 22 outputs = self.forward(*inputs, **kwargs)
23 if isinstance(outputs, list):
24 return [_validate_module_outputs(output) for output in outputs]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in forward(self, x)
214 induc_induc_covar.inv_matmul(induc_data_covar)
215 )
--> 216 data_covariance = data_data_covar + neg_induc_data_data_covar
217 predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
218
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/lazy/sum_lazy_tensor.py in __add__(self, other)
75 return SumLazyTensor(*(list(self.lazy_tensors) + [other]))
76 else:
---> 77 raise AttributeError("other must be a LazyTensor")
78
79 def diag(self):
AttributeError: other must be a LazyTensor
|
AttributeError
|
def expected_log_prob(self, observations, function_dist, *params, **kwargs):
if torch.any(observations.eq(-1)):
warnings.warn(
"BernoulliLikelihood.expected_log_prob expects observations with labels in {0, 1}. "
"Observations with labels in {-1, 1} are deprecated.",
DeprecationWarning,
)
else:
observations = observations.mul(2).sub(1)
# Custom function here so we can use log_normal_cdf rather than Normal.cdf
# This is going to be less prone to overflow errors
log_prob_lambda = lambda function_samples: log_normal_cdf(
function_samples.mul(observations)
)
log_prob = self.quadrature(log_prob_lambda, function_dist)
return log_prob
|
def expected_log_prob(self, observations, function_dist, *params, **kwargs):
if torch.any(observations.eq(-1)):
warnings.warn(
"BernoulliLikelihood.expected_log_prob expects observations with labels in {0, 1}. "
"Observations with labels in {-1, 1} are deprecated.",
DeprecationWarning,
)
else:
observations = observations.mul(2).sub(1)
# Custom function here so we can use log_normal_cdf rather than Normal.cdf
# This is going to be less prone to overflow errors
log_prob_lambda = lambda function_samples: log_normal_cdf(
function_samples.mul(observations)
)
log_prob = self.quadrature(log_prob_lambda, function_dist)
return log_prob.sum(-1)
|
https://github.com/cornellius-gp/gpytorch/issues/905
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-c955f39ee560> in <module>
4 with torch.no_grad():
5 for x_batch, y_batch in test_loader:
----> 6 preds = model(x_batch)
7 means = torch.cat([means, preds.mean.cpu()])
8 means = means[1:]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/models/abstract_variational_gp.py in __call__(self, inputs, **kwargs)
20 inputs = inputs.unsqueeze(-1)
21
---> 22 return self.variational_strategy(inputs)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in __call__(self, x)
228 self._memoize_cache = dict()
229
--> 230 return super(VariationalStrategy, self).__call__(x)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
20
21 def __call__(self, *inputs, **kwargs):
---> 22 outputs = self.forward(*inputs, **kwargs)
23 if isinstance(outputs, list):
24 return [_validate_module_outputs(output) for output in outputs]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in forward(self, x)
214 induc_induc_covar.inv_matmul(induc_data_covar)
215 )
--> 216 data_covariance = data_data_covar + neg_induc_data_data_covar
217 predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
218
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/lazy/sum_lazy_tensor.py in __add__(self, other)
75 return SumLazyTensor(*(list(self.lazy_tensors) + [other]))
76 else:
---> 77 raise AttributeError("other must be a LazyTensor")
78
79 def diag(self):
AttributeError: other must be a LazyTensor
|
AttributeError
|
def expected_log_prob(
self, target: Tensor, input: MultivariateNormal, *params: Any, **kwargs: Any
) -> Tensor:
mean, variance = input.mean, input.variance
num_event_dim = len(input.event_shape)
noise = self._shaped_noise_covar(mean.shape, *params, **kwargs).diag()
# Potentially reshape the noise to deal with the multitask case
noise = noise.view(*noise.shape[:-1], *input.event_shape)
res = (
((target - mean) ** 2 + variance) / noise + noise.log() + math.log(2 * math.pi)
)
res = res.mul(-0.5)
if num_event_dim > 1: # Do appropriate summation for multitask Gaussian likelihoods
res = res.sum(list(range(-1, -num_event_dim, -1)))
return res
|
def expected_log_prob(
self, target: Tensor, input: MultivariateNormal, *params: Any, **kwargs: Any
) -> Tensor:
mean, variance = input.mean, input.variance
noise = self.noise_covar.noise
res = (
((target - mean) ** 2 + variance) / noise + noise.log() + math.log(2 * math.pi)
)
return res.mul(-0.5).sum(-1)
|
https://github.com/cornellius-gp/gpytorch/issues/905
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-c955f39ee560> in <module>
4 with torch.no_grad():
5 for x_batch, y_batch in test_loader:
----> 6 preds = model(x_batch)
7 means = torch.cat([means, preds.mean.cpu()])
8 means = means[1:]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/models/abstract_variational_gp.py in __call__(self, inputs, **kwargs)
20 inputs = inputs.unsqueeze(-1)
21
---> 22 return self.variational_strategy(inputs)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in __call__(self, x)
228 self._memoize_cache = dict()
229
--> 230 return super(VariationalStrategy, self).__call__(x)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
20
21 def __call__(self, *inputs, **kwargs):
---> 22 outputs = self.forward(*inputs, **kwargs)
23 if isinstance(outputs, list):
24 return [_validate_module_outputs(output) for output in outputs]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in forward(self, x)
214 induc_induc_covar.inv_matmul(induc_data_covar)
215 )
--> 216 data_covariance = data_data_covar + neg_induc_data_data_covar
217 predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
218
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/lazy/sum_lazy_tensor.py in __add__(self, other)
75 return SumLazyTensor(*(list(self.lazy_tensors) + [other]))
76 else:
---> 77 raise AttributeError("other must be a LazyTensor")
78
79 def diag(self):
AttributeError: other must be a LazyTensor
|
AttributeError
|
def __init__(self, max_plate_nesting=1):
super().__init__()
self._register_load_state_dict_pre_hook(self._batch_shape_state_dict_hook)
self.max_plate_nesting = max_plate_nesting
|
def __init__(self):
super().__init__()
self._register_load_state_dict_pre_hook(self._batch_shape_state_dict_hook)
self.quadrature = GaussHermiteQuadrature1D()
|
https://github.com/cornellius-gp/gpytorch/issues/905
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-c955f39ee560> in <module>
4 with torch.no_grad():
5 for x_batch, y_batch in test_loader:
----> 6 preds = model(x_batch)
7 means = torch.cat([means, preds.mean.cpu()])
8 means = means[1:]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/models/abstract_variational_gp.py in __call__(self, inputs, **kwargs)
20 inputs = inputs.unsqueeze(-1)
21
---> 22 return self.variational_strategy(inputs)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in __call__(self, x)
228 self._memoize_cache = dict()
229
--> 230 return super(VariationalStrategy, self).__call__(x)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
20
21 def __call__(self, *inputs, **kwargs):
---> 22 outputs = self.forward(*inputs, **kwargs)
23 if isinstance(outputs, list):
24 return [_validate_module_outputs(output) for output in outputs]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in forward(self, x)
214 induc_induc_covar.inv_matmul(induc_data_covar)
215 )
--> 216 data_covariance = data_data_covar + neg_induc_data_data_covar
217 predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
218
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/lazy/sum_lazy_tensor.py in __add__(self, other)
75 return SumLazyTensor(*(list(self.lazy_tensors) + [other]))
76 else:
---> 77 raise AttributeError("other must be a LazyTensor")
78
79 def diag(self):
AttributeError: other must be a LazyTensor
|
AttributeError
|
def expected_log_prob(self, observations, function_dist, *args, **kwargs):
likelihood_samples = self._draw_likelihood_samples(function_dist, *args, **kwargs)
res = likelihood_samples.log_prob(observations).mean(dim=0)
return res
|
def expected_log_prob(self, observations, function_dist, *params, **kwargs):
"""
Computes the expected log likelihood (used for variational inference):
.. math::
\mathbb{E}_{f(x)} \left[ \log p \left( y \mid f(x) \right) \right]
Args:
:attr:`function_dist` (:class:`gpytorch.distributions.MultivariateNormal`)
Distribution for :math:`f(x)`.
:attr:`observations` (:class:`torch.Tensor`)
Values of :math:`y`.
:attr:`kwargs`
Returns
`torch.Tensor` (log probability)
"""
log_prob_lambda = lambda function_samples: self.forward(function_samples).log_prob(
observations
)
log_prob = self.quadrature(log_prob_lambda, function_dist)
return log_prob.sum(tuple(range(-1, -len(function_dist.event_shape) - 1, -1)))
|
https://github.com/cornellius-gp/gpytorch/issues/905
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-c955f39ee560> in <module>
4 with torch.no_grad():
5 for x_batch, y_batch in test_loader:
----> 6 preds = model(x_batch)
7 means = torch.cat([means, preds.mean.cpu()])
8 means = means[1:]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/models/abstract_variational_gp.py in __call__(self, inputs, **kwargs)
20 inputs = inputs.unsqueeze(-1)
21
---> 22 return self.variational_strategy(inputs)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in __call__(self, x)
228 self._memoize_cache = dict()
229
--> 230 return super(VariationalStrategy, self).__call__(x)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
20
21 def __call__(self, *inputs, **kwargs):
---> 22 outputs = self.forward(*inputs, **kwargs)
23 if isinstance(outputs, list):
24 return [_validate_module_outputs(output) for output in outputs]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in forward(self, x)
214 induc_induc_covar.inv_matmul(induc_data_covar)
215 )
--> 216 data_covariance = data_data_covar + neg_induc_data_data_covar
217 predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
218
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/lazy/sum_lazy_tensor.py in __add__(self, other)
75 return SumLazyTensor(*(list(self.lazy_tensors) + [other]))
76 else:
---> 77 raise AttributeError("other must be a LazyTensor")
78
79 def diag(self):
AttributeError: other must be a LazyTensor
|
AttributeError
|
def forward(self, function_samples, *args, **kwargs):
raise NotImplementedError
|
def forward(self, function_samples, *params, **kwargs):
"""
Computes the conditional distribution p(y|f) that defines the likelihood.
Args:
:attr:`function_samples`
Samples from the function `f`
:attr:`kwargs`
Returns:
Distribution object (with same shape as :attr:`function_samples`)
"""
raise NotImplementedError
|
https://github.com/cornellius-gp/gpytorch/issues/905
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-c955f39ee560> in <module>
4 with torch.no_grad():
5 for x_batch, y_batch in test_loader:
----> 6 preds = model(x_batch)
7 means = torch.cat([means, preds.mean.cpu()])
8 means = means[1:]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/models/abstract_variational_gp.py in __call__(self, inputs, **kwargs)
20 inputs = inputs.unsqueeze(-1)
21
---> 22 return self.variational_strategy(inputs)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in __call__(self, x)
228 self._memoize_cache = dict()
229
--> 230 return super(VariationalStrategy, self).__call__(x)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
20
21 def __call__(self, *inputs, **kwargs):
---> 22 outputs = self.forward(*inputs, **kwargs)
23 if isinstance(outputs, list):
24 return [_validate_module_outputs(output) for output in outputs]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in forward(self, x)
214 induc_induc_covar.inv_matmul(induc_data_covar)
215 )
--> 216 data_covariance = data_data_covar + neg_induc_data_data_covar
217 predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
218
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/lazy/sum_lazy_tensor.py in __add__(self, other)
75 return SumLazyTensor(*(list(self.lazy_tensors) + [other]))
76 else:
---> 77 raise AttributeError("other must be a LazyTensor")
78
79 def diag(self):
AttributeError: other must be a LazyTensor
|
AttributeError
|
def marginal(self, function_dist, *args, **kwargs):
res = self._draw_likelihood_samples(function_dist, *args, **kwargs)
return res
|
def marginal(self, function_dist, *params, **kwargs):
"""
Computes a predictive distribution :math:`p(y*|x*)` given either a posterior
distribution :math:`p(f|D,x)` or a prior distribution :math:`p(f|x)` as input.
With both exact inference and variational inference, the form of
:math:`p(f|D,x)` or :math:`p(f|x)` should usually be Gaussian. As a result, input
should usually be a MultivariateNormal specified by the mean and
(co)variance of :math:`p(f|...)`.
Args:
:attr:`function_dist` (:class:`gpytorch.distributions.MultivariateNormal`)
Distribution for :math:`f(x)`.
:attr:`kwargs`
Returns
Distribution object (the marginal distribution, or samples from it)
"""
sample_shape = torch.Size([settings.num_likelihood_samples.value()])
function_samples = function_dist.rsample(sample_shape)
return self.forward(function_samples)
|
https://github.com/cornellius-gp/gpytorch/issues/905
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-c955f39ee560> in <module>
4 with torch.no_grad():
5 for x_batch, y_batch in test_loader:
----> 6 preds = model(x_batch)
7 means = torch.cat([means, preds.mean.cpu()])
8 means = means[1:]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/models/abstract_variational_gp.py in __call__(self, inputs, **kwargs)
20 inputs = inputs.unsqueeze(-1)
21
---> 22 return self.variational_strategy(inputs)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in __call__(self, x)
228 self._memoize_cache = dict()
229
--> 230 return super(VariationalStrategy, self).__call__(x)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
20
21 def __call__(self, *inputs, **kwargs):
---> 22 outputs = self.forward(*inputs, **kwargs)
23 if isinstance(outputs, list):
24 return [_validate_module_outputs(output) for output in outputs]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in forward(self, x)
214 induc_induc_covar.inv_matmul(induc_data_covar)
215 )
--> 216 data_covariance = data_data_covar + neg_induc_data_data_covar
217 predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
218
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/lazy/sum_lazy_tensor.py in __add__(self, other)
75 return SumLazyTensor(*(list(self.lazy_tensors) + [other]))
76 else:
---> 77 raise AttributeError("other must be a LazyTensor")
78
79 def diag(self):
AttributeError: other must be a LazyTensor
|
AttributeError
|
def __call__(self, input, *args, **kwargs):
# Conditional
if torch.is_tensor(input):
return super().__call__(input, *args, **kwargs)
# Marginal
elif isinstance(input, MultivariateNormal):
return self.marginal(input, *args, **kwargs)
# Error
else:
raise RuntimeError(
"Likelihoods expects a MultivariateNormal input to make marginal predictions, or a "
"torch.Tensor for conditional predictions. Got a {}".format(
input.__class__.__name__
)
)
|
def __call__(self, input, *params, **kwargs):
# Conditional
if torch.is_tensor(input):
return super().__call__(input, *params, **kwargs)
# Marginal
elif isinstance(input, MultivariateNormal):
return self.marginal(input, *params, **kwargs)
# Error
else:
raise RuntimeError(
"Likelihoods expects a MultivariateNormal input to make marginal predictions, or a "
"torch.Tensor for conditional predictions. Got a {}".format(
input.__class__.__name__
)
)
|
https://github.com/cornellius-gp/gpytorch/issues/905
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-c955f39ee560> in <module>
4 with torch.no_grad():
5 for x_batch, y_batch in test_loader:
----> 6 preds = model(x_batch)
7 means = torch.cat([means, preds.mean.cpu()])
8 means = means[1:]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/models/abstract_variational_gp.py in __call__(self, inputs, **kwargs)
20 inputs = inputs.unsqueeze(-1)
21
---> 22 return self.variational_strategy(inputs)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in __call__(self, x)
228 self._memoize_cache = dict()
229
--> 230 return super(VariationalStrategy, self).__call__(x)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
20
21 def __call__(self, *inputs, **kwargs):
---> 22 outputs = self.forward(*inputs, **kwargs)
23 if isinstance(outputs, list):
24 return [_validate_module_outputs(output) for output in outputs]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in forward(self, x)
214 induc_induc_covar.inv_matmul(induc_data_covar)
215 )
--> 216 data_covariance = data_data_covar + neg_induc_data_data_covar
217 predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
218
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/lazy/sum_lazy_tensor.py in __add__(self, other)
75 return SumLazyTensor(*(list(self.lazy_tensors) + [other]))
76 else:
---> 77 raise AttributeError("other must be a LazyTensor")
78
79 def diag(self):
AttributeError: other must be a LazyTensor
|
AttributeError
|
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.quadrature = GaussHermiteQuadrature1D()
|
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._max_plate_nesting = 1
|
https://github.com/cornellius-gp/gpytorch/issues/905
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-c955f39ee560> in <module>
4 with torch.no_grad():
5 for x_batch, y_batch in test_loader:
----> 6 preds = model(x_batch)
7 means = torch.cat([means, preds.mean.cpu()])
8 means = means[1:]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/models/abstract_variational_gp.py in __call__(self, inputs, **kwargs)
20 inputs = inputs.unsqueeze(-1)
21
---> 22 return self.variational_strategy(inputs)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in __call__(self, x)
228 self._memoize_cache = dict()
229
--> 230 return super(VariationalStrategy, self).__call__(x)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
20
21 def __call__(self, *inputs, **kwargs):
---> 22 outputs = self.forward(*inputs, **kwargs)
23 if isinstance(outputs, list):
24 return [_validate_module_outputs(output) for output in outputs]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in forward(self, x)
214 induc_induc_covar.inv_matmul(induc_data_covar)
215 )
--> 216 data_covariance = data_data_covar + neg_induc_data_data_covar
217 predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
218
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/lazy/sum_lazy_tensor.py in __add__(self, other)
75 return SumLazyTensor(*(list(self.lazy_tensors) + [other]))
76 else:
---> 77 raise AttributeError("other must be a LazyTensor")
78
79 def diag(self):
AttributeError: other must be a LazyTensor
|
AttributeError
|
def get_fantasy_likelihood(self, **kwargs):
""" """
return super().get_fantasy_likelihood(**kwargs)
|
def get_fantasy_likelihood(self, **kwargs):
return deepcopy(self)
|
https://github.com/cornellius-gp/gpytorch/issues/905
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-c955f39ee560> in <module>
4 with torch.no_grad():
5 for x_batch, y_batch in test_loader:
----> 6 preds = model(x_batch)
7 means = torch.cat([means, preds.mean.cpu()])
8 means = means[1:]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/models/abstract_variational_gp.py in __call__(self, inputs, **kwargs)
20 inputs = inputs.unsqueeze(-1)
21
---> 22 return self.variational_strategy(inputs)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in __call__(self, x)
228 self._memoize_cache = dict()
229
--> 230 return super(VariationalStrategy, self).__call__(x)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
20
21 def __call__(self, *inputs, **kwargs):
---> 22 outputs = self.forward(*inputs, **kwargs)
23 if isinstance(outputs, list):
24 return [_validate_module_outputs(output) for output in outputs]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in forward(self, x)
214 induc_induc_covar.inv_matmul(induc_data_covar)
215 )
--> 216 data_covariance = data_data_covar + neg_induc_data_data_covar
217 predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
218
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/lazy/sum_lazy_tensor.py in __add__(self, other)
75 return SumLazyTensor(*(list(self.lazy_tensors) + [other]))
76 else:
---> 77 raise AttributeError("other must be a LazyTensor")
78
79 def diag(self):
AttributeError: other must be a LazyTensor
|
AttributeError
|
def marginal(self, function_dist, *args, **kwargs):
r"""
Computes a predictive distribution :math:`p(y^* | \mathbf x^*)` given either a posterior
distribution :math:`p(\mathbf f | \mathcal D, \mathbf x)` or a
prior distribution :math:`p(\mathbf f|\mathbf x)` as input.
With both exact inference and variational inference, the form of
:math:`p(\mathbf f|\mathcal D, \mathbf x)` or :math:`p(\mathbf f|
\mathbf x)` should usually be Gaussian. As a result, :attr:`function_dist`
should usually be a :obj:`~gpytorch.distributions.MultivariateNormal` specified by the mean and
(co)variance of :math:`p(\mathbf f|...)`.
Args:
:attr:`function_dist` (:class:`~gpytorch.distributions.MultivariateNormal`)
Distribution for :math:`f(x)`.
:attr:`args`, :attr:`kwargs`
Passed to the `forward` function
Returns:
Distribution object (the marginal distribution, or samples from it)
"""
return super().marginal(function_dist, *args, **kwargs)
|
def marginal(self, function_dist, *params, **kwargs):
name_prefix = kwargs.get("name_prefix", "")
num_samples = settings.num_likelihood_samples.value()
with pyro.plate(
name_prefix + ".num_particles_vectorized",
num_samples,
dim=(-self.max_plate_nesting - 1),
):
function_samples_shape = torch.Size(
[num_samples]
+ [1] * (self.max_plate_nesting - len(function_dist.batch_shape) - 1)
)
function_samples = function_dist(function_samples_shape)
if self.training:
return self(function_samples, *params, **kwargs)
else:
guide_trace = pyro.poutine.trace(self.guide).get_trace(*params, **kwargs)
marginal_fn = functools.partial(self.__call__, function_samples)
return pyro.poutine.replay(marginal_fn, trace=guide_trace)(
*params, **kwargs
)
|
https://github.com/cornellius-gp/gpytorch/issues/905
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-c955f39ee560> in <module>
4 with torch.no_grad():
5 for x_batch, y_batch in test_loader:
----> 6 preds = model(x_batch)
7 means = torch.cat([means, preds.mean.cpu()])
8 means = means[1:]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/models/abstract_variational_gp.py in __call__(self, inputs, **kwargs)
20 inputs = inputs.unsqueeze(-1)
21
---> 22 return self.variational_strategy(inputs)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in __call__(self, x)
228 self._memoize_cache = dict()
229
--> 230 return super(VariationalStrategy, self).__call__(x)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
20
21 def __call__(self, *inputs, **kwargs):
---> 22 outputs = self.forward(*inputs, **kwargs)
23 if isinstance(outputs, list):
24 return [_validate_module_outputs(output) for output in outputs]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in forward(self, x)
214 induc_induc_covar.inv_matmul(induc_data_covar)
215 )
--> 216 data_covariance = data_data_covar + neg_induc_data_data_covar
217 predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
218
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/lazy/sum_lazy_tensor.py in __add__(self, other)
75 return SumLazyTensor(*(list(self.lazy_tensors) + [other]))
76 else:
---> 77 raise AttributeError("other must be a LazyTensor")
78
79 def diag(self):
AttributeError: other must be a LazyTensor
|
AttributeError
|
def __call__(self, input, *args, **kwargs):
# Conditional
if torch.is_tensor(input):
return super().__call__(input, *args, **kwargs)
# Marginal
elif any(
[
isinstance(input, MultivariateNormal),
isinstance(input, pyro.distributions.Normal),
(
isinstance(input, pyro.distributions.Independent)
and isinstance(input.base_dist, pyro.distributions.Normal)
),
]
):
return self.marginal(input, *args, **kwargs)
# Error
else:
raise RuntimeError(
"Likelihoods expects a MultivariateNormal or Normal input to make marginal predictions, or a "
"torch.Tensor for conditional predictions. Got a {}".format(
input.__class__.__name__
)
)
|
def __call__(self, input, *params, **kwargs):
# Conditional
if torch.is_tensor(input):
return super().__call__(input, *params, **kwargs)
# Marginal
elif any(
[
isinstance(input, MultivariateNormal),
isinstance(input, pyro.distributions.Normal),
(
isinstance(input, pyro.distributions.Independent)
and isinstance(input.base_dist, pyro.distributions.Normal)
),
]
):
return self.marginal(input, *params, **kwargs)
# Error
else:
raise RuntimeError(
"Likelihoods expects a MultivariateNormal or Normal input to make marginal predictions, or a "
"torch.Tensor for conditional predictions. Got a {}".format(
input.__class__.__name__
)
)
|
https://github.com/cornellius-gp/gpytorch/issues/905
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-c955f39ee560> in <module>
4 with torch.no_grad():
5 for x_batch, y_batch in test_loader:
----> 6 preds = model(x_batch)
7 means = torch.cat([means, preds.mean.cpu()])
8 means = means[1:]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/models/abstract_variational_gp.py in __call__(self, inputs, **kwargs)
20 inputs = inputs.unsqueeze(-1)
21
---> 22 return self.variational_strategy(inputs)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in __call__(self, x)
228 self._memoize_cache = dict()
229
--> 230 return super(VariationalStrategy, self).__call__(x)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
20
21 def __call__(self, *inputs, **kwargs):
---> 22 outputs = self.forward(*inputs, **kwargs)
23 if isinstance(outputs, list):
24 return [_validate_module_outputs(output) for output in outputs]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in forward(self, x)
214 induc_induc_covar.inv_matmul(induc_data_covar)
215 )
--> 216 data_covariance = data_data_covar + neg_induc_data_data_covar
217 predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
218
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/lazy/sum_lazy_tensor.py in __add__(self, other)
75 return SumLazyTensor(*(list(self.lazy_tensors) + [other]))
76 else:
---> 77 raise AttributeError("other must be a LazyTensor")
78
79 def diag(self):
AttributeError: other must be a LazyTensor
|
AttributeError
|
def forward(
self, function_samples: Tensor, *params: Any, **kwargs: Any
) -> base_distributions.Normal:
noise = self._shaped_noise_covar(function_samples.shape, *params, **kwargs).diag()
noise = noise.view(*noise.shape[:-1], *function_samples.shape[-2:])
return base_distributions.Independent(
base_distributions.Normal(function_samples, noise.sqrt()), 1
)
|
def forward(
self, function_samples: Tensor, *params: Any, **kwargs: Any
) -> base_distributions.Normal:
noise = self._shaped_noise_covar(function_samples.shape, *params, **kwargs).diag()
noise = noise.view(*noise.shape[:-1], *function_samples.shape[-2:])
return base_distributions.Normal(function_samples, noise.sqrt())
|
https://github.com/cornellius-gp/gpytorch/issues/905
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-c955f39ee560> in <module>
4 with torch.no_grad():
5 for x_batch, y_batch in test_loader:
----> 6 preds = model(x_batch)
7 means = torch.cat([means, preds.mean.cpu()])
8 means = means[1:]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/models/abstract_variational_gp.py in __call__(self, inputs, **kwargs)
20 inputs = inputs.unsqueeze(-1)
21
---> 22 return self.variational_strategy(inputs)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in __call__(self, x)
228 self._memoize_cache = dict()
229
--> 230 return super(VariationalStrategy, self).__call__(x)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
20
21 def __call__(self, *inputs, **kwargs):
---> 22 outputs = self.forward(*inputs, **kwargs)
23 if isinstance(outputs, list):
24 return [_validate_module_outputs(output) for output in outputs]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in forward(self, x)
214 induc_induc_covar.inv_matmul(induc_data_covar)
215 )
--> 216 data_covariance = data_data_covar + neg_induc_data_data_covar
217 predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
218
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/lazy/sum_lazy_tensor.py in __add__(self, other)
75 return SumLazyTensor(*(list(self.lazy_tensors) + [other]))
76 else:
---> 77 raise AttributeError("other must be a LazyTensor")
78
79 def diag(self):
AttributeError: other must be a LazyTensor
|
AttributeError
|
def __init__(self, likelihood, model):
if not isinstance(likelihood, _GaussianLikelihoodBase):
raise RuntimeError("Likelihood must be Gaussian for exact inference")
super(ExactMarginalLogLikelihood, self).__init__(likelihood, model)
|
def __init__(self, likelihood, model):
"""
A special MLL designed for exact inference
Args:
- likelihood: (Likelihood) - the likelihood for the model
- model: (Module) - the exact GP model
"""
if not isinstance(likelihood, _GaussianLikelihoodBase):
raise RuntimeError("Likelihood must be Gaussian for exact inference")
super(ExactMarginalLogLikelihood, self).__init__(likelihood, model)
|
https://github.com/cornellius-gp/gpytorch/issues/905
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-c955f39ee560> in <module>
4 with torch.no_grad():
5 for x_batch, y_batch in test_loader:
----> 6 preds = model(x_batch)
7 means = torch.cat([means, preds.mean.cpu()])
8 means = means[1:]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/models/abstract_variational_gp.py in __call__(self, inputs, **kwargs)
20 inputs = inputs.unsqueeze(-1)
21
---> 22 return self.variational_strategy(inputs)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in __call__(self, x)
228 self._memoize_cache = dict()
229
--> 230 return super(VariationalStrategy, self).__call__(x)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
20
21 def __call__(self, *inputs, **kwargs):
---> 22 outputs = self.forward(*inputs, **kwargs)
23 if isinstance(outputs, list):
24 return [_validate_module_outputs(output) for output in outputs]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in forward(self, x)
214 induc_induc_covar.inv_matmul(induc_data_covar)
215 )
--> 216 data_covariance = data_data_covar + neg_induc_data_data_covar
217 predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
218
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/lazy/sum_lazy_tensor.py in __add__(self, other)
75 return SumLazyTensor(*(list(self.lazy_tensors) + [other]))
76 else:
---> 77 raise AttributeError("other must be a LazyTensor")
78
79 def diag(self):
AttributeError: other must be a LazyTensor
|
AttributeError
|
def forward(self, function_dist, target, *params):
r"""
Computes the MLL given :math:`p(\mathbf f)` and :math:`\mathbf y`.
:param ~gpytorch.distributions.MultivariateNormal function_dist: :math:`p(\mathbf f)`
the outputs of the latent function (the :obj:`gpytorch.models.ExactGP`)
:param torch.Tensor target: :math:`\mathbf y` The target values
:rtype: torch.Tensor
:return: Exact MLL. Output shape corresponds to batch shape of the model/input data.
"""
if not isinstance(function_dist, MultivariateNormal):
raise RuntimeError(
"ExactMarginalLogLikelihood can only operate on Gaussian random variables"
)
# Get the log prob of the marginal distribution
output = self.likelihood(function_dist, *params)
res = output.log_prob(target)
# Add additional terms (SGPR / learned inducing points, heteroskedastic likelihood models)
for added_loss_term in self.model.added_loss_terms():
res = res.add(added_loss_term.loss(*params))
# Add log probs of priors on the (functions of) parameters
for _, prior, closure, _ in self.named_priors():
res.add_(prior.log_prob(closure()).sum())
# Scale by the amount of data we have
num_data = target.size(-1)
return res.div_(num_data)
|
def forward(self, output, target, *params):
if not isinstance(output, MultivariateNormal):
raise RuntimeError(
"ExactMarginalLogLikelihood can only operate on Gaussian random variables"
)
# Get the log prob of the marginal distribution
output = self.likelihood(output, *params)
res = output.log_prob(target)
# Add additional terms (SGPR / learned inducing points, heteroskedastic likelihood models)
for added_loss_term in self.model.added_loss_terms():
res = res.add(added_loss_term.loss(*params))
# Add log probs of priors on the (functions of) parameters
for _, prior, closure, _ in self.named_priors():
res.add_(prior.log_prob(closure()).sum())
# Scale by the amount of data we have
num_data = target.size(-1)
return res.div_(num_data)
|
https://github.com/cornellius-gp/gpytorch/issues/905
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-c955f39ee560> in <module>
4 with torch.no_grad():
5 for x_batch, y_batch in test_loader:
----> 6 preds = model(x_batch)
7 means = torch.cat([means, preds.mean.cpu()])
8 means = means[1:]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/models/abstract_variational_gp.py in __call__(self, inputs, **kwargs)
20 inputs = inputs.unsqueeze(-1)
21
---> 22 return self.variational_strategy(inputs)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in __call__(self, x)
228 self._memoize_cache = dict()
229
--> 230 return super(VariationalStrategy, self).__call__(x)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
20
21 def __call__(self, *inputs, **kwargs):
---> 22 outputs = self.forward(*inputs, **kwargs)
23 if isinstance(outputs, list):
24 return [_validate_module_outputs(output) for output in outputs]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in forward(self, x)
214 induc_induc_covar.inv_matmul(induc_data_covar)
215 )
--> 216 data_covariance = data_data_covar + neg_induc_data_data_covar
217 predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
218
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/lazy/sum_lazy_tensor.py in __add__(self, other)
75 return SumLazyTensor(*(list(self.lazy_tensors) + [other]))
76 else:
---> 77 raise AttributeError("other must be a LazyTensor")
78
79 def diag(self):
AttributeError: other must be a LazyTensor
|
AttributeError
|
def forward(self, output, target, **kwargs):
"""
Computes the MLL given :math:`p(\mathbf f)` and `\mathbf y`
Args:
:attr:`output` (:obj:`gpytorch.distributions.MultivariateNormal`):
:math:`p(\mathbf f)` (or approximation)
the outputs of the latent function (the :obj:`gpytorch.models.GP`)
:attr:`target` (`torch.Tensor`):
:math:`\mathbf y` The target values
:attr:`**kwargs`:
Additional arguments to pass to the likelihood's :attr:`forward` function.
"""
raise NotImplementedError
|
def forward(self, output, target):
"""
Args:
- output: (MultivariateNormal) - the outputs of the latent function
- target: (Variable) - the target values
"""
raise NotImplementedError
|
https://github.com/cornellius-gp/gpytorch/issues/905
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-c955f39ee560> in <module>
4 with torch.no_grad():
5 for x_batch, y_batch in test_loader:
----> 6 preds = model(x_batch)
7 means = torch.cat([means, preds.mean.cpu()])
8 means = means[1:]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/models/abstract_variational_gp.py in __call__(self, inputs, **kwargs)
20 inputs = inputs.unsqueeze(-1)
21
---> 22 return self.variational_strategy(inputs)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in __call__(self, x)
228 self._memoize_cache = dict()
229
--> 230 return super(VariationalStrategy, self).__call__(x)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
20
21 def __call__(self, *inputs, **kwargs):
---> 22 outputs = self.forward(*inputs, **kwargs)
23 if isinstance(outputs, list):
24 return [_validate_module_outputs(output) for output in outputs]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in forward(self, x)
214 induc_induc_covar.inv_matmul(induc_data_covar)
215 )
--> 216 data_covariance = data_data_covar + neg_induc_data_data_covar
217 predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
218
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/lazy/sum_lazy_tensor.py in __add__(self, other)
75 return SumLazyTensor(*(list(self.lazy_tensors) + [other]))
76 else:
---> 77 raise AttributeError("other must be a LazyTensor")
78
79 def diag(self):
AttributeError: other must be a LazyTensor
|
AttributeError
|
def forward(self, variational_dist_f, target, **kwargs):
r"""
Computes the Variational ELBO given :math:`q(\mathbf f)` and :math:`\mathbf y`.
Calling this function will call the likelihood's :meth:`~gpytorch.likelihoods.Likelihood.expected_log_prob`
function.
:param ~gpytorch.distributions.MultivariateNormal variational_dist_f: :math:`q(\mathbf f)`
the outputs of the latent function (the :obj:`gpytorch.models.ApproximateGP`)
:param torch.Tensor target: :math:`\mathbf y` The target values
:param kwargs: Additional arguments passed to the
likelihood's :meth:`~gpytorch.likelihoods.Likelihood.expected_log_prob` function.
:rtype: torch.Tensor
:return: Variational ELBO. Output shape corresponds to batch shape of the model/input data.
"""
return super().forward(variational_dist_f, target, **kwargs)
|
def forward(self, variational_dist_f, target, **kwargs):
num_batch = variational_dist_f.event_shape.numel()
log_likelihood = self.likelihood.expected_log_prob(
target, variational_dist_f, **kwargs
).div(num_batch)
kl_divergence = self.model.variational_strategy.kl_divergence()
if kl_divergence.dim() > log_likelihood.dim():
kl_divergence = kl_divergence.sum(-1)
if log_likelihood.numel() == 1:
kl_divergence = kl_divergence.sum()
kl_divergence = kl_divergence.div(self.num_data)
# Add any additional registered loss terms
added_loss = torch.zeros_like(kl_divergence)
had_added_losses = False
for added_loss_term in self.model.added_loss_terms():
added_loss.add_(added_loss_term.loss())
had_added_losses = True
if self.combine_terms:
res = log_likelihood - kl_divergence
for _, prior, closure, _ in self.named_priors():
res.add_(prior.log_prob(closure()).sum().div(self.num_data))
return res + added_loss
else:
log_prior = torch.zeros_like(log_likelihood)
for _, prior, closure, _ in self.named_priors():
log_prior.add_(prior.log_prob(closure()).sum())
if had_added_losses:
return (
log_likelihood,
kl_divergence,
log_prior.div(self.num_data),
added_loss,
)
else:
return log_likelihood, kl_divergence, log_prior.div(self.num_data)
|
https://github.com/cornellius-gp/gpytorch/issues/905
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-c955f39ee560> in <module>
4 with torch.no_grad():
5 for x_batch, y_batch in test_loader:
----> 6 preds = model(x_batch)
7 means = torch.cat([means, preds.mean.cpu()])
8 means = means[1:]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/models/abstract_variational_gp.py in __call__(self, inputs, **kwargs)
20 inputs = inputs.unsqueeze(-1)
21
---> 22 return self.variational_strategy(inputs)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in __call__(self, x)
228 self._memoize_cache = dict()
229
--> 230 return super(VariationalStrategy, self).__call__(x)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
20
21 def __call__(self, *inputs, **kwargs):
---> 22 outputs = self.forward(*inputs, **kwargs)
23 if isinstance(outputs, list):
24 return [_validate_module_outputs(output) for output in outputs]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in forward(self, x)
214 induc_induc_covar.inv_matmul(induc_data_covar)
215 )
--> 216 data_covariance = data_data_covar + neg_induc_data_data_covar
217 predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
218
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/lazy/sum_lazy_tensor.py in __add__(self, other)
75 return SumLazyTensor(*(list(self.lazy_tensors) + [other]))
76 else:
---> 77 raise AttributeError("other must be a LazyTensor")
78
79 def diag(self):
AttributeError: other must be a LazyTensor
|
AttributeError
|
def forward(self, variational_dist_f, target, **kwargs):
r"""
Computes the Variational ELBO given :math:`q(\mathbf f)` and :math:`\mathbf y`.
Calling this function will call the likelihood's :meth:`~gpytorch.likelihoods.Likelihood.expected_log_prob`
function.
:param ~gpytorch.distributions.MultivariateNormal variational_dist_f: :math:`q(\mathbf f)`
the outputs of the latent function (the :obj:`gpytorch.models.ApproximateGP`)
:param torch.Tensor target: :math:`\mathbf y` The target values
:param kwargs: Additional arguments passed to the
likelihood's :meth:`~gpytorch.likelihoods.Likelihood.expected_log_prob` function.
:rtype: torch.Tensor
:return: Variational ELBO. Output shape corresponds to batch shape of the model/input data.
"""
return super().forward(variational_dist_f, target, **kwargs)
|
def forward(self, variational_dist_f, target, **kwargs):
num_batch = variational_dist_f.event_shape[0]
variational_dist_u = self.model.variational_strategy.variational_distribution.variational_distribution
prior_dist = self.model.variational_strategy.prior_distribution
log_likelihood = self.likelihood.expected_log_prob(
target, variational_dist_f, **kwargs
)
log_likelihood = log_likelihood.div(num_batch)
num_samples = settings.num_likelihood_samples.value()
variational_samples = variational_dist_u.rsample(torch.Size([num_samples]))
kl_divergence = (
variational_dist_u.log_prob(variational_samples)
- prior_dist.log_prob(variational_samples)
).mean(0)
kl_divergence = kl_divergence.div(self.num_data)
res = log_likelihood - kl_divergence
for _, prior, closure, _ in self.named_priors():
res.add_(prior.log_prob(closure()).sum().div(self.num_data))
return res
|
https://github.com/cornellius-gp/gpytorch/issues/905
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-c955f39ee560> in <module>
4 with torch.no_grad():
5 for x_batch, y_batch in test_loader:
----> 6 preds = model(x_batch)
7 means = torch.cat([means, preds.mean.cpu()])
8 means = means[1:]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/models/abstract_variational_gp.py in __call__(self, inputs, **kwargs)
20 inputs = inputs.unsqueeze(-1)
21
---> 22 return self.variational_strategy(inputs)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in __call__(self, x)
228 self._memoize_cache = dict()
229
--> 230 return super(VariationalStrategy, self).__call__(x)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
20
21 def __call__(self, *inputs, **kwargs):
---> 22 outputs = self.forward(*inputs, **kwargs)
23 if isinstance(outputs, list):
24 return [_validate_module_outputs(output) for output in outputs]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in forward(self, x)
214 induc_induc_covar.inv_matmul(induc_data_covar)
215 )
--> 216 data_covariance = data_data_covar + neg_induc_data_data_covar
217 predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
218
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/lazy/sum_lazy_tensor.py in __add__(self, other)
75 return SumLazyTensor(*(list(self.lazy_tensors) + [other]))
76 else:
---> 77 raise AttributeError("other must be a LazyTensor")
78
79 def diag(self):
AttributeError: other must be a LazyTensor
|
AttributeError
|
def __init__(self, *args, **kwargs):
warnings.warn("PyroVariationalGP has been renamed to PyroGP.", DeprecationWarning)
super().__init__(*args, **kwargs)
|
def __init__(self, *args, **kwargs):
raise RuntimeError(
"Cannot use a PyroVariationalGP because you dont have Pyro installed."
)
|
https://github.com/cornellius-gp/gpytorch/issues/905
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-c955f39ee560> in <module>
4 with torch.no_grad():
5 for x_batch, y_batch in test_loader:
----> 6 preds = model(x_batch)
7 means = torch.cat([means, preds.mean.cpu()])
8 means = means[1:]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/models/abstract_variational_gp.py in __call__(self, inputs, **kwargs)
20 inputs = inputs.unsqueeze(-1)
21
---> 22 return self.variational_strategy(inputs)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in __call__(self, x)
228 self._memoize_cache = dict()
229
--> 230 return super(VariationalStrategy, self).__call__(x)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
20
21 def __call__(self, *inputs, **kwargs):
---> 22 outputs = self.forward(*inputs, **kwargs)
23 if isinstance(outputs, list):
24 return [_validate_module_outputs(output) for output in outputs]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in forward(self, x)
214 induc_induc_covar.inv_matmul(induc_data_covar)
215 )
--> 216 data_covariance = data_data_covar + neg_induc_data_data_covar
217 predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
218
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/lazy/sum_lazy_tensor.py in __add__(self, other)
75 return SumLazyTensor(*(list(self.lazy_tensors) + [other]))
76 else:
---> 77 raise AttributeError("other must be a LazyTensor")
78
79 def diag(self):
AttributeError: other must be a LazyTensor
|
AttributeError
|
def sub_variational_strategies(self):
if not hasattr(self, "_sub_variational_strategies_memo"):
self._sub_variational_strategies_memo = [
module.variational_strategy
for module in self.model.modules()
if isinstance(module, ApproximateGP)
]
return self._sub_variational_strategies_memo
|
def sub_variational_strategies(self):
if not hasattr(self, "_sub_variational_strategies_memo"):
self._sub_variational_strategies_memo = [
module.variational_strategy
for module in self.model.modules()
if isinstance(module, AbstractVariationalGP)
]
return self._sub_variational_strategies_memo
|
https://github.com/cornellius-gp/gpytorch/issues/905
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-c955f39ee560> in <module>
4 with torch.no_grad():
5 for x_batch, y_batch in test_loader:
----> 6 preds = model(x_batch)
7 means = torch.cat([means, preds.mean.cpu()])
8 means = means[1:]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/models/abstract_variational_gp.py in __call__(self, inputs, **kwargs)
20 inputs = inputs.unsqueeze(-1)
21
---> 22 return self.variational_strategy(inputs)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in __call__(self, x)
228 self._memoize_cache = dict()
229
--> 230 return super(VariationalStrategy, self).__call__(x)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
20
21 def __call__(self, *inputs, **kwargs):
---> 22 outputs = self.forward(*inputs, **kwargs)
23 if isinstance(outputs, list):
24 return [_validate_module_outputs(output) for output in outputs]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in forward(self, x)
214 induc_induc_covar.inv_matmul(induc_data_covar)
215 )
--> 216 data_covariance = data_data_covar + neg_induc_data_data_covar
217 predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
218
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/lazy/sum_lazy_tensor.py in __add__(self, other)
75 return SumLazyTensor(*(list(self.lazy_tensors) + [other]))
76 else:
---> 77 raise AttributeError("other must be a LazyTensor")
78
79 def diag(self):
AttributeError: other must be a LazyTensor
|
AttributeError
|
def __call__(self, inputs, are_samples=False, **kwargs):
"""
Forward data through this hidden GP layer. The output is a MultitaskMultivariateNormal distribution
(or MultivariateNormal distribution is output_dims=None).
If the input is >=2 dimensional Tensor (e.g. `n x d`), we pass the input through each hidden GP,
resulting in a `n x h` multitask Gaussian distribution (where all of the `h` tasks represent an
output dimension and are independent from one another). We then draw `s` samples from these Gaussians,
resulting in a `s x n x h` MultitaskMultivariateNormal distribution.
If the input is a >=3 dimensional Tensor, and the `are_samples=True` kwarg is set, then we assume that
the outermost batch dimension is a samples dimension. The output will have the same number of samples.
For example, a `s x b x n x d` input will result in a `s x b x n x h` MultitaskMultivariateNormal distribution.
The goal of these last two points is that if you have a tensor `x` that is `n x d`, then:
>>> hidden_gp2(hidden_gp(x))
will just work, and return a tensor of size `s x n x h2`, where `h2` is the output dimensionality of
hidden_gp2. In this way, hidden GP layers are easily composable.
"""
deterministic_inputs = not are_samples
if isinstance(inputs, MultitaskMultivariateNormal):
inputs = torch.distributions.Normal(
loc=inputs.mean, scale=inputs.variance.sqrt()
).rsample()
deterministic_inputs = False
if settings.debug.on():
if not torch.is_tensor(inputs):
raise ValueError(
"`inputs` should either be a MultitaskMultivariateNormal or a Tensor, got "
f"{inputs.__class__.__Name__}"
)
if inputs.size(-1) != self.input_dims:
raise RuntimeError(
f"Input shape did not match self.input_dims. Got total feature dims [{inputs.size(-1)}],"
f" expected [{self.input_dims}]"
)
# Repeat the input for all possible outputs
if self.output_dims is not None:
inputs = inputs.unsqueeze(-3)
inputs = inputs.expand(*inputs.shape[:-3], self.output_dims, *inputs.shape[-2:])
# Now run samples through the GP
output = ApproximateGP.__call__(self, inputs)
if self.output_dims is not None:
mean = output.loc.transpose(-1, -2)
covar = BlockDiagLazyTensor(output.lazy_covariance_matrix, block_dim=-3)
output = MultitaskMultivariateNormal(mean, covar, interleaved=False)
# Maybe expand inputs?
if deterministic_inputs:
output = output.expand(
torch.Size([settings.num_likelihood_samples.value()]) + output.batch_shape
)
return output
|
def __call__(self, inputs, are_samples=False, **kwargs):
"""
Forward data through this hidden GP layer. The output is a MultitaskMultivariateNormal distribution
(or MultivariateNormal distribution is output_dims=None).
If the input is >=2 dimensional Tensor (e.g. `n x d`), we pass the input through each hidden GP,
resulting in a `n x h` multitask Gaussian distribution (where all of the `h` tasks represent an
output dimension and are independent from one another). We then draw `s` samples from these Gaussians,
resulting in a `s x n x h` MultitaskMultivariateNormal distribution.
If the input is a >=3 dimensional Tensor, and the `are_samples=True` kwarg is set, then we assume that
the outermost batch dimension is a samples dimension. The output will have the same number of samples.
For example, a `s x b x n x d` input will result in a `s x b x n x h` MultitaskMultivariateNormal distribution.
The goal of these last two points is that if you have a tensor `x` that is `n x d`, then:
>>> hidden_gp2(hidden_gp(x))
will just work, and return a tensor of size `s x n x h2`, where `h2` is the output dimensionality of
hidden_gp2. In this way, hidden GP layers are easily composable.
"""
deterministic_inputs = not are_samples
if isinstance(inputs, MultitaskMultivariateNormal):
inputs = torch.distributions.Normal(
loc=inputs.mean, scale=inputs.variance.sqrt()
).rsample()
deterministic_inputs = False
if settings.debug.on():
if not torch.is_tensor(inputs):
raise ValueError(
"`inputs` should either be a MultitaskMultivariateNormal or a Tensor, got "
f"{inputs.__class__.__Name__}"
)
if inputs.size(-1) != self.input_dims:
raise RuntimeError(
f"Input shape did not match self.input_dims. Got total feature dims [{inputs.size(-1)}],"
f" expected [{self.input_dims}]"
)
# Repeat the input for all possible outputs
if self.output_dims is not None:
inputs = inputs.unsqueeze(-3)
inputs = inputs.expand(*inputs.shape[:-3], self.output_dims, *inputs.shape[-2:])
# Now run samples through the GP
output = AbstractVariationalGP.__call__(self, inputs)
if self.output_dims is not None:
mean = output.loc.transpose(-1, -2)
covar = BlockDiagLazyTensor(output.lazy_covariance_matrix, block_dim=-3)
output = MultitaskMultivariateNormal(mean, covar, interleaved=False)
# Maybe expand inputs?
if deterministic_inputs:
output = output.expand(
torch.Size([settings.num_likelihood_samples.value()]) + output.batch_shape
)
return output
|
https://github.com/cornellius-gp/gpytorch/issues/905
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-c955f39ee560> in <module>
4 with torch.no_grad():
5 for x_batch, y_batch in test_loader:
----> 6 preds = model(x_batch)
7 means = torch.cat([means, preds.mean.cpu()])
8 means = means[1:]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/models/abstract_variational_gp.py in __call__(self, inputs, **kwargs)
20 inputs = inputs.unsqueeze(-1)
21
---> 22 return self.variational_strategy(inputs)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in __call__(self, x)
228 self._memoize_cache = dict()
229
--> 230 return super(VariationalStrategy, self).__call__(x)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
20
21 def __call__(self, *inputs, **kwargs):
---> 22 outputs = self.forward(*inputs, **kwargs)
23 if isinstance(outputs, list):
24 return [_validate_module_outputs(output) for output in outputs]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in forward(self, x)
214 induc_induc_covar.inv_matmul(induc_data_covar)
215 )
--> 216 data_covariance = data_data_covar + neg_induc_data_data_covar
217 predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
218
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/lazy/sum_lazy_tensor.py in __add__(self, other)
75 return SumLazyTensor(*(list(self.lazy_tensors) + [other]))
76 else:
---> 77 raise AttributeError("other must be a LazyTensor")
78
79 def diag(self):
AttributeError: other must be a LazyTensor
|
AttributeError
|
def forward(self, x, inducing_points, inducing_values, variational_inducing_covar=None):
if x.ndimension() == 1:
x = x.unsqueeze(-1)
elif x.ndimension() != 2:
raise RuntimeError(
"AdditiveGridInterpolationVariationalStrategy expects a 2d tensor."
)
num_data, num_dim = x.size()
if num_dim != self.num_dim:
raise RuntimeError("The number of dims should match the number specified.")
output = super().forward(
x, inducing_points, inducing_values, variational_inducing_covar
)
if self.sum_output:
if variational_inducing_covar is not None:
mean = output.mean.sum(0)
covar = output.lazy_covariance_matrix.sum(-3)
return MultivariateNormal(mean, covar)
else:
return Delta(output.mean.sum(0))
else:
return output
|
def forward(self, x):
if x.ndimension() == 1:
x = x.unsqueeze(-1)
elif x.ndimension() != 2:
raise RuntimeError(
"AdditiveGridInterpolationVariationalStrategy expects a 2d tensor."
)
num_data, num_dim = x.size()
if num_dim != self.num_dim:
raise RuntimeError("The number of dims should match the number specified.")
output = super(AdditiveGridInterpolationVariationalStrategy, self).forward(x)
if self.sum_output:
mean = output.mean.sum(0)
covar = output.lazy_covariance_matrix.sum(-3)
return MultivariateNormal(mean, covar)
else:
return output
|
https://github.com/cornellius-gp/gpytorch/issues/905
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-c955f39ee560> in <module>
4 with torch.no_grad():
5 for x_batch, y_batch in test_loader:
----> 6 preds = model(x_batch)
7 means = torch.cat([means, preds.mean.cpu()])
8 means = means[1:]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/models/abstract_variational_gp.py in __call__(self, inputs, **kwargs)
20 inputs = inputs.unsqueeze(-1)
21
---> 22 return self.variational_strategy(inputs)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in __call__(self, x)
228 self._memoize_cache = dict()
229
--> 230 return super(VariationalStrategy, self).__call__(x)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
20
21 def __call__(self, *inputs, **kwargs):
---> 22 outputs = self.forward(*inputs, **kwargs)
23 if isinstance(outputs, list):
24 return [_validate_module_outputs(output) for output in outputs]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in forward(self, x)
214 induc_induc_covar.inv_matmul(induc_data_covar)
215 )
--> 216 data_covariance = data_data_covar + neg_induc_data_data_covar
217 predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
218
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/lazy/sum_lazy_tensor.py in __add__(self, other)
75 return SumLazyTensor(*(list(self.lazy_tensors) + [other]))
76 else:
---> 77 raise AttributeError("other must be a LazyTensor")
78
79 def diag(self):
AttributeError: other must be a LazyTensor
|
AttributeError
|
def __init__(
self, num_inducing_points, batch_shape=torch.Size([]), mean_init_std=1e-3, **kwargs
):
super().__init__(
num_inducing_points=num_inducing_points,
batch_shape=batch_shape,
mean_init_std=mean_init_std,
)
mean_init = torch.zeros(num_inducing_points)
covar_init = torch.eye(num_inducing_points, num_inducing_points)
mean_init = mean_init.repeat(*batch_shape, 1)
covar_init = covar_init.repeat(*batch_shape, 1, 1)
self.register_parameter(
name="variational_mean", parameter=torch.nn.Parameter(mean_init)
)
self.register_parameter(
name="chol_variational_covar", parameter=torch.nn.Parameter(covar_init)
)
|
def __init__(self, num_inducing_points, batch_shape=torch.Size([]), **kwargs):
"""
Args:
num_inducing_points (int): Size of the variational distribution. This implies that the variational mean
should be this size, and the variational covariance matrix should have this many rows and columns.
batch_shape (torch.Size, optional): Specifies an optional batch
size for the variational parameters. This is useful for example
when doing additive variational inference.
"""
batch_shape = _deprecate_kwarg_with_transform(
kwargs, "batch_size", "batch_shape", batch_shape, lambda n: torch.Size([n])
)
super(VariationalDistribution, self).__init__()
mean_init = torch.zeros(num_inducing_points)
covar_init = torch.eye(num_inducing_points, num_inducing_points)
mean_init = mean_init.repeat(*batch_shape, 1)
covar_init = covar_init.repeat(*batch_shape, 1, 1)
self.register_parameter(
name="variational_mean", parameter=torch.nn.Parameter(mean_init)
)
self.register_parameter(
name="chol_variational_covar", parameter=torch.nn.Parameter(covar_init)
)
|
https://github.com/cornellius-gp/gpytorch/issues/905
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-c955f39ee560> in <module>
4 with torch.no_grad():
5 for x_batch, y_batch in test_loader:
----> 6 preds = model(x_batch)
7 means = torch.cat([means, preds.mean.cpu()])
8 means = means[1:]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/models/abstract_variational_gp.py in __call__(self, inputs, **kwargs)
20 inputs = inputs.unsqueeze(-1)
21
---> 22 return self.variational_strategy(inputs)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in __call__(self, x)
228 self._memoize_cache = dict()
229
--> 230 return super(VariationalStrategy, self).__call__(x)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
20
21 def __call__(self, *inputs, **kwargs):
---> 22 outputs = self.forward(*inputs, **kwargs)
23 if isinstance(outputs, list):
24 return [_validate_module_outputs(output) for output in outputs]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in forward(self, x)
214 induc_induc_covar.inv_matmul(induc_data_covar)
215 )
--> 216 data_covariance = data_data_covar + neg_induc_data_data_covar
217 predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
218
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/lazy/sum_lazy_tensor.py in __add__(self, other)
75 return SumLazyTensor(*(list(self.lazy_tensors) + [other]))
76 else:
---> 77 raise AttributeError("other must be a LazyTensor")
78
79 def diag(self):
AttributeError: other must be a LazyTensor
|
AttributeError
|
def initialize_variational_distribution(self, prior_dist):
self.variational_mean.data.copy_(prior_dist.mean)
self.variational_mean.data.add_(
self.mean_init_std, torch.randn_like(prior_dist.mean)
)
self.chol_variational_covar.data.copy_(
prior_dist.lazy_covariance_matrix.cholesky().evaluate()
)
|
def initialize_variational_distribution(self, prior_dist):
self.variational_mean.data.copy_(prior_dist.mean)
self.chol_variational_covar.data.copy_(prior_dist.scale_tril)
|
https://github.com/cornellius-gp/gpytorch/issues/905
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-c955f39ee560> in <module>
4 with torch.no_grad():
5 for x_batch, y_batch in test_loader:
----> 6 preds = model(x_batch)
7 means = torch.cat([means, preds.mean.cpu()])
8 means = means[1:]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/models/abstract_variational_gp.py in __call__(self, inputs, **kwargs)
20 inputs = inputs.unsqueeze(-1)
21
---> 22 return self.variational_strategy(inputs)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in __call__(self, x)
228 self._memoize_cache = dict()
229
--> 230 return super(VariationalStrategy, self).__call__(x)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
20
21 def __call__(self, *inputs, **kwargs):
---> 22 outputs = self.forward(*inputs, **kwargs)
23 if isinstance(outputs, list):
24 return [_validate_module_outputs(output) for output in outputs]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in forward(self, x)
214 induc_induc_covar.inv_matmul(induc_data_covar)
215 )
--> 216 data_covariance = data_data_covar + neg_induc_data_data_covar
217 predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
218
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/lazy/sum_lazy_tensor.py in __add__(self, other)
75 return SumLazyTensor(*(list(self.lazy_tensors) + [other]))
76 else:
---> 77 raise AttributeError("other must be a LazyTensor")
78
79 def diag(self):
AttributeError: other must be a LazyTensor
|
AttributeError
|
def _compute_grid(self, inputs):
n_data, n_dimensions = inputs.size(-2), inputs.size(-1)
batch_shape = inputs.shape[:-2]
inputs = inputs.reshape(-1, n_dimensions)
interp_indices, interp_values = Interpolation().interpolate(self.grid, inputs)
interp_indices = interp_indices.view(*batch_shape, n_data, -1)
interp_values = interp_values.view(*batch_shape, n_data, -1)
if (interp_indices.dim() - 2) != len(self._variational_distribution.batch_shape):
batch_shape = _mul_broadcast_shape(
interp_indices.shape[:-2], self._variational_distribution.batch_shape
)
interp_indices = interp_indices.expand(*batch_shape, *interp_indices.shape[-2:])
interp_values = interp_values.expand(*batch_shape, *interp_values.shape[-2:])
return interp_indices, interp_values
|
def _compute_grid(self, inputs):
if inputs.ndimension() == 1:
inputs = inputs.unsqueeze(1)
interp_indices, interp_values = Interpolation().interpolate(self.grid, inputs)
return interp_indices, interp_values
|
https://github.com/cornellius-gp/gpytorch/issues/905
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-c955f39ee560> in <module>
4 with torch.no_grad():
5 for x_batch, y_batch in test_loader:
----> 6 preds = model(x_batch)
7 means = torch.cat([means, preds.mean.cpu()])
8 means = means[1:]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/models/abstract_variational_gp.py in __call__(self, inputs, **kwargs)
20 inputs = inputs.unsqueeze(-1)
21
---> 22 return self.variational_strategy(inputs)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in __call__(self, x)
228 self._memoize_cache = dict()
229
--> 230 return super(VariationalStrategy, self).__call__(x)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
20
21 def __call__(self, *inputs, **kwargs):
---> 22 outputs = self.forward(*inputs, **kwargs)
23 if isinstance(outputs, list):
24 return [_validate_module_outputs(output) for output in outputs]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in forward(self, x)
214 induc_induc_covar.inv_matmul(induc_data_covar)
215 )
--> 216 data_covariance = data_data_covar + neg_induc_data_data_covar
217 predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
218
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/lazy/sum_lazy_tensor.py in __add__(self, other)
75 return SumLazyTensor(*(list(self.lazy_tensors) + [other]))
76 else:
---> 77 raise AttributeError("other must be a LazyTensor")
78
79 def diag(self):
AttributeError: other must be a LazyTensor
|
AttributeError
|
def forward(self, x, inducing_points, inducing_values, variational_inducing_covar=None):
if variational_inducing_covar is None:
raise RuntimeError(
"GridInterpolationVariationalStrategy is only compatible with Gaussian variational "
f"distributions. Got ({self.variational_distribution.__class__.__name__}."
)
variational_distribution = self.variational_distribution
# Get interpolations
interp_indices, interp_values = self._compute_grid(x)
# Compute test mean
# Left multiply samples by interpolation matrix
predictive_mean = left_interp(
interp_indices, interp_values, inducing_values.unsqueeze(-1)
)
predictive_mean = predictive_mean.squeeze(-1)
# Compute test covar
predictive_covar = InterpolatedLazyTensor(
variational_distribution.lazy_covariance_matrix,
interp_indices,
interp_values,
interp_indices,
interp_values,
)
output = MultivariateNormal(predictive_mean, predictive_covar)
return output
|
def forward(self, x):
variational_distribution = self.variational_distribution.variational_distribution
# Get interpolations
interp_indices, interp_values = self._compute_grid(x)
# Compute test mean
# Left multiply samples by interpolation matrix
predictive_mean = left_interp(
interp_indices, interp_values, variational_distribution.mean.unsqueeze(-1)
)
predictive_mean = predictive_mean.squeeze(-1)
# Compute test covar
predictive_covar = InterpolatedLazyTensor(
variational_distribution.lazy_covariance_matrix,
interp_indices,
interp_values,
interp_indices,
interp_values,
)
output = MultivariateNormal(predictive_mean, predictive_covar)
return output
|
https://github.com/cornellius-gp/gpytorch/issues/905
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-c955f39ee560> in <module>
4 with torch.no_grad():
5 for x_batch, y_batch in test_loader:
----> 6 preds = model(x_batch)
7 means = torch.cat([means, preds.mean.cpu()])
8 means = means[1:]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/models/abstract_variational_gp.py in __call__(self, inputs, **kwargs)
20 inputs = inputs.unsqueeze(-1)
21
---> 22 return self.variational_strategy(inputs)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in __call__(self, x)
228 self._memoize_cache = dict()
229
--> 230 return super(VariationalStrategy, self).__call__(x)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
20
21 def __call__(self, *inputs, **kwargs):
---> 22 outputs = self.forward(*inputs, **kwargs)
23 if isinstance(outputs, list):
24 return [_validate_module_outputs(output) for output in outputs]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in forward(self, x)
214 induc_induc_covar.inv_matmul(induc_data_covar)
215 )
--> 216 data_covariance = data_data_covar + neg_induc_data_data_covar
217 predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
218
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/lazy/sum_lazy_tensor.py in __add__(self, other)
75 return SumLazyTensor(*(list(self.lazy_tensors) + [other]))
76 else:
---> 77 raise AttributeError("other must be a LazyTensor")
78
79 def diag(self):
AttributeError: other must be a LazyTensor
|
AttributeError
|
def __init__(
self,
model,
inducing_points,
variational_distribution,
learn_inducing_locations=True,
):
super().__init__(
model, inducing_points, variational_distribution, learn_inducing_locations
)
self.register_buffer("updated_strategy", torch.tensor(True))
self._register_load_state_dict_pre_hook(_ensure_updated_strategy_flag_set)
|
def __init__(
self,
model,
inducing_points,
variational_distribution,
learn_inducing_locations=False,
):
"""
Args:
model (:obj:`gpytorch.model.AbstractVariationalGP`): Model this strategy is applied to. Typically passed in
when the VariationalStrategy is created in the __init__ method of the user defined model.
inducing_points (torch.tensor): Tensor containing a set of inducing points to use for variational inference.
variational_distribution (:obj:`gpytorch.variational.VariationalDistribution`): A VariationalDistribution
object that represents the form of the variational distribution :math:`q(u)`
learn_inducing_locations (bool): Whether or not the inducing point locations should be learned (e.g. SVGP).
"""
super(VariationalStrategy, self).__init__()
object.__setattr__(self, "model", model)
inducing_points = inducing_points.clone()
if inducing_points.dim() == 1:
inducing_points = inducing_points.unsqueeze(-1)
if learn_inducing_locations:
self.register_parameter(
name="inducing_points", parameter=torch.nn.Parameter(inducing_points)
)
else:
self.register_buffer("inducing_points", inducing_points)
self.variational_distribution = variational_distribution
self.register_buffer("variational_params_initialized", torch.tensor(0))
|
https://github.com/cornellius-gp/gpytorch/issues/905
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-c955f39ee560> in <module>
4 with torch.no_grad():
5 for x_batch, y_batch in test_loader:
----> 6 preds = model(x_batch)
7 means = torch.cat([means, preds.mean.cpu()])
8 means = means[1:]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/models/abstract_variational_gp.py in __call__(self, inputs, **kwargs)
20 inputs = inputs.unsqueeze(-1)
21
---> 22 return self.variational_strategy(inputs)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in __call__(self, x)
228 self._memoize_cache = dict()
229
--> 230 return super(VariationalStrategy, self).__call__(x)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
20
21 def __call__(self, *inputs, **kwargs):
---> 22 outputs = self.forward(*inputs, **kwargs)
23 if isinstance(outputs, list):
24 return [_validate_module_outputs(output) for output in outputs]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in forward(self, x)
214 induc_induc_covar.inv_matmul(induc_data_covar)
215 )
--> 216 data_covariance = data_data_covar + neg_induc_data_data_covar
217 predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
218
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/lazy/sum_lazy_tensor.py in __add__(self, other)
75 return SumLazyTensor(*(list(self.lazy_tensors) + [other]))
76 else:
---> 77 raise AttributeError("other must be a LazyTensor")
78
79 def diag(self):
AttributeError: other must be a LazyTensor
|
AttributeError
|
def prior_distribution(self):
zeros = torch.zeros_like(self.variational_distribution.mean)
ones = torch.ones_like(zeros)
res = MultivariateNormal(zeros, DiagLazyTensor(ones))
return res
|
def prior_distribution(self):
"""
The :func:`~gpytorch.variational.VariationalStrategy.prior_distribution` method determines how to compute the
GP prior distribution of the inducing points, e.g. :math:`p(u) \sim N(\mu(X_u), K(X_u, X_u))`. Most commonly,
this is done simply by calling the user defined GP prior on the inducing point data directly.
"""
out = self.model.forward(self.inducing_points)
res = MultivariateNormal(out.mean, out.lazy_covariance_matrix.add_jitter())
return res
|
https://github.com/cornellius-gp/gpytorch/issues/905
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-c955f39ee560> in <module>
4 with torch.no_grad():
5 for x_batch, y_batch in test_loader:
----> 6 preds = model(x_batch)
7 means = torch.cat([means, preds.mean.cpu()])
8 means = means[1:]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/models/abstract_variational_gp.py in __call__(self, inputs, **kwargs)
20 inputs = inputs.unsqueeze(-1)
21
---> 22 return self.variational_strategy(inputs)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in __call__(self, x)
228 self._memoize_cache = dict()
229
--> 230 return super(VariationalStrategy, self).__call__(x)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
20
21 def __call__(self, *inputs, **kwargs):
---> 22 outputs = self.forward(*inputs, **kwargs)
23 if isinstance(outputs, list):
24 return [_validate_module_outputs(output) for output in outputs]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in forward(self, x)
214 induc_induc_covar.inv_matmul(induc_data_covar)
215 )
--> 216 data_covariance = data_data_covar + neg_induc_data_data_covar
217 predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
218
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/lazy/sum_lazy_tensor.py in __add__(self, other)
75 return SumLazyTensor(*(list(self.lazy_tensors) + [other]))
76 else:
---> 77 raise AttributeError("other must be a LazyTensor")
78
79 def diag(self):
AttributeError: other must be a LazyTensor
|
AttributeError
|
def forward(self, x, inducing_points, inducing_values, variational_inducing_covar=None):
# Compute full prior distribution
full_inputs = torch.cat([inducing_points, x], dim=-2)
full_output = self.model.forward(full_inputs)
full_covar = full_output.lazy_covariance_matrix
# Covariance terms
num_induc = inducing_points.size(-2)
test_mean = full_output.mean[..., num_induc:]
induc_induc_covar = full_covar[..., :num_induc, :num_induc].add_jitter()
induc_data_covar = full_covar[..., :num_induc, num_induc:].evaluate()
data_data_covar = full_covar[..., num_induc:, num_induc:]
# Compute interpolation terms
# K_ZZ^{-1/2} K_ZX
# K_ZZ^{-1/2} \mu_Z
L = self._cholesky_factor(induc_induc_covar)
interp_term = torch.triangular_solve(induc_data_covar.double(), L, upper=False)[
0
].to(full_inputs.dtype)
# Compute the mean of q(f)
# k_XZ K_ZZ^{-1/2} (m - K_ZZ^{-1/2} \mu_Z) + \mu_X
predictive_mean = (
torch.matmul(
interp_term.transpose(-1, -2),
(inducing_values - self.prior_distribution.mean).unsqueeze(-1),
).squeeze(-1)
+ test_mean
)
# Compute the covariance of q(f)
# K_XX + k_XZ K_ZZ^{-1/2} (S - I) K_ZZ^{-1/2} k_ZX
middle_term = self.prior_distribution.lazy_covariance_matrix.mul(-1)
if variational_inducing_covar is not None:
middle_term = SumLazyTensor(variational_inducing_covar, middle_term)
predictive_covar = SumLazyTensor(
data_data_covar.add_jitter(1e-4),
MatmulLazyTensor(interp_term.transpose(-1, -2), middle_term @ interp_term),
)
# Return the distribution
return MultivariateNormal(predictive_mean, predictive_covar)
|
def forward(self, x):
"""
The :func:`~gpytorch.variational.VariationalStrategy.forward` method determines how to marginalize out the
inducing point function values. Specifically, forward defines how to transform a variational distribution
over the inducing point values, :math:`q(u)`, in to a variational distribution over the function values at
specified locations x, :math:`q(f|x)`, by integrating :math:`\int p(f|x, u)q(u)du`
Args:
x (torch.tensor): Locations x to get the variational posterior of the function values at.
Returns:
:obj:`gpytorch.distributions.MultivariateNormal`: The distribution q(f|x)
"""
variational_dist = self.variational_distribution.variational_distribution
inducing_points = self.inducing_points
inducing_batch_shape = inducing_points.shape[:-2]
if inducing_batch_shape < x.shape[:-2]:
batch_shape = _mul_broadcast_shape(inducing_points.shape[:-2], x.shape[:-2])
inducing_points = inducing_points.expand(
*batch_shape, *inducing_points.shape[-2:]
)
x = x.expand(*batch_shape, *x.shape[-2:])
variational_dist = variational_dist.expand(batch_shape)
# If our points equal the inducing points, we're done
if torch.equal(x, inducing_points):
return variational_dist
# Otherwise, we have to marginalize
else:
num_induc = inducing_points.size(-2)
full_inputs = torch.cat([inducing_points, x], dim=-2)
full_output = self.model.forward(full_inputs)
full_mean, full_covar = full_output.mean, full_output.lazy_covariance_matrix
# Mean terms
test_mean = full_mean[..., num_induc:]
induc_mean = full_mean[..., :num_induc]
mean_diff = (variational_dist.mean - induc_mean).unsqueeze(-1)
# Covariance terms
induc_induc_covar = full_covar[..., :num_induc, :num_induc].add_jitter()
induc_data_covar = full_covar[..., :num_induc, num_induc:].evaluate()
data_data_covar = full_covar[..., num_induc:, num_induc:]
root_variational_covar = (
variational_dist.lazy_covariance_matrix.root_decomposition().root.evaluate()
)
# If we had to expand the inducing points, shrink the inducing mean and induc_induc_covar dimension
# This makes everything more computationally efficient
if len(inducing_batch_shape) < len(induc_induc_covar.batch_shape):
index = tuple(
0
for _ in range(
len(induc_induc_covar.batch_shape) - len(inducing_batch_shape)
)
)
repeat_size = torch.Size(
(
tuple(induc_induc_covar.batch_shape[: len(index)])
+ tuple(1 for _ in induc_induc_covar.batch_shape[len(index) :])
)
)
induc_induc_covar = BatchRepeatLazyTensor(
induc_induc_covar.__getitem__(index), repeat_size
)
# If we're less than a certain size, we'll compute the Cholesky decomposition of induc_induc_covar
cholesky = False
if settings.fast_computations.log_prob.off() or (
num_induc <= settings.max_cholesky_size.value()
):
induc_induc_covar = CholLazyTensor(induc_induc_covar.cholesky())
cholesky = True
# If we are making predictions and don't need variances, we can do things very quickly.
if not self.training and settings.skip_posterior_variances.on():
if not hasattr(self, "_mean_cache"):
self._mean_cache = induc_induc_covar.inv_matmul(mean_diff).detach()
predictive_mean = torch.add(
test_mean,
induc_data_covar.transpose(-2, -1).matmul(self._mean_cache).squeeze(-1),
)
predictive_covar = ZeroLazyTensor(test_mean.size(-1), test_mean.size(-1))
return MultivariateNormal(predictive_mean, predictive_covar)
# Cache the CG results
# For now: run variational inference without a preconditioner
# The preconditioner screws things up for some reason
with settings.max_preconditioner_size(0):
# Cache the CG results
left_tensors = torch.cat([mean_diff, root_variational_covar], -1)
with torch.no_grad():
eager_rhs = torch.cat([left_tensors, induc_data_covar], -1)
solve, probe_vecs, probe_vec_norms, probe_vec_solves, tmats = (
CachedCGLazyTensor.precompute_terms(
induc_induc_covar,
eager_rhs.detach(),
logdet_terms=(not cholesky),
include_tmats=(
not settings.skip_logdet_forward.on() and not cholesky
),
)
)
eager_rhss = [
eager_rhs.detach(),
eager_rhs[..., left_tensors.size(-1) :].detach(),
eager_rhs[..., : left_tensors.size(-1)].detach(),
]
solves = [
solve.detach(),
solve[..., left_tensors.size(-1) :].detach(),
solve[..., : left_tensors.size(-1)].detach(),
]
if settings.skip_logdet_forward.on():
eager_rhss.append(torch.cat([probe_vecs, left_tensors], -1))
solves.append(
torch.cat(
[probe_vec_solves, solve[..., : left_tensors.size(-1)]], -1
)
)
induc_induc_covar = CachedCGLazyTensor(
induc_induc_covar,
eager_rhss=eager_rhss,
solves=solves,
probe_vectors=probe_vecs,
probe_vector_norms=probe_vec_norms,
probe_vector_solves=probe_vec_solves,
probe_vector_tmats=tmats,
)
if self.training:
self._memoize_cache["prior_distribution_memo"] = MultivariateNormal(
induc_mean, induc_induc_covar
)
# Compute predictive mean/covariance
inv_products = induc_induc_covar.inv_matmul(
induc_data_covar, left_tensors.transpose(-1, -2)
)
predictive_mean = torch.add(test_mean, inv_products[..., 0, :])
predictive_covar = RootLazyTensor(inv_products[..., 1:, :].transpose(-1, -2))
if self.training:
interp_data_data_var, _ = induc_induc_covar.inv_quad_logdet(
induc_data_covar, logdet=False, reduce_inv_quad=False
)
data_covariance = DiagLazyTensor(
(data_data_covar.diag() - interp_data_data_var).clamp(0, math.inf)
)
else:
neg_induc_data_data_covar = torch.matmul(
induc_data_covar.transpose(-1, -2).mul(-1),
induc_induc_covar.inv_matmul(induc_data_covar),
)
data_covariance = data_data_covar + neg_induc_data_data_covar
predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
return MultivariateNormal(predictive_mean, predictive_covar)
|
https://github.com/cornellius-gp/gpytorch/issues/905
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-c955f39ee560> in <module>
4 with torch.no_grad():
5 for x_batch, y_batch in test_loader:
----> 6 preds = model(x_batch)
7 means = torch.cat([means, preds.mean.cpu()])
8 means = means[1:]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/models/abstract_variational_gp.py in __call__(self, inputs, **kwargs)
20 inputs = inputs.unsqueeze(-1)
21
---> 22 return self.variational_strategy(inputs)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in __call__(self, x)
228 self._memoize_cache = dict()
229
--> 230 return super(VariationalStrategy, self).__call__(x)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
20
21 def __call__(self, *inputs, **kwargs):
---> 22 outputs = self.forward(*inputs, **kwargs)
23 if isinstance(outputs, list):
24 return [_validate_module_outputs(output) for output in outputs]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in forward(self, x)
214 induc_induc_covar.inv_matmul(induc_data_covar)
215 )
--> 216 data_covariance = data_data_covar + neg_induc_data_data_covar
217 predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
218
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/lazy/sum_lazy_tensor.py in __add__(self, other)
75 return SumLazyTensor(*(list(self.lazy_tensors) + [other]))
76 else:
---> 77 raise AttributeError("other must be a LazyTensor")
78
79 def diag(self):
AttributeError: other must be a LazyTensor
|
AttributeError
|
def __call__(self, x, prior=False):
if not self.updated_strategy.item() and not prior:
with torch.no_grad():
# Get unwhitened p(u)
prior_function_dist = self(self.inducing_points, prior=True)
prior_mean = prior_function_dist.loc
L = self._cholesky_factor(
prior_function_dist.lazy_covariance_matrix.add_jitter()
)
# Temporarily turn off noise that's added to the mean
orig_mean_init_std = self._variational_distribution.mean_init_std
self._variational_distribution.mean_init_std = 0.0
# Change the variational parameters to be whitened
variational_dist = self.variational_distribution
whitened_mean = (
torch.triangular_solve(
(variational_dist.loc - prior_mean).unsqueeze(-1).double(),
L,
upper=False,
)[0]
.squeeze(-1)
.to(variational_dist.loc.dtype)
)
whitened_covar = RootLazyTensor(
torch.triangular_solve(
variational_dist.lazy_covariance_matrix.root_decomposition()
.root.evaluate()
.double(),
L,
upper=False,
)[0].to(variational_dist.loc.dtype)
)
whitened_variational_distribution = variational_dist.__class__(
whitened_mean, whitened_covar
)
self._variational_distribution.initialize_variational_distribution(
whitened_variational_distribution
)
# Reset the random noise parameter of the model
self._variational_distribution.mean_init_std = orig_mean_init_std
# Reset the cache
if hasattr(self, "_memoize_cache"):
delattr(self, "_memoize_cache")
self._memoize_cache = dict()
# Mark that we have updated the variational strategy
self.updated_strategy.fill_(True)
return super().__call__(x, prior=prior)
|
def __call__(self, x):
if not self.variational_params_initialized.item():
self.initialize_variational_dist()
self.variational_params_initialized.fill_(1)
if self.training:
if hasattr(self, "_memoize_cache"):
delattr(self, "_memoize_cache")
self._memoize_cache = dict()
return super(VariationalStrategy, self).__call__(x)
|
https://github.com/cornellius-gp/gpytorch/issues/905
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-c955f39ee560> in <module>
4 with torch.no_grad():
5 for x_batch, y_batch in test_loader:
----> 6 preds = model(x_batch)
7 means = torch.cat([means, preds.mean.cpu()])
8 means = means[1:]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/models/abstract_variational_gp.py in __call__(self, inputs, **kwargs)
20 inputs = inputs.unsqueeze(-1)
21
---> 22 return self.variational_strategy(inputs)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in __call__(self, x)
228 self._memoize_cache = dict()
229
--> 230 return super(VariationalStrategy, self).__call__(x)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
20
21 def __call__(self, *inputs, **kwargs):
---> 22 outputs = self.forward(*inputs, **kwargs)
23 if isinstance(outputs, list):
24 return [_validate_module_outputs(output) for output in outputs]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in forward(self, x)
214 induc_induc_covar.inv_matmul(induc_data_covar)
215 )
--> 216 data_covariance = data_data_covar + neg_induc_data_data_covar
217 predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
218
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/lazy/sum_lazy_tensor.py in __add__(self, other)
75 return SumLazyTensor(*(list(self.lazy_tensors) + [other]))
76 else:
---> 77 raise AttributeError("other must be a LazyTensor")
78
79 def diag(self):
AttributeError: other must be a LazyTensor
|
AttributeError
|
def covar_trace(self):
variational_covar = self.variational_distribution.covariance_matrix
prior_covar = self.prior_distribution.covariance_matrix
batch_shape = prior_covar.shape[:-2]
return (variational_covar * prior_covar).view(*batch_shape, -1).sum(-1)
|
def covar_trace(self):
variational_covar = (
self.variational_distribution.variational_distribution.covariance_matrix
)
prior_covar = self.prior_distribution.covariance_matrix
batch_shape = prior_covar.shape[:-2]
return (variational_covar * prior_covar).view(*batch_shape, -1).sum(-1)
|
https://github.com/cornellius-gp/gpytorch/issues/905
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-c955f39ee560> in <module>
4 with torch.no_grad():
5 for x_batch, y_batch in test_loader:
----> 6 preds = model(x_batch)
7 means = torch.cat([means, preds.mean.cpu()])
8 means = means[1:]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/models/abstract_variational_gp.py in __call__(self, inputs, **kwargs)
20 inputs = inputs.unsqueeze(-1)
21
---> 22 return self.variational_strategy(inputs)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in __call__(self, x)
228 self._memoize_cache = dict()
229
--> 230 return super(VariationalStrategy, self).__call__(x)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
20
21 def __call__(self, *inputs, **kwargs):
---> 22 outputs = self.forward(*inputs, **kwargs)
23 if isinstance(outputs, list):
24 return [_validate_module_outputs(output) for output in outputs]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in forward(self, x)
214 induc_induc_covar.inv_matmul(induc_data_covar)
215 )
--> 216 data_covariance = data_data_covar + neg_induc_data_data_covar
217 predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
218
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/lazy/sum_lazy_tensor.py in __add__(self, other)
75 return SumLazyTensor(*(list(self.lazy_tensors) + [other]))
76 else:
---> 77 raise AttributeError("other must be a LazyTensor")
78
79 def diag(self):
AttributeError: other must be a LazyTensor
|
AttributeError
|
def mean_diff_inv_quad(self):
prior_mean = self.prior_distribution.mean
prior_covar = self.prior_distribution.lazy_covariance_matrix
variational_mean = self.variational_distribution.mean
return prior_covar.inv_quad(variational_mean - prior_mean)
|
def mean_diff_inv_quad(self):
prior_mean = self.prior_distribution.mean
prior_covar = self.prior_distribution.lazy_covariance_matrix
variational_mean = self.variational_distribution.variational_distribution.mean
return prior_covar.inv_quad(variational_mean - prior_mean)
|
https://github.com/cornellius-gp/gpytorch/issues/905
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-c955f39ee560> in <module>
4 with torch.no_grad():
5 for x_batch, y_batch in test_loader:
----> 6 preds = model(x_batch)
7 means = torch.cat([means, preds.mean.cpu()])
8 means = means[1:]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/models/abstract_variational_gp.py in __call__(self, inputs, **kwargs)
20 inputs = inputs.unsqueeze(-1)
21
---> 22 return self.variational_strategy(inputs)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in __call__(self, x)
228 self._memoize_cache = dict()
229
--> 230 return super(VariationalStrategy, self).__call__(x)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
20
21 def __call__(self, *inputs, **kwargs):
---> 22 outputs = self.forward(*inputs, **kwargs)
23 if isinstance(outputs, list):
24 return [_validate_module_outputs(output) for output in outputs]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in forward(self, x)
214 induc_induc_covar.inv_matmul(induc_data_covar)
215 )
--> 216 data_covariance = data_data_covar + neg_induc_data_data_covar
217 predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
218
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/lazy/sum_lazy_tensor.py in __add__(self, other)
75 return SumLazyTensor(*(list(self.lazy_tensors) + [other]))
76 else:
---> 77 raise AttributeError("other must be a LazyTensor")
78
79 def diag(self):
AttributeError: other must be a LazyTensor
|
AttributeError
|
def kl_divergence(self):
variational_dist_u = self.variational_distribution
prior_dist = self.prior_distribution
kl_divergence = 0.5 * sum(
[
# log|k| - log|S|
# = log|K| - log|K var_dist_covar K|
# = -log|K| - log|var_dist_covar|
self.prior_covar_logdet(),
-variational_dist_u.lazy_covariance_matrix.logdet(),
# tr(K^-1 S) = tr(K^1 K var_dist_covar K) = tr(K var_dist_covar)
self.covar_trace(),
# (m - \mu u)^T K^-1 (m - \mu u)
# = (K^-1 (m - \mu u)) K (K^1 (m - \mu u))
# = (var_dist_mean)^T K (var_dist_mean)
self.mean_diff_inv_quad(),
# d
-prior_dist.event_shape.numel(),
]
)
return kl_divergence
|
def kl_divergence(self):
variational_dist_u = self.variational_distribution.variational_distribution
prior_dist = self.prior_distribution
kl_divergence = 0.5 * sum(
[
# log|k| - log|S|
# = log|K| - log|K var_dist_covar K|
# = -log|K| - log|var_dist_covar|
self.prior_covar_logdet(),
-variational_dist_u.lazy_covariance_matrix.logdet(),
# tr(K^-1 S) = tr(K^1 K var_dist_covar K) = tr(K var_dist_covar)
self.covar_trace(),
# (m - \mu u)^T K^-1 (m - \mu u)
# = (K^-1 (m - \mu u)) K (K^1 (m - \mu u))
# = (var_dist_mean)^T K (var_dist_mean)
self.mean_diff_inv_quad(),
# d
-prior_dist.event_shape.numel(),
]
)
return kl_divergence
|
https://github.com/cornellius-gp/gpytorch/issues/905
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-c955f39ee560> in <module>
4 with torch.no_grad():
5 for x_batch, y_batch in test_loader:
----> 6 preds = model(x_batch)
7 means = torch.cat([means, preds.mean.cpu()])
8 means = means[1:]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/models/abstract_variational_gp.py in __call__(self, inputs, **kwargs)
20 inputs = inputs.unsqueeze(-1)
21
---> 22 return self.variational_strategy(inputs)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in __call__(self, x)
228 self._memoize_cache = dict()
229
--> 230 return super(VariationalStrategy, self).__call__(x)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
20
21 def __call__(self, *inputs, **kwargs):
---> 22 outputs = self.forward(*inputs, **kwargs)
23 if isinstance(outputs, list):
24 return [_validate_module_outputs(output) for output in outputs]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in forward(self, x)
214 induc_induc_covar.inv_matmul(induc_data_covar)
215 )
--> 216 data_covariance = data_data_covar + neg_induc_data_data_covar
217 predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
218
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/lazy/sum_lazy_tensor.py in __add__(self, other)
75 return SumLazyTensor(*(list(self.lazy_tensors) + [other]))
76 else:
---> 77 raise AttributeError("other must be a LazyTensor")
78
79 def diag(self):
AttributeError: other must be a LazyTensor
|
AttributeError
|
def forward(self, x):
"""
The :func:`~gpytorch.variational.VariationalStrategy.forward` method determines how to marginalize out the
inducing point function values. Specifically, forward defines how to transform a variational distribution
over the inducing point values, :math:`q(u)`, in to a variational distribution over the function values at
specified locations x, :math:`q(f|x)`, by integrating :math:`\int p(f|x, u)q(u)du`
Args:
x (torch.tensor): Locations x to get the variational posterior of the function values at.
Returns:
:obj:`gpytorch.distributions.MultivariateNormal`: The distribution q(f|x)
"""
variational_dist = self.variational_distribution
inducing_points = self.inducing_points
if inducing_points.dim() < x.dim():
inducing_points = inducing_points.expand(
*x.shape[:-2], *inducing_points.shape[-2:]
)
if len(variational_dist.batch_shape) < x.dim() - 2:
variational_dist = variational_dist.expand(x.shape[:-2])
# If our points equal the inducing points, we're done
if torch.equal(x, inducing_points):
# De-whiten the prior covar
prior_covar = self.prior_distribution.lazy_covariance_matrix
if isinstance(variational_dist.lazy_covariance_matrix, RootLazyTensor):
predictive_covar = RootLazyTensor(
prior_covar @ variational_dist.lazy_covariance_matrix.root.evaluate()
)
else:
predictive_covar = MatmulLazyTensor(
prior_covar @ variational_dist.covariance_matrix, prior_covar
)
# Cache some values for the KL divergence
if self.training:
self._mean_diff_inv_quad_memo, self._logdet_memo = (
prior_covar.inv_quad_logdet(
(variational_dist.mean - self.prior_distribution.mean), logdet=True
)
)
return MultivariateNormal(variational_dist.mean, predictive_covar)
# Otherwise, we have to marginalize
else:
num_induc = inducing_points.size(-2)
full_inputs = torch.cat([inducing_points, x], dim=-2)
full_output = self.model.forward(full_inputs)
full_mean, full_covar = full_output.mean, full_output.lazy_covariance_matrix
# Mean terms
test_mean = full_mean[..., num_induc:]
induc_mean = full_mean[..., :num_induc]
mean_diff = (variational_dist.mean - induc_mean).unsqueeze(-1)
# Covariance terms
induc_induc_covar = full_covar[..., :num_induc, :num_induc].add_jitter()
induc_data_covar = full_covar[..., :num_induc, num_induc:].evaluate()
data_data_covar = full_covar[..., num_induc:, num_induc:]
# If we're less than a certain size, we'll compute the Cholesky decomposition of induc_induc_covar
cholesky = False
if settings.fast_computations.log_prob.off() or (
num_induc <= settings.max_cholesky_size.value()
):
induc_induc_covar = CholLazyTensor(induc_induc_covar.cholesky())
cholesky = True
# Cache the CG results
# Do not use preconditioning for whitened VI, as it does not seem to improve performance.
with settings.max_preconditioner_size(0):
with torch.no_grad():
eager_rhs = torch.cat([induc_data_covar, mean_diff], -1)
solve, probe_vecs, probe_vec_norms, probe_vec_solves, tmats = (
CachedCGLazyTensor.precompute_terms(
induc_induc_covar,
eager_rhs.detach(),
logdet_terms=(not cholesky),
include_tmats=(
not settings.skip_logdet_forward.on() and not cholesky
),
)
)
eager_rhss = [eager_rhs.detach()]
solves = [solve.detach()]
if settings.skip_logdet_forward.on() and self.training:
eager_rhss.append(torch.cat([probe_vecs, eager_rhs], -1))
solves.append(
torch.cat(
[probe_vec_solves, solve[..., : eager_rhs.size(-1)]], -1
)
)
elif not self.training:
eager_rhss.append(eager_rhs[..., :-1])
solves.append(solve[..., :-1])
induc_induc_covar = CachedCGLazyTensor(
induc_induc_covar,
eager_rhss=eager_rhss,
solves=solves,
probe_vectors=probe_vecs,
probe_vector_norms=probe_vec_norms,
probe_vector_solves=probe_vec_solves,
probe_vector_tmats=tmats,
)
# Compute some terms that will be necessary for the predicitve covariance and KL divergence
if self.training:
interp_data_data_var_plus_mean_diff_inv_quad, logdet = (
induc_induc_covar.inv_quad_logdet(
torch.cat([induc_data_covar, mean_diff], -1),
logdet=True,
reduce_inv_quad=False,
)
)
interp_data_data_var = interp_data_data_var_plus_mean_diff_inv_quad[
..., :-1
]
mean_diff_inv_quad = interp_data_data_var_plus_mean_diff_inv_quad[..., -1]
# Compute predictive mean
predictive_mean = torch.add(
test_mean,
induc_induc_covar.inv_matmul(
mean_diff, left_tensor=induc_data_covar.transpose(-1, -2)
).squeeze(-1),
)
# Compute the predictive covariance
is_root_lt = isinstance(variational_dist.lazy_covariance_matrix, RootLazyTensor)
is_repeated_root_lt = isinstance(
variational_dist.lazy_covariance_matrix, BatchRepeatLazyTensor
) and isinstance(
variational_dist.lazy_covariance_matrix.base_lazy_tensor, RootLazyTensor
)
if is_root_lt:
predictive_covar = RootLazyTensor(
induc_data_covar.transpose(-1, -2)
@ variational_dist.lazy_covariance_matrix.root.evaluate()
)
elif is_repeated_root_lt:
predictive_covar = RootLazyTensor(
induc_data_covar.transpose(-1, -2)
@ variational_dist.lazy_covariance_matrix.root_decomposition().root.evaluate()
)
else:
predictive_covar = MatmulLazyTensor(
induc_data_covar.transpose(-1, -2), predictive_covar @ induc_data_covar
)
if self.training:
data_covariance = DiagLazyTensor(
(data_data_covar.diag() - interp_data_data_var).clamp(0, math.inf)
)
else:
neg_induc_data_data_covar = torch.matmul(
induc_data_covar.transpose(-1, -2).mul(-1),
induc_induc_covar.inv_matmul(induc_data_covar),
)
data_covariance = data_data_covar + neg_induc_data_data_covar
predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
# Save the logdet, mean_diff_inv_quad, prior distribution for the ELBO
if self.training:
self._memoize_cache["prior_distribution_memo"] = MultivariateNormal(
induc_mean, induc_induc_covar
)
self._memoize_cache["logdet_memo"] = -logdet
self._memoize_cache["mean_diff_inv_quad_memo"] = mean_diff_inv_quad
return MultivariateNormal(predictive_mean, predictive_covar)
|
def forward(self, x):
"""
The :func:`~gpytorch.variational.VariationalStrategy.forward` method determines how to marginalize out the
inducing point function values. Specifically, forward defines how to transform a variational distribution
over the inducing point values, :math:`q(u)`, in to a variational distribution over the function values at
specified locations x, :math:`q(f|x)`, by integrating :math:`\int p(f|x, u)q(u)du`
Args:
x (torch.tensor): Locations x to get the variational posterior of the function values at.
Returns:
:obj:`gpytorch.distributions.MultivariateNormal`: The distribution q(f|x)
"""
variational_dist = self.variational_distribution.variational_distribution
inducing_points = self.inducing_points
if inducing_points.dim() < x.dim():
inducing_points = inducing_points.expand(
*x.shape[:-2], *inducing_points.shape[-2:]
)
if len(variational_dist.batch_shape) < x.dim() - 2:
variational_dist = variational_dist.expand(x.shape[:-2])
# If our points equal the inducing points, we're done
if torch.equal(x, inducing_points):
# De-whiten the prior covar
prior_covar = self.prior_distribution.lazy_covariance_matrix
if isinstance(variational_dist.lazy_covariance_matrix, RootLazyTensor):
predictive_covar = RootLazyTensor(
prior_covar @ variational_dist.lazy_covariance_matrix.root.evaluate()
)
else:
predictive_covar = MatmulLazyTensor(
prior_covar @ variational_dist.covariance_matrix, prior_covar
)
# Cache some values for the KL divergence
if self.training:
self._mean_diff_inv_quad_memo, self._logdet_memo = (
prior_covar.inv_quad_logdet(
(variational_dist.mean - self.prior_distribution.mean), logdet=True
)
)
return MultivariateNormal(variational_dist.mean, predictive_covar)
# Otherwise, we have to marginalize
else:
num_induc = inducing_points.size(-2)
full_inputs = torch.cat([inducing_points, x], dim=-2)
full_output = self.model.forward(full_inputs)
full_mean, full_covar = full_output.mean, full_output.lazy_covariance_matrix
# Mean terms
test_mean = full_mean[..., num_induc:]
induc_mean = full_mean[..., :num_induc]
mean_diff = (variational_dist.mean - induc_mean).unsqueeze(-1)
# Covariance terms
induc_induc_covar = full_covar[..., :num_induc, :num_induc].add_jitter()
induc_data_covar = full_covar[..., :num_induc, num_induc:].evaluate()
data_data_covar = full_covar[..., num_induc:, num_induc:]
# If we're less than a certain size, we'll compute the Cholesky decomposition of induc_induc_covar
cholesky = False
if settings.fast_computations.log_prob.off() or (
num_induc <= settings.max_cholesky_size.value()
):
induc_induc_covar = CholLazyTensor(induc_induc_covar.cholesky())
cholesky = True
# Cache the CG results
# Do not use preconditioning for whitened VI, as it does not seem to improve performance.
with settings.max_preconditioner_size(0):
with torch.no_grad():
eager_rhs = torch.cat([induc_data_covar, mean_diff], -1)
solve, probe_vecs, probe_vec_norms, probe_vec_solves, tmats = (
CachedCGLazyTensor.precompute_terms(
induc_induc_covar,
eager_rhs.detach(),
logdet_terms=(not cholesky),
include_tmats=(
not settings.skip_logdet_forward.on() and not cholesky
),
)
)
eager_rhss = [eager_rhs.detach()]
solves = [solve.detach()]
if settings.skip_logdet_forward.on() and self.training:
eager_rhss.append(torch.cat([probe_vecs, eager_rhs], -1))
solves.append(
torch.cat(
[probe_vec_solves, solve[..., : eager_rhs.size(-1)]], -1
)
)
elif not self.training:
eager_rhss.append(eager_rhs[..., :-1])
solves.append(solve[..., :-1])
induc_induc_covar = CachedCGLazyTensor(
induc_induc_covar,
eager_rhss=eager_rhss,
solves=solves,
probe_vectors=probe_vecs,
probe_vector_norms=probe_vec_norms,
probe_vector_solves=probe_vec_solves,
probe_vector_tmats=tmats,
)
# Compute some terms that will be necessary for the predicitve covariance and KL divergence
if self.training:
interp_data_data_var_plus_mean_diff_inv_quad, logdet = (
induc_induc_covar.inv_quad_logdet(
torch.cat([induc_data_covar, mean_diff], -1),
logdet=True,
reduce_inv_quad=False,
)
)
interp_data_data_var = interp_data_data_var_plus_mean_diff_inv_quad[
..., :-1
]
mean_diff_inv_quad = interp_data_data_var_plus_mean_diff_inv_quad[..., -1]
# Compute predictive mean
predictive_mean = torch.add(
test_mean,
induc_induc_covar.inv_matmul(
mean_diff, left_tensor=induc_data_covar.transpose(-1, -2)
).squeeze(-1),
)
# Compute the predictive covariance
is_root_lt = isinstance(variational_dist.lazy_covariance_matrix, RootLazyTensor)
is_repeated_root_lt = isinstance(
variational_dist.lazy_covariance_matrix, BatchRepeatLazyTensor
) and isinstance(
variational_dist.lazy_covariance_matrix.base_lazy_tensor, RootLazyTensor
)
if is_root_lt:
predictive_covar = RootLazyTensor(
induc_data_covar.transpose(-1, -2)
@ variational_dist.lazy_covariance_matrix.root.evaluate()
)
elif is_repeated_root_lt:
predictive_covar = RootLazyTensor(
induc_data_covar.transpose(-1, -2)
@ variational_dist.lazy_covariance_matrix.root_decomposition().root.evaluate()
)
else:
predictive_covar = MatmulLazyTensor(
induc_data_covar.transpose(-1, -2), predictive_covar @ induc_data_covar
)
if self.training:
data_covariance = DiagLazyTensor(
(data_data_covar.diag() - interp_data_data_var).clamp(0, math.inf)
)
else:
neg_induc_data_data_covar = torch.matmul(
induc_data_covar.transpose(-1, -2).mul(-1),
induc_induc_covar.inv_matmul(induc_data_covar),
)
data_covariance = data_data_covar + neg_induc_data_data_covar
predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
# Save the logdet, mean_diff_inv_quad, prior distribution for the ELBO
if self.training:
self._memoize_cache["prior_distribution_memo"] = MultivariateNormal(
induc_mean, induc_induc_covar
)
self._memoize_cache["logdet_memo"] = -logdet
self._memoize_cache["mean_diff_inv_quad_memo"] = mean_diff_inv_quad
return MultivariateNormal(predictive_mean, predictive_covar)
|
https://github.com/cornellius-gp/gpytorch/issues/905
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-6-c955f39ee560> in <module>
4 with torch.no_grad():
5 for x_batch, y_batch in test_loader:
----> 6 preds = model(x_batch)
7 means = torch.cat([means, preds.mean.cpu()])
8 means = means[1:]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/models/abstract_variational_gp.py in __call__(self, inputs, **kwargs)
20 inputs = inputs.unsqueeze(-1)
21
---> 22 return self.variational_strategy(inputs)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in __call__(self, x)
228 self._memoize_cache = dict()
229
--> 230 return super(VariationalStrategy, self).__call__(x)
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
20
21 def __call__(self, *inputs, **kwargs):
---> 22 outputs = self.forward(*inputs, **kwargs)
23 if isinstance(outputs, list):
24 return [_validate_module_outputs(output) for output in outputs]
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/variational/variational_strategy.py in forward(self, x)
214 induc_induc_covar.inv_matmul(induc_data_covar)
215 )
--> 216 data_covariance = data_data_covar + neg_induc_data_data_covar
217 predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
218
~/miniconda3/envs/mbrl/lib/python3.7/site-packages/gpytorch/lazy/sum_lazy_tensor.py in __add__(self, other)
75 return SumLazyTensor(*(list(self.lazy_tensors) + [other]))
76 else:
---> 77 raise AttributeError("other must be a LazyTensor")
78
79 def diag(self):
AttributeError: other must be a LazyTensor
|
AttributeError
|
def __init__(
self,
base_lazy_tensor,
left_interp_indices=None,
left_interp_values=None,
right_interp_indices=None,
right_interp_values=None,
):
base_lazy_tensor = lazify(base_lazy_tensor)
if left_interp_indices is None:
num_rows = base_lazy_tensor.size(-2)
left_interp_indices = torch.arange(
0, num_rows, dtype=torch.long, device=base_lazy_tensor.device
)
left_interp_indices.unsqueeze_(-1)
left_interp_indices = left_interp_indices.expand(
*base_lazy_tensor.batch_shape, num_rows, 1
)
if left_interp_values is None:
left_interp_values = torch.ones(
left_interp_indices.size(),
dtype=base_lazy_tensor.dtype,
device=base_lazy_tensor.device,
)
if right_interp_indices is None:
num_cols = base_lazy_tensor.size(-1)
right_interp_indices = torch.arange(
0, num_cols, dtype=torch.long, device=base_lazy_tensor.device
)
right_interp_indices.unsqueeze_(-1)
right_interp_indices = right_interp_indices.expand(
*base_lazy_tensor.batch_shape, num_cols, 1
)
if right_interp_values is None:
right_interp_values = torch.ones(
right_interp_indices.size(),
dtype=base_lazy_tensor.dtype,
device=base_lazy_tensor.device,
)
if left_interp_indices.shape[:-2] != base_lazy_tensor.batch_shape:
try:
base_lazy_tensor = base_lazy_tensor._expand_batch(
left_interp_indices.shape[:-2]
)
except RuntimeError:
raise RuntimeError(
"interp size ({}) is incompatible with base_lazy_tensor size ({}). ".format(
right_interp_indices.size(), base_lazy_tensor.size()
)
)
super(InterpolatedLazyTensor, self).__init__(
base_lazy_tensor,
left_interp_indices,
left_interp_values,
right_interp_indices,
right_interp_values,
)
self.base_lazy_tensor = base_lazy_tensor
self.left_interp_indices = left_interp_indices
self.left_interp_values = left_interp_values
self.right_interp_indices = right_interp_indices
self.right_interp_values = right_interp_values
|
def __init__(
self,
base_lazy_tensor,
left_interp_indices=None,
left_interp_values=None,
right_interp_indices=None,
right_interp_values=None,
):
base_lazy_tensor = lazify(base_lazy_tensor)
if left_interp_indices is None:
num_rows = base_lazy_tensor.size(-2)
left_interp_indices = torch.arange(
0, num_rows, dtype=torch.long, device=base_lazy_tensor.device
)
left_interp_indices.unsqueeze_(-1)
left_interp_indices = left_interp_indices.expand(
*base_lazy_tensor.batch_shape, num_rows, 1
)
if left_interp_values is None:
left_interp_values = torch.ones(
left_interp_indices.size(),
dtype=base_lazy_tensor.dtype,
device=base_lazy_tensor.device,
)
if right_interp_indices is None:
num_rows = base_lazy_tensor.size(-2)
right_interp_indices = torch.arange(
0, num_rows, dtype=torch.long, device=base_lazy_tensor.device
)
right_interp_indices.unsqueeze_(-1)
right_interp_indices = right_interp_indices.expand(
*base_lazy_tensor.batch_shape, num_rows, 1
)
if right_interp_values is None:
right_interp_values = torch.ones(
right_interp_indices.size(),
dtype=base_lazy_tensor.dtype,
device=base_lazy_tensor.device,
)
if left_interp_indices.shape[:-2] != base_lazy_tensor.batch_shape:
try:
base_lazy_tensor = base_lazy_tensor._expand_batch(
left_interp_indices.shape[:-2]
)
except RuntimeError:
raise RuntimeError(
"interp size ({}) is incompatible with base_lazy_tensor size ({}). ".format(
right_interp_indices.size(), base_lazy_tensor.size()
)
)
super(InterpolatedLazyTensor, self).__init__(
base_lazy_tensor,
left_interp_indices,
left_interp_values,
right_interp_indices,
right_interp_values,
)
self.base_lazy_tensor = base_lazy_tensor
self.left_interp_indices = left_interp_indices
self.left_interp_values = left_interp_values
self.right_interp_indices = right_interp_indices
self.right_interp_values = right_interp_values
|
https://github.com/cornellius-gp/gpytorch/issues/900
|
(py37) vdhiman@dwarf:~/wrk/BayesCBF_ws/BayesCBF$ python tests/test_interpolated_lazy_tensor.py
Traceback (most recent call last):
File "tests/test_interpolated_lazy_tensor.py", line 30, in <module>
test_interpolated_lazy_tensor()
File "tests/test_interpolated_lazy_tensor.py", line 7, in test_interpolated_lazy_tensor
res = itplzt @ torch.eye(3)
File "/home/vdhiman/wrk/gpytorch/gpytorch/lazy/lazy_tensor.py", line 1731, in __matmul__
return self.matmul(other)
File "/home/vdhiman/wrk/gpytorch/gpytorch/lazy/interpolated_lazy_tensor.py", line 393, in matmul
right_interp_res = left_t_interp(self.right_interp_indices, self.right_interp_values, tensor, base_size)
File "/home/vdhiman/wrk/gpytorch/gpytorch/utils/interpolation.py", line 202, in left_t_interp
values = rhs.unsqueeze(-2) * interp_values.unsqueeze(-1)
RuntimeError: The size of tensor a (3) must match the size of tensor b (2) at non-singleton dimension 0
|
RuntimeError
|
def _sparse_left_interp_t(self, left_interp_indices_tensor, left_interp_values_tensor):
if hasattr(self, "_sparse_left_interp_t_memo"):
if torch.equal(
self._left_interp_indices_memo, left_interp_indices_tensor
) and torch.equal(self._left_interp_values_memo, left_interp_values_tensor):
return self._sparse_left_interp_t_memo
left_interp_t = sparse.make_sparse_from_indices_and_values(
left_interp_indices_tensor,
left_interp_values_tensor,
self.base_lazy_tensor.size()[-2],
)
self._left_interp_indices_memo = left_interp_indices_tensor
self._left_interp_values_memo = left_interp_values_tensor
self._sparse_left_interp_t_memo = left_interp_t
return self._sparse_left_interp_t_memo
|
def _sparse_left_interp_t(self, left_interp_indices_tensor, left_interp_values_tensor):
if hasattr(self, "_sparse_left_interp_t_memo"):
if torch.equal(
self._left_interp_indices_memo, left_interp_indices_tensor
) and torch.equal(self._left_interp_values_memo, left_interp_values_tensor):
return self._sparse_left_interp_t_memo
left_interp_t = sparse.make_sparse_from_indices_and_values(
left_interp_indices_tensor,
left_interp_values_tensor,
self.base_lazy_tensor.size()[-1],
)
self._left_interp_indices_memo = left_interp_indices_tensor
self._left_interp_values_memo = left_interp_values_tensor
self._sparse_left_interp_t_memo = left_interp_t
return self._sparse_left_interp_t_memo
|
https://github.com/cornellius-gp/gpytorch/issues/900
|
(py37) vdhiman@dwarf:~/wrk/BayesCBF_ws/BayesCBF$ python tests/test_interpolated_lazy_tensor.py
Traceback (most recent call last):
File "tests/test_interpolated_lazy_tensor.py", line 30, in <module>
test_interpolated_lazy_tensor()
File "tests/test_interpolated_lazy_tensor.py", line 7, in test_interpolated_lazy_tensor
res = itplzt @ torch.eye(3)
File "/home/vdhiman/wrk/gpytorch/gpytorch/lazy/lazy_tensor.py", line 1731, in __matmul__
return self.matmul(other)
File "/home/vdhiman/wrk/gpytorch/gpytorch/lazy/interpolated_lazy_tensor.py", line 393, in matmul
right_interp_res = left_t_interp(self.right_interp_indices, self.right_interp_values, tensor, base_size)
File "/home/vdhiman/wrk/gpytorch/gpytorch/utils/interpolation.py", line 202, in left_t_interp
values = rhs.unsqueeze(-2) * interp_values.unsqueeze(-1)
RuntimeError: The size of tensor a (3) must match the size of tensor b (2) at non-singleton dimension 0
|
RuntimeError
|
def backward(ctx, inv_quad_grad_output, logdet_grad_output):
matrix_arg_grads = None
inv_quad_rhs_grad = None
# Which backward passes should we compute?
compute_inv_quad_grad = inv_quad_grad_output.abs().sum() and ctx.inv_quad
compute_logdet_grad = logdet_grad_output.abs().sum() and ctx.logdet
# Get input arguments, and get gradients in the proper form
matrix_args = ctx.saved_tensors[:-1]
solves = ctx.saved_tensors[-1]
if hasattr(ctx, "_lazy_tsr"):
lazy_tsr = ctx._lazy_tsr
else:
lazy_tsr = ctx.representation_tree(*matrix_args)
# Fix grad_output sizes
if ctx.inv_quad:
inv_quad_grad_output = inv_quad_grad_output.unsqueeze(-2)
if compute_logdet_grad:
logdet_grad_output = logdet_grad_output.unsqueeze(-1)
logdet_grad_output.unsqueeze_(-1)
# Divide up the solves
probe_vector_solves = None
inv_quad_solves = None
neg_inv_quad_solves_times_grad_out = None
if compute_logdet_grad:
coef = 1.0 / ctx.probe_vectors.size(-1)
probe_vector_solves = solves.narrow(-1, 0, ctx.num_random_probes).mul(coef)
probe_vector_solves.mul_(ctx.probe_vector_norms).mul_(logdet_grad_output)
probe_vectors = ctx.probe_vectors.mul(ctx.probe_vector_norms)
if ctx.inv_quad:
inv_quad_solves = solves.narrow(
-1, ctx.num_random_probes, ctx.num_inv_quad_solves
)
neg_inv_quad_solves_times_grad_out = inv_quad_solves.mul(
inv_quad_grad_output
).mul_(-1)
# input_1 gradient
if any(ctx.needs_input_grad):
# Collect terms for arg grads
left_factors_list = []
right_factors_list = []
if compute_logdet_grad:
left_factors_list.append(probe_vector_solves)
if ctx.preconditioner is not None:
probe_vectors = ctx.preconditioner(probe_vectors)
right_factors_list.append(probe_vectors)
if compute_inv_quad_grad:
left_factors_list.append(neg_inv_quad_solves_times_grad_out)
right_factors_list.append(inv_quad_solves)
left_factors = torch.cat(left_factors_list, -1)
right_factors = torch.cat(right_factors_list, -1)
matrix_arg_grads = lazy_tsr._quad_form_derivative(left_factors, right_factors)
# input_2 gradients
if compute_inv_quad_grad and ctx.needs_input_grad[9]:
inv_quad_rhs_grad = neg_inv_quad_solves_times_grad_out.mul_(-2)
elif ctx.inv_quad:
inv_quad_rhs_grad = torch.zeros_like(inv_quad_solves)
if ctx.is_vector:
inv_quad_rhs_grad.squeeze_(-1)
if ctx.inv_quad:
res = [inv_quad_rhs_grad] + list(matrix_arg_grads)
else:
res = list(matrix_arg_grads)
return tuple([None] * 9 + res)
|
def backward(ctx, inv_quad_grad_output, logdet_grad_output):
matrix_arg_grads = None
inv_quad_rhs_grad = None
# Which backward passes should we compute?
compute_inv_quad_grad = inv_quad_grad_output.abs().sum() and ctx.inv_quad
compute_logdet_grad = logdet_grad_output.abs().sum() and ctx.logdet
# Get input arguments, and get gradients in the proper form
matrix_args = ctx.saved_tensors[:-1]
solves = ctx.saved_tensors[-1]
if hasattr(ctx, "_lazy_tsr"):
lazy_tsr = ctx._lazy_tsr
else:
lazy_tsr = ctx.representation_tree(*matrix_args)
# Fix grad_output sizes
if ctx.inv_quad:
inv_quad_grad_output = inv_quad_grad_output.unsqueeze(-2)
if compute_logdet_grad:
logdet_grad_output = logdet_grad_output.unsqueeze(-1)
logdet_grad_output.unsqueeze_(-1)
# Divide up the solves
probe_vector_solves = None
inv_quad_solves = None
neg_inv_quad_solves_times_grad_out = None
if compute_logdet_grad:
coef = 1.0 / ctx.probe_vectors.size(-1)
probe_vector_solves = solves.narrow(-1, 0, ctx.num_random_probes).mul(coef)
probe_vector_solves.mul_(ctx.probe_vector_norms).mul_(logdet_grad_output)
probe_vectors = ctx.probe_vectors.mul(ctx.probe_vector_norms)
if ctx.inv_quad:
inv_quad_solves = solves.narrow(
-1, ctx.num_random_probes, ctx.num_inv_quad_solves
)
neg_inv_quad_solves_times_grad_out = inv_quad_solves.mul(
inv_quad_grad_output
).mul_(-1)
# input_1 gradient
if any(ctx.needs_input_grad):
# Collect terms for arg grads
left_factors_list = []
right_factors_list = []
if compute_logdet_grad:
left_factors_list.append(probe_vector_solves)
if ctx.preconditioner is not None:
probe_vectors = ctx.preconditioner(probe_vectors)
right_factors_list.append(probe_vectors)
if compute_inv_quad_grad:
left_factors_list.append(neg_inv_quad_solves_times_grad_out)
right_factors_list.append(inv_quad_solves)
left_factors = torch.cat(left_factors_list, -1)
right_factors = torch.cat(right_factors_list, -1)
matrix_arg_grads = lazy_tsr._quad_form_derivative(left_factors, right_factors)
# input_2 gradients
if compute_inv_quad_grad and ctx.needs_input_grad[9]:
inv_quad_rhs_grad = neg_inv_quad_solves_times_grad_out.mul_(-2)
elif ctx.inv_quad:
inv_quad_rhs_grad = torch.zeros_like(inv_quad_solves)
if ctx.is_vector:
inv_quad_rhs_grad.squeeze_(-1)
if ctx.inv_quad:
res = [inv_quad_rhs_grad] + list(matrix_arg_grads)
else:
res = matrix_arg_grads
return tuple([None] * 9 + res)
|
https://github.com/cornellius-gp/gpytorch/issues/710
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-46-593fbced29ac> in <module>()
3 kern = gpytorch.kernels.RBFKernel()(inp)
4 ld = logdet(kern)
----> 5 backward(ld)
<PATH SNIPPED>/lib/python3.7/site-packages/torch/autograd/__init__.py in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables)
91 Variable._execution_engine.run_backward(
92 tensors, grad_tensors, retain_graph, create_graph,
---> 93 allow_unreachable=True) # allow_unreachable flag
94
95
<PATH SNIPPED>/lib/python3.7/site-packages/torch/autograd/function.py in apply(self, *args)
75
76 def apply(self, *args):
---> 77 return self._forward_cls.backward(self, *args)
78
79
<PATH SNIPPED>lib/python3.7/site-packages/gpytorch/functions/_inv_quad_log_det.py in backward(ctx, inv_quad_grad_output, logdet_grad_output)
221 res = matrix_arg_grads
222
--> 223 return tuple([None] * 9 + res)
TypeError: can only concatenate list (not "tuple") to list
|
TypeError
|
def check(self, tensor):
return bool(
torch.all(tensor <= self.upper_bound) and torch.all(tensor >= self.lower_bound)
)
|
def check(self, tensor):
return torch.all(tensor <= self.upper_bound) and torch.all(
tensor >= self.lower_bound
)
|
https://github.com/cornellius-gp/gpytorch/issues/620
|
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-5-dcd721edef0b> in <module>
73 model.likelihood.initialize(noise=1e-5)
74 model.likelihood.noise_covar.raw_noise.requires_grad_(False)
---> 75 train(model, train_x, train_y, train_steps=100)
<ipython-input-5-dcd721edef0b> in train(model, train_x, train_y, train_steps)
64 optimizer.zero_grad()
65 output = model(train_x)
---> 66 loss = -mll(output, train_y)
67 loss.backward()
68 optimizer.step()
~/.cache/pypoetry/virtualenvs/bayes-optim-py3.7/src/gpytorch/gpytorch/module.py in __call__(self, *inputs, **kwargs)
20
21 def __call__(self, *inputs, **kwargs):
---> 22 outputs = self.forward(*inputs, **kwargs)
23 if isinstance(outputs, list):
24 return [_validate_module_outputs(output) for output in outputs]
~/.cache/pypoetry/virtualenvs/bayes-optim-py3.7/src/gpytorch/gpytorch/mlls/exact_marginal_log_likelihood.py in forward(self, output, target, *params)
26 # Get the log prob of the marginal distribution
27 output = self.likelihood(output, *params)
---> 28 res = output.log_prob(target)
29
30 # Add terms for SGPR / when inducing points are learned
~/.cache/pypoetry/virtualenvs/bayes-optim-py3.7/src/gpytorch/gpytorch/distributions/multivariate_normal.py in log_prob(self, value)
127
128 # Get log determininat and first part of quadratic form
--> 129 inv_quad, logdet = covar.inv_quad_logdet(inv_quad_rhs=diff.unsqueeze(-1), logdet=True)
130
131 res = -0.5 * sum([inv_quad, logdet, diff.size(-1) * math.log(2 * math.pi)])
~/.cache/pypoetry/virtualenvs/bayes-optim-py3.7/src/gpytorch/gpytorch/lazy/lazy_tensor.py in inv_quad_logdet(self, inv_quad_rhs, logdet, reduce_inv_quad)
990 from .chol_lazy_tensor import CholLazyTensor
991
--> 992 cholesky = CholLazyTensor(self.cholesky())
993 return cholesky.inv_quad_logdet(inv_quad_rhs=inv_quad_rhs, logdet=logdet, reduce_inv_quad=reduce_inv_quad)
994
~/.cache/pypoetry/virtualenvs/bayes-optim-py3.7/src/gpytorch/gpytorch/lazy/lazy_tensor.py in cholesky(self, upper)
716 (LazyTensor) Cholesky factor (lower triangular)
717 """
--> 718 res = self._cholesky()
719 if upper:
720 res = res.transpose(-1, -2)
~/.cache/pypoetry/virtualenvs/bayes-optim-py3.7/src/gpytorch/gpytorch/utils/memoize.py in g(self, *args, **kwargs)
32 cache_name = name if name is not None else method
33 if not is_in_cache(self, cache_name):
---> 34 add_to_cache(self, cache_name, method(self, *args, **kwargs))
35 return get_from_cache(self, cache_name)
36
~/.cache/pypoetry/virtualenvs/bayes-optim-py3.7/src/gpytorch/gpytorch/lazy/lazy_tensor.py in _cholesky(self)
401 evaluated_mat.register_hook(_ensure_symmetric_grad)
402
--> 403 cholesky = psd_safe_cholesky(evaluated_mat.double()).to(self.dtype)
404 return NonLazyTensor(cholesky)
405
~/.cache/pypoetry/virtualenvs/bayes-optim-py3.7/src/gpytorch/gpytorch/utils/cholesky.py in psd_safe_cholesky(A, upper, out, jitter)
44 continue
45
---> 46 raise e
47
48
~/.cache/pypoetry/virtualenvs/bayes-optim-py3.7/src/gpytorch/gpytorch/utils/cholesky.py in psd_safe_cholesky(A, upper, out, jitter)
19 """
20 try:
---> 21 L = torch.cholesky(A, upper=upper, out=out)
22 # TODO: Remove once fixed in pytorch (#16780)
23 if A.dim() > 2 and A.is_cuda:
RuntimeError: Lapack Error in potrf : the leading minor of order 1 is not positive definite at /pytorch/aten/src/TH/generic/THTensorLapack.cpp:658
|
RuntimeError
|
def initialize(self, **kwargs):
"""
Set a value for a parameter
kwargs: (param_name, value) - parameter to initialize.
Can also initialize recursively by passing in the full name of a
parameter. For example if model has attribute model.likelihood,
we can initialize the noise with either
`model.initialize(**{'likelihood.noise': 0.1})`
or
`model.likelihood.initialize(noise=0.1)`.
The former method would allow users to more easily store the
initialization values as one object.
Value can take the form of a tensor, a float, or an int
"""
for name, val in kwargs.items():
if isinstance(val, int):
val = float(val)
if "." in name:
module, name = self._get_module_and_name(name)
module.initialize(**{name: val})
elif not hasattr(self, name):
raise AttributeError(
"Unknown parameter {p} for {c}".format(
p=name, c=self.__class__.__name__
)
)
elif name not in self._parameters:
setattr(self, name, val)
elif torch.is_tensor(val):
constraint = self.constraint_for_parameter_name(name)
if constraint is not None and not constraint.check_raw(val):
raise RuntimeError(
"Attempting to manually set a parameter value that is out of bounds of "
f"its current constraints, {constraint}. "
"Most likely, you want to do the following:\n likelihood = GaussianLikelihood"
"(noise_constraint=gpytorch.constraints.GreaterThan(better_lower_bound))"
)
try:
self.__getattr__(name).data.copy_(val.expand_as(self.__getattr__(name)))
except RuntimeError:
self.__getattr__(name).data.copy_(val.view_as(self.__getattr__(name)))
elif isinstance(val, float):
constraint = self.constraint_for_parameter_name(name)
if constraint is not None and not constraint.check_raw(val):
raise RuntimeError(
"Attempting to manually set a parameter value that is out of bounds of "
f"its current constraints, {constraint}. "
"Most likely, you want to do the following:\n likelihood = GaussianLikelihood"
"(noise_constraint=gpytorch.constraints.GreaterThan(better_lower_bound))"
)
self.__getattr__(name).data.fill_(val)
else:
raise AttributeError(
"Type {t} not valid for initializing parameter {p}".format(
t=type(val), p=name
)
)
# Ensure value is contained in support of prior (if present)
prior_name = "_".join([name, "prior"])
if prior_name in self._priors:
prior, closure, _ = self._priors[prior_name]
try:
prior._validate_sample(closure())
except ValueError as e:
raise ValueError(
"Invalid input value for prior {}. Error:\n{}".format(prior_name, e)
)
return self
|
def initialize(self, **kwargs):
"""
Set a value for a parameter
kwargs: (param_name, value) - parameter to initialize.
Can also initialize recursively by passing in the full name of a
parameter. For example if model has attribute model.likelihood,
we can initialize the noise with either
`model.initialize(**{'likelihood.noise': 0.1})`
or
`model.likelihood.initialize(noise=0.1)`.
The former method would allow users to more easily store the
initialization values as one object.
Value can take the form of a tensor, a float, or an int
"""
for name, val in kwargs.items():
if isinstance(val, int):
val = float(val)
if "." in name:
module, name = self._get_module_and_name(name)
module.initialize(**{name: val})
elif not hasattr(self, name):
raise AttributeError(
"Unknown parameter {p} for {c}".format(
p=name, c=self.__class__.__name__
)
)
elif name not in self._parameters:
setattr(self, name, val)
elif torch.is_tensor(val):
try:
self.__getattr__(name).data.copy_(val.expand_as(self.__getattr__(name)))
except RuntimeError:
self.__getattr__(name).data.copy_(val.view_as(self.__getattr__(name)))
elif isinstance(val, float):
self.__getattr__(name).data.fill_(val)
else:
raise AttributeError(
"Type {t} not valid for initializing parameter {p}".format(
t=type(val), p=name
)
)
# Ensure value is contained in support of prior (if present)
prior_name = "_".join([name, "prior"])
if prior_name in self._priors:
prior, closure, _ = self._priors[prior_name]
try:
prior._validate_sample(closure())
except ValueError as e:
raise ValueError(
"Invalid input value for prior {}. Error:\n{}".format(prior_name, e)
)
return self
|
https://github.com/cornellius-gp/gpytorch/issues/620
|
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-5-dcd721edef0b> in <module>
73 model.likelihood.initialize(noise=1e-5)
74 model.likelihood.noise_covar.raw_noise.requires_grad_(False)
---> 75 train(model, train_x, train_y, train_steps=100)
<ipython-input-5-dcd721edef0b> in train(model, train_x, train_y, train_steps)
64 optimizer.zero_grad()
65 output = model(train_x)
---> 66 loss = -mll(output, train_y)
67 loss.backward()
68 optimizer.step()
~/.cache/pypoetry/virtualenvs/bayes-optim-py3.7/src/gpytorch/gpytorch/module.py in __call__(self, *inputs, **kwargs)
20
21 def __call__(self, *inputs, **kwargs):
---> 22 outputs = self.forward(*inputs, **kwargs)
23 if isinstance(outputs, list):
24 return [_validate_module_outputs(output) for output in outputs]
~/.cache/pypoetry/virtualenvs/bayes-optim-py3.7/src/gpytorch/gpytorch/mlls/exact_marginal_log_likelihood.py in forward(self, output, target, *params)
26 # Get the log prob of the marginal distribution
27 output = self.likelihood(output, *params)
---> 28 res = output.log_prob(target)
29
30 # Add terms for SGPR / when inducing points are learned
~/.cache/pypoetry/virtualenvs/bayes-optim-py3.7/src/gpytorch/gpytorch/distributions/multivariate_normal.py in log_prob(self, value)
127
128 # Get log determininat and first part of quadratic form
--> 129 inv_quad, logdet = covar.inv_quad_logdet(inv_quad_rhs=diff.unsqueeze(-1), logdet=True)
130
131 res = -0.5 * sum([inv_quad, logdet, diff.size(-1) * math.log(2 * math.pi)])
~/.cache/pypoetry/virtualenvs/bayes-optim-py3.7/src/gpytorch/gpytorch/lazy/lazy_tensor.py in inv_quad_logdet(self, inv_quad_rhs, logdet, reduce_inv_quad)
990 from .chol_lazy_tensor import CholLazyTensor
991
--> 992 cholesky = CholLazyTensor(self.cholesky())
993 return cholesky.inv_quad_logdet(inv_quad_rhs=inv_quad_rhs, logdet=logdet, reduce_inv_quad=reduce_inv_quad)
994
~/.cache/pypoetry/virtualenvs/bayes-optim-py3.7/src/gpytorch/gpytorch/lazy/lazy_tensor.py in cholesky(self, upper)
716 (LazyTensor) Cholesky factor (lower triangular)
717 """
--> 718 res = self._cholesky()
719 if upper:
720 res = res.transpose(-1, -2)
~/.cache/pypoetry/virtualenvs/bayes-optim-py3.7/src/gpytorch/gpytorch/utils/memoize.py in g(self, *args, **kwargs)
32 cache_name = name if name is not None else method
33 if not is_in_cache(self, cache_name):
---> 34 add_to_cache(self, cache_name, method(self, *args, **kwargs))
35 return get_from_cache(self, cache_name)
36
~/.cache/pypoetry/virtualenvs/bayes-optim-py3.7/src/gpytorch/gpytorch/lazy/lazy_tensor.py in _cholesky(self)
401 evaluated_mat.register_hook(_ensure_symmetric_grad)
402
--> 403 cholesky = psd_safe_cholesky(evaluated_mat.double()).to(self.dtype)
404 return NonLazyTensor(cholesky)
405
~/.cache/pypoetry/virtualenvs/bayes-optim-py3.7/src/gpytorch/gpytorch/utils/cholesky.py in psd_safe_cholesky(A, upper, out, jitter)
44 continue
45
---> 46 raise e
47
48
~/.cache/pypoetry/virtualenvs/bayes-optim-py3.7/src/gpytorch/gpytorch/utils/cholesky.py in psd_safe_cholesky(A, upper, out, jitter)
19 """
20 try:
---> 21 L = torch.cholesky(A, upper=upper, out=out)
22 # TODO: Remove once fixed in pytorch (#16780)
23 if A.dim() > 2 and A.is_cuda:
RuntimeError: Lapack Error in potrf : the leading minor of order 1 is not positive definite at /pytorch/aten/src/TH/generic/THTensorLapack.cpp:658
|
RuntimeError
|
def __init__(
self,
base_lazy_tensor,
left_interp_indices=None,
left_interp_values=None,
right_interp_indices=None,
right_interp_values=None,
):
base_lazy_tensor = lazify(base_lazy_tensor)
if left_interp_indices is None:
num_rows = base_lazy_tensor.size(-2)
left_interp_indices = torch.arange(
0, num_rows, dtype=torch.long, device=base_lazy_tensor.device
)
left_interp_indices.unsqueeze_(-1)
left_interp_indices = left_interp_indices.expand(
*base_lazy_tensor.batch_shape, num_rows, 1
)
if left_interp_values is None:
left_interp_values = torch.ones(
left_interp_indices.size(),
dtype=base_lazy_tensor.dtype,
device=base_lazy_tensor.device,
)
if right_interp_indices is None:
num_rows = base_lazy_tensor.size(-2)
right_interp_indices = torch.arange(
0, num_rows, dtype=torch.long, device=base_lazy_tensor.device
)
right_interp_indices.unsqueeze_(-1)
right_interp_indices = right_interp_indices.expand(
*base_lazy_tensor.batch_shape, num_rows, 1
)
if right_interp_values is None:
right_interp_values = torch.ones(
right_interp_indices.size(),
dtype=base_lazy_tensor.dtype,
device=base_lazy_tensor.device,
)
if left_interp_indices.shape[:-2] != base_lazy_tensor.batch_shape:
try:
base_lazy_tensor = base_lazy_tensor._expand_batch(
left_interp_indices.shape[:-2]
)
except RuntimeError:
raise RuntimeError(
"interp size ({}) is incompatible with base_lazy_tensor size ({}). ".format(
right_interp_indices.size(), base_lazy_tensor.size()
)
)
super(InterpolatedLazyTensor, self).__init__(
base_lazy_tensor,
left_interp_indices,
left_interp_values,
right_interp_indices,
right_interp_values,
)
self.base_lazy_tensor = base_lazy_tensor
self.left_interp_indices = left_interp_indices
self.left_interp_values = left_interp_values
self.right_interp_indices = right_interp_indices
self.right_interp_values = right_interp_values
|
def __init__(
self,
base_lazy_tensor,
left_interp_indices=None,
left_interp_values=None,
right_interp_indices=None,
right_interp_values=None,
):
base_lazy_tensor = lazify(base_lazy_tensor)
if left_interp_indices is None:
num_rows = base_lazy_tensor.size(-2)
left_interp_indices = torch.arange(
0, num_rows, dtype=torch.long, device=base_lazy_tensor.device
)
left_interp_indices.unsqueeze_(-1)
left_interp_indices = left_interp_indices.expand(
*base_lazy_tensor.batch_shape, num_rows, 1
)
if left_interp_values is None:
left_interp_values = torch.ones(
left_interp_indices.size(),
dtype=base_lazy_tensor.dtype,
device=base_lazy_tensor.device,
)
else:
if left_interp_indices.size() != left_interp_values.size():
raise RuntimeError(
"Expected left_interp_indices ({}) to have the same size as "
"left_interp_values ({})".format(
left_interp_indices.size(), left_interp_values.size()
)
)
if right_interp_indices is None:
num_rows = base_lazy_tensor.size(-2)
right_interp_indices = torch.arange(
0, num_rows, dtype=torch.long, device=base_lazy_tensor.device
)
right_interp_indices.unsqueeze_(-1)
right_interp_indices = right_interp_indices.expand(
*base_lazy_tensor.batch_shape, num_rows, 1
)
if right_interp_values is None:
right_interp_values = torch.ones(
right_interp_indices.size(),
dtype=base_lazy_tensor.dtype,
device=base_lazy_tensor.device,
)
else:
if left_interp_indices.size() != left_interp_values.size():
raise RuntimeError(
"Expected left_interp_indices ({}) to have the same size as "
"left_interp_values ({})".format(
left_interp_indices.size(), left_interp_values.size()
)
)
# Make sure that left/right interp tensors have the same batch shape as the base_lazy_tensor
if left_interp_indices.shape[:-2] != base_lazy_tensor.batch_shape:
raise RuntimeError(
"left interp size ({}) is incompatible with base_lazy_tensor size ({}). Make sure the two "
"have the same number of batch dimensions".format(
left_interp_indices.size(), base_lazy_tensor.size()
)
)
if right_interp_indices.shape[:-2] != base_lazy_tensor.batch_shape:
raise RuntimeError(
"right interp size ({}) is incompatible with base_lazy_tensor size ({}). Make sure the two "
"have the same number of batch dimensions".format(
right_interp_indices.size(), base_lazy_tensor.size()
)
)
super(InterpolatedLazyTensor, self).__init__(
base_lazy_tensor,
left_interp_indices,
left_interp_values,
right_interp_indices,
right_interp_values,
)
self.base_lazy_tensor = base_lazy_tensor
self.left_interp_indices = left_interp_indices
self.left_interp_values = left_interp_values
self.right_interp_indices = right_interp_indices
self.right_interp_values = right_interp_values
|
https://github.com/cornellius-gp/gpytorch/issues/532
|
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-62-28172f8b7beb> in <module>()
1 with torch.no_grad(), gpytorch.settings.fast_pred_var():
----> 2 observed_pred_y1 = likelihood(model(test_x, tast_i_task1))
/data/users/balandat/fbsource/fbcode/buck-out/dev/gen/bento/kernels/bento_kernel_ae_lazarus#link-tree/gpytorch/models/exact_gp.py in __call__(self, *args, **kwargs)
199 )
200
--> 201 full_output = super(ExactGP, self).__call__(*full_inputs, **kwargs)
202 if settings.debug().on():
203 if not isinstance(full_output, MultivariateNormal):
/data/users/balandat/fbsource/fbcode/buck-out/dev/gen/bento/kernels/bento_kernel_ae_lazarus#link-tree/gpytorch/module.py in __call__(self, *inputs, **kwargs)
18
19 def __call__(self, *inputs, **kwargs):
---> 20 outputs = self.forward(*inputs, **kwargs)
21 if isinstance(outputs, list):
22 return [_validate_module_outputs(output) for output in outputs]
<ipython-input-52-b565681c3db1> in forward(self, x, i)
17 covar_i = self.task_covar_module(i)
18 # Multiply the two together to get the covariance we want
---> 19 covar = covar_x.mul(covar_i)
20
21 return gpytorch.distributions.MultivariateNormal(mean_x, covar)
/data/users/balandat/fbsource/fbcode/buck-out/dev/gen/bento/kernels/bento_kernel_ae_lazarus#link-tree/gpytorch/lazy/lazy_tensor.py in mul(self, other)
950 return self._mul_constant(other.view(*other.shape[:-2]))
951
--> 952 return self._mul_matrix(other)
953
954 def ndimension(self):
/data/users/balandat/fbsource/fbcode/buck-out/dev/gen/bento/kernels/bento_kernel_ae_lazarus#link-tree/gpytorch/lazy/lazy_tensor.py in _mul_matrix(self, other)
443 """
444 from .mul_lazy_tensor import MulLazyTensor
--> 445 return MulLazyTensor(self, other).evaluate_kernel()
446
447 def _preconditioner(self):
/data/users/balandat/fbsource/fbcode/buck-out/dev/gen/bento/kernels/bento_kernel_ae_lazarus#link-tree/gpytorch/lazy/lazy_tensor.py in evaluate_kernel(self)
734 all lazily evaluated kernels actually evaluated.
735 """
--> 736 return self.representation_tree()(*self.representation())
737
738 def inv_matmul(self, right_tensor, left_tensor=None):
/data/users/balandat/fbsource/fbcode/buck-out/dev/gen/bento/kernels/bento_kernel_ae_lazarus#link-tree/gpytorch/lazy/mul_lazy_tensor.py in representation_tree(self)
235
236 def representation_tree(self):
--> 237 if self.non_lazy_self is not None:
238 return self.non_lazy_self.representation_tree()
239 else:
/data/users/balandat/fbsource/fbcode/buck-out/dev/gen/bento/kernels/bento_kernel_ae_lazarus#link-tree/gpytorch/lazy/mul_lazy_tensor.py in non_lazy_self(self)
37 if hasattr(self, "_non_lazy_self"):
38 return self._non_lazy_self[0]
---> 39 elif len(self._args) == 1:
40 return self._args[0]
41 else:
/data/users/balandat/fbsource/fbcode/buck-out/dev/gen/bento/kernels/bento_kernel_ae_lazarus#link-tree/gpytorch/lazy/mul_lazy_tensor.py in _args(self)
54 if not hasattr(self, "_mul_args_memo") and not hasattr(self, "_non_lazy_self"):
55 lazy_tensors = sorted(
---> 56 (lv.evaluate_kernel() for lv in self.lazy_tensors), key=lambda lv: lv.root_decomposition_size()
57 )
58
/data/users/balandat/fbsource/fbcode/buck-out/dev/gen/bento/kernels/bento_kernel_ae_lazarus#link-tree/gpytorch/lazy/mul_lazy_tensor.py in <genexpr>(.0)
54 if not hasattr(self, "_mul_args_memo") and not hasattr(self, "_non_lazy_self"):
55 lazy_tensors = sorted(
---> 56 (lv.evaluate_kernel() for lv in self.lazy_tensors), key=lambda lv: lv.root_decomposition_size()
57 )
58
/data/users/balandat/fbsource/fbcode/buck-out/dev/gen/bento/kernels/bento_kernel_ae_lazarus#link-tree/gpytorch/lazy/lazy_evaluated_kernel_tensor.py in evaluate_kernel(self)
159 with settings.lazily_evaluate_kernels(False):
160 self._cached_kernel_eval = self.kernel(
--> 161 x1, x2, diag=False, batch_dims=self.batch_dims, **self.params
162 )
163
/data/users/balandat/fbsource/fbcode/buck-out/dev/gen/bento/kernels/bento_kernel_ae_lazarus#link-tree/gpytorch/kernels/kernel.py in __call__(self, x1, x2, diag, batch_dims, **params)
396 res = LazyEvaluatedKernelTensor(self, x1_, x2_, batch_dims=batch_dims, **params)
397 else:
--> 398 res = super(Kernel, self).__call__(x1_, x2_, batch_dims=batch_dims, **params)
399
400 # TODO: remove bach checking once kernels support arbitrary batch dimensions
/data/users/balandat/fbsource/fbcode/buck-out/dev/gen/bento/kernels/bento_kernel_ae_lazarus#link-tree/gpytorch/module.py in __call__(self, *inputs, **kwargs)
18
19 def __call__(self, *inputs, **kwargs):
---> 20 outputs = self.forward(*inputs, **kwargs)
21 if isinstance(outputs, list):
22 return [_validate_module_outputs(output) for output in outputs]
/data/users/balandat/fbsource/fbcode/buck-out/dev/gen/bento/kernels/bento_kernel_ae_lazarus#link-tree/gpytorch/kernels/index_kernel.py in forward(self, i1, i2, **params)
78 def forward(self, i1, i2, **params):
79 covar_matrix = self._eval_covar_matrix()
---> 80 res = InterpolatedLazyTensor(base_lazy_tensor=covar_matrix, left_interp_indices=i1, right_interp_indices=i2)
81 return res
/data/users/balandat/fbsource/fbcode/buck-out/dev/gen/bento/kernels/bento_kernel_ae_lazarus#link-tree/gpytorch/lazy/interpolated_lazy_tensor.py in __init__(self, base_lazy_tensor, left_interp_indices, left_interp_values, right_interp_indices, right_interp_values)
60 raise RuntimeError(
61 "left interp size ({}) is incompatible with base_lazy_tensor size ({}). Make sure the two "
---> 62 "have the same number of batch dimensions".format(left_interp_indices.size(), base_lazy_tensor.size())
63 )
64 if right_interp_indices.shape[:-2] != base_lazy_tensor.batch_shape:
RuntimeError: left interp size (torch.Size([2, 251, 1])) is incompatible with base_lazy_tensor size (torch.Size([1, 2, 2])). Make sure the two have the same number of batch dimensions
|
RuntimeError
|
def evaluate_kernel(self):
"""
NB: This is a meta LazyTensor, in the sense that evaluate can return
a LazyTensor if the kernel being evaluated does so.
"""
if not self.is_batch:
x1 = self.x1.unsqueeze(0)
x2 = self.x2.unsqueeze(0)
else:
x1 = self.x1
x2 = self.x2
with settings.lazily_evaluate_kernels(False):
temp_active_dims = self.kernel.active_dims
self.kernel.active_dims = None
res = self.kernel(x1, x2, diag=False, batch_dims=self.batch_dims, **self.params)
self.kernel.active_dims = temp_active_dims
if self.squeeze_row:
res.squeeze_(-2)
if self.squeeze_col:
res.squeeze_(-1)
if not self.is_batch and res.ndimension() == 3 and res.size(0) == 1:
res = res[0]
return lazify(res)
|
def evaluate_kernel(self):
"""
NB: This is a meta LazyTensor, in the sense that evaluate can return
a LazyTensor if the kernel being evaluated does so.
"""
if not self.is_batch:
x1 = self.x1.unsqueeze(0)
x2 = self.x2.unsqueeze(0)
else:
x1 = self.x1
x2 = self.x2
with settings.lazily_evaluate_kernels(False):
res = self.kernel(x1, x2, diag=False, batch_dims=self.batch_dims, **self.params)
if self.squeeze_row:
res.squeeze_(-2)
if self.squeeze_col:
res.squeeze_(-1)
if not self.is_batch and res.ndimension() == 3 and res.size(0) == 1:
res = res[0]
return lazify(res)
|
https://github.com/cornellius-gp/gpytorch/issues/575
|
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-67-cb234b5fe124> in <module>
15 output = model(X)
16 # Calc loss and backprop gradients
---> 17 loss = -mll(output, y)
18 loss.backward()
19 if not i % 30:
~/.virtualenvs/GenRS/lib/python3.7/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
18
19 def __call__(self, *inputs, **kwargs):
---> 20 outputs = self.forward(*inputs, **kwargs)
21 if isinstance(outputs, list):
22 return [_validate_module_outputs(output) for output in outputs]
~/.virtualenvs/GenRS/lib/python3.7/site-packages/gpytorch/mlls/exact_marginal_log_likelihood.py in forward(self, output, target, *params)
26 # Get the log prob of the marginal distribution
27 output = self.likelihood(output, *params)
---> 28 res = output.log_prob(target)
29
30 # Add terms for SGPR / when inducing points are learned
~/.virtualenvs/GenRS/lib/python3.7/site-packages/gpytorch/distributions/multivariate_normal.py in log_prob(self, value)
123
124 # Get log determininat and first part of quadratic form
--> 125 inv_quad, logdet = covar.inv_quad_logdet(inv_quad_rhs=diff.unsqueeze(-1), logdet=True)
126
127 res = -0.5 * sum([inv_quad, logdet, diff.size(-1) * math.log(2 * math.pi)])
~/.virtualenvs/GenRS/lib/python3.7/site-packages/gpytorch/lazy/lazy_tensor.py in inv_quad_logdet(self, inv_quad_rhs, logdet, reduce_inv_quad)
745 )
746
--> 747 args = self.representation()
748 if inv_quad_rhs is not None:
749 args = [inv_quad_rhs] + list(args)
~/.virtualenvs/GenRS/lib/python3.7/site-packages/gpytorch/lazy/lazy_tensor.py in representation(self)
980 representation.append(arg)
981 elif hasattr(arg, "representation") and callable(arg.representation): # Is it a LazyTensor?
--> 982 representation += list(arg.representation())
983 else:
984 raise RuntimeError("Representation of a LazyTensor should consist only of Tensors")
~/.virtualenvs/GenRS/lib/python3.7/site-packages/gpytorch/lazy/lazy_evaluated_kernel_tensor.py in representation(self)
306 # representation
307 else:
--> 308 return self.evaluate_kernel().representation()
309
310 def representation_tree(self):
~/.virtualenvs/GenRS/lib/python3.7/site-packages/gpytorch/utils/memoize.py in g(self, *args, **kwargs)
15 cache_name = name if name is not None else method
16 if cache_name not in self._memoize_cache:
---> 17 self._memoize_cache[cache_name] = method(self, *args, **kwargs)
18 return self._memoize_cache[cache_name]
19
~/.virtualenvs/GenRS/lib/python3.7/site-packages/gpytorch/lazy/lazy_evaluated_kernel_tensor.py in evaluate_kernel(self)
260 with settings.lazily_evaluate_kernels(False):
261 res = self.kernel(
--> 262 x1, x2, diag=False, batch_dims=self.batch_dims, **self.params
263 )
264 if self.squeeze_row:
~/.virtualenvs/GenRS/lib/python3.7/site-packages/gpytorch/kernels/kernel.py in __call__(self, x1, x2, diag, batch_dims, **params)
313 # Select the active dimensions
314 if self.active_dims is not None:
--> 315 x1_ = x1_.index_select(-1, self.active_dims)
316 if x2_ is not None:
317 x2_ = x2_.index_select(-1, self.active_dims)
RuntimeError: invalid argument 3: out of range at /Users/soumith/b101/2019_02_04/wheel_build_dirs/wheel_3.7/pytorch/aten/src/TH/generic/THTensor.cpp:350
|
RuntimeError
|
def root_inv_decomposition(self, initial_vectors=None, test_vectors=None):
"""
Returns a (usually low-rank) root decomposotion lazy tensor of a PSD matrix.
This can be used for sampling from a Gaussian distribution, or for obtaining a
low-rank version of a matrix
"""
from .root_lazy_tensor import RootLazyTensor
if self.shape[-2:].numel() == 1:
return RootLazyTensor(1 / self.evaluate().sqrt())
if not self.is_square:
raise RuntimeError(
"root_inv_decomposition only operates on (batches of) square (symmetric) LazyTensors. "
"Got a {} of size {}.".format(self.__class__.__name__, self.size())
)
if initial_vectors is not None:
if self.dim() == 2 and initial_vectors.dim() == 1:
if self.shape[-1] != initial_vectors.numel():
raise RuntimeError(
"LazyTensor (size={}) cannot be multiplied with initial_vectors (size={}).".format(
self.shape, initial_vectors.shape
)
)
elif self.dim() != initial_vectors.dim():
raise RuntimeError(
"LazyTensor (size={}) and initial_vectors (size={}) should have the same number "
"of dimensions.".format(self.shape, initial_vectors.shape)
)
elif (
self.batch_shape != initial_vectors.shape[:-2]
or self.shape[-1] != initial_vectors.shape[-2]
):
raise RuntimeError(
"LazyTensor (size={}) cannot be multiplied with initial_vectors (size={}).".format(
self.shape, initial_vectors.shape
)
)
roots, inv_roots = RootDecomposition(
self.representation_tree(),
max_iter=self.root_decomposition_size(),
dtype=self.dtype,
device=self.device,
batch_shape=self.batch_shape,
matrix_shape=self.matrix_shape,
root=True,
inverse=True,
initial_vectors=initial_vectors,
)(*self.representation())
if initial_vectors is not None and initial_vectors.size(-1) > 1:
self._memoize_cache["root_decomposition"] = RootLazyTensor(roots[0])
else:
self._memoize_cache["root_decomposition"] = RootLazyTensor(roots)
# Choose the best of the inv_roots, if there were more than one initial vectors
if initial_vectors is not None and initial_vectors.size(-1) > 1:
num_probes = initial_vectors.size(-1)
test_vectors = test_vectors.unsqueeze(0)
# Compute solves
solves = inv_roots.matmul(inv_roots.transpose(-1, -2).matmul(test_vectors))
# Compute self * solves
solves = (
solves.permute(*range(1, self.dim() + 1), 0)
.contiguous()
.view(*self.batch_shape, self.matrix_shape[-1], -1)
)
mat_times_solves = self.matmul(solves)
mat_times_solves = mat_times_solves.view(
*self.batch_shape, self.matrix_shape[-1], -1, num_probes
).permute(-1, *range(0, self.dim()))
# Compute residuals
residuals = (mat_times_solves - test_vectors).norm(2, dim=-2)
residuals = residuals.view(residuals.size(0), -1).sum(-1)
# Choose solve that best fits
_, best_solve_index = residuals.min(0)
inv_root = inv_roots[best_solve_index].squeeze(0)
else:
inv_root = inv_roots
return RootLazyTensor(inv_root)
|
def root_inv_decomposition(self, initial_vectors=None, test_vectors=None):
"""
Returns a (usually low-rank) root decomposotion lazy tensor of a PSD matrix.
This can be used for sampling from a Gaussian distribution, or for obtaining a
low-rank version of a matrix
"""
from .root_lazy_tensor import RootLazyTensor
if not self.is_square:
raise RuntimeError(
"root_inv_decomposition only operates on (batches of) square (symmetric) LazyTensors. "
"Got a {} of size {}.".format(self.__class__.__name__, self.size())
)
if initial_vectors is not None:
if self.dim() == 2 and initial_vectors.dim() == 1:
if self.shape[-1] != initial_vectors.numel():
raise RuntimeError(
"LazyTensor (size={}) cannot be multiplied with initial_vectors (size={}).".format(
self.shape, initial_vectors.shape
)
)
elif self.dim() != initial_vectors.dim():
raise RuntimeError(
"LazyTensor (size={}) and initial_vectors (size={}) should have the same number "
"of dimensions.".format(self.shape, initial_vectors.shape)
)
elif (
self.batch_shape != initial_vectors.shape[:-2]
or self.shape[-1] != initial_vectors.shape[-2]
):
raise RuntimeError(
"LazyTensor (size={}) cannot be multiplied with initial_vectors (size={}).".format(
self.shape, initial_vectors.shape
)
)
roots, inv_roots = RootDecomposition(
self.representation_tree(),
max_iter=self.root_decomposition_size(),
dtype=self.dtype,
device=self.device,
batch_shape=self.batch_shape,
matrix_shape=self.matrix_shape,
root=True,
inverse=True,
initial_vectors=initial_vectors,
)(*self.representation())
if initial_vectors is not None and initial_vectors.size(-1) > 1:
self._memoize_cache["root_decomposition"] = RootLazyTensor(roots[0])
else:
self._memoize_cache["root_decomposition"] = RootLazyTensor(roots)
# Choose the best of the inv_roots, if there were more than one initial vectors
if initial_vectors is not None and initial_vectors.size(-1) > 1:
num_probes = initial_vectors.size(-1)
test_vectors = test_vectors.unsqueeze(0)
# Compute solves
solves = inv_roots.matmul(inv_roots.transpose(-1, -2).matmul(test_vectors))
# Compute self * solves
solves = (
solves.permute(*range(1, self.dim() + 1), 0)
.contiguous()
.view(*self.batch_shape, self.matrix_shape[-1], -1)
)
mat_times_solves = self.matmul(solves)
mat_times_solves = mat_times_solves.view(
*self.batch_shape, self.matrix_shape[-1], -1, num_probes
).permute(-1, *range(0, self.dim()))
# Compute residuals
residuals = (mat_times_solves - test_vectors).norm(2, dim=-2)
residuals = residuals.view(residuals.size(0), -1).sum(-1)
# Choose solve that best fits
_, best_solve_index = residuals.min(0)
inv_root = inv_roots[best_solve_index].squeeze(0)
else:
inv_root = inv_roots
return RootLazyTensor(inv_root)
|
https://github.com/cornellius-gp/gpytorch/issues/548
|
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-1-05bfb3c2646a> in <module>
27 # this throws the error
28 with gpytorch.settings.fast_pred_var():
---> 29 model(torch.rand(100, 2))
~/.cache/pypoetry/virtualenvs/bayes-optim-py3.7/lib/python3.7/site-packages/gpytorch/models/exact_gp.py in __call__(self, *args, **kwargs)
262
263 predictive_mean = self.prediction_strategy.exact_predictive_mean(test_mean, test_train_covar)
--> 264 predictive_covar = self.prediction_strategy.exact_predictive_covar(test_test_covar, test_train_covar)
265
266 if num_tasks > 1:
~/.cache/pypoetry/virtualenvs/bayes-optim-py3.7/lib/python3.7/site-packages/gpytorch/models/exact_prediction_strategies.py in exact_predictive_covar(self, test_test_covar, test_train_covar)
324 return test_test_covar + MatmulLazyTensor(test_train_covar, covar_correction_rhs)
325
--> 326 precomputed_cache = self.covar_cache
327 covar_inv_quad_form_root = self._exact_predictive_covar_inv_quad_form_root(precomputed_cache,
328 test_train_covar)
~/.cache/pypoetry/virtualenvs/bayes-optim-py3.7/lib/python3.7/site-packages/gpytorch/utils/memoize.py in g(self, *args, **kwargs)
15 cache_name = name if name is not None else method
16 if cache_name not in self._memoize_cache:
---> 17 self._memoize_cache[cache_name] = method(self, *args, **kwargs)
18 return self._memoize_cache[cache_name]
19
~/.cache/pypoetry/virtualenvs/bayes-optim-py3.7/lib/python3.7/site-packages/gpytorch/models/exact_prediction_strategies.py in covar_cache(self)
265 train_train_covar_inv_root = train_train_covar[0].root_inv_decomposition().root.evaluate()
266 else:
--> 267 train_train_covar_inv_root = train_train_covar.root_inv_decomposition().root.evaluate()
268
269 return self._exact_predictive_covar_inv_quad_form_cache(train_train_covar_inv_root, self._last_test_train_covar)
~/.cache/pypoetry/virtualenvs/bayes-optim-py3.7/lib/python3.7/site-packages/gpytorch/utils/memoize.py in g(self, *args, **kwargs)
15 cache_name = name if name is not None else method
16 if cache_name not in self._memoize_cache:
---> 17 self._memoize_cache[cache_name] = method(self, *args, **kwargs)
18 return self._memoize_cache[cache_name]
19
~/.cache/pypoetry/virtualenvs/bayes-optim-py3.7/lib/python3.7/site-packages/gpytorch/lazy/lazy_tensor.py in root_inv_decomposition(self, initial_vectors, test_vectors)
1088 inverse=True,
1089 initial_vectors=initial_vectors,
-> 1090 )(*self.representation())
1091
1092 if initial_vectors is not None and initial_vectors.size(-1) > 1:
~/.cache/pypoetry/virtualenvs/bayes-optim-py3.7/lib/python3.7/site-packages/gpytorch/functions/_root_decomposition.py in forward(self, *matrix_args)
51 matrix_shape=self.matrix_shape,
52 batch_shape=self.batch_shape,
---> 53 init_vecs=self.initial_vectors,
54 )
55
~/.cache/pypoetry/virtualenvs/bayes-optim-py3.7/lib/python3.7/site-packages/gpytorch/utils/lanczos.py in lanczos_tridiag(matmul_closure, max_iter, dtype, device, matrix_shape, batch_shape, init_vecs, num_init_vecs, tol)
80 # Copy over alpha_0 and beta_0 to t_mat
81 t_mat[0, 0].copy_(alpha_0)
---> 82 t_mat[0, 1].copy_(beta_0)
83 t_mat[1, 0].copy_(beta_0)
84
IndexError: index 1 is out of bounds for dimension 0 with size 1
|
IndexError
|
def __init__(self, representation_tree, has_left=False):
self.representation_tree = representation_tree
self.has_left = has_left
|
def __init__(self, representation_tree, preconditioner=None, has_left=False):
self.representation_tree = representation_tree
self.preconditioner = preconditioner
self.has_left = has_left
|
https://github.com/cornellius-gp/gpytorch/issues/501
|
$ python test_1D_grid_gp_regression.py
E.
======================================================================
ERROR: test_grid_gp_mean_abs_error (__main__.TestGridGPRegression)
----------------------------------------------------------------------
Traceback (most recent call last):
File "test_1D_grid_gp_regression.py", line 87, in test_grid_gp_mean_abs_error
loss.backward()
File "/usr/local/lib/python3.6/dist-packages/torch/tensor.py", line 102, in backward
torch.autograd.backward(self, gradient, retain_graph, create_graph)
File "/usr/local/lib/python3.6/dist-packages/torch/autograd/__init__.py", line 90, in backward
allow_unreachable=True) # allow_unreachable flag
File "/home/gpytorch/gpytorch/functions/_matmul.py", line 34, in backward
rhs = self.saved_tensors[0]
RuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation
----------------------------------------------------------------------
Ran 2 tests in 0.088s
FAILED (errors=1)
|
RuntimeError
|
def forward(self, *args):
left_tensor = None
right_tensor = None
matrix_args = None
if self.has_left:
left_tensor, right_tensor, *matrix_args = args
else:
right_tensor, *matrix_args = args
orig_right_tensor = right_tensor
lazy_tsr = self.representation_tree(*matrix_args)
with torch.no_grad():
self.preconditioner = lazy_tsr.detach()._inv_matmul_preconditioner()
self.is_vector = False
if right_tensor.ndimension() == 1:
right_tensor = right_tensor.unsqueeze(-1)
self.is_vector = True
# Perform solves (for inv_quad) and tridiagonalization (for estimating logdet)
if self.has_left:
rhs = torch.cat([left_tensor.transpose(-1, -2), right_tensor], -1)
solves = lazy_tsr._solve(rhs, self.preconditioner)
res = solves[..., left_tensor.size(-2) :]
res = left_tensor @ res
else:
solves = lazy_tsr._solve(right_tensor, self.preconditioner)
res = solves
if self.is_vector:
res = res.squeeze(-1)
if self.has_left:
args = [solves, left_tensor, orig_right_tensor] + list(matrix_args)
else:
args = [solves, orig_right_tensor] + list(matrix_args)
self.save_for_backward(*args)
if settings.memory_efficient.off():
self._lazy_tsr = lazy_tsr
return res
|
def forward(self, *args):
left_tensor = None
right_tensor = None
matrix_args = None
if self.has_left:
left_tensor, right_tensor, *matrix_args = args
else:
right_tensor, *matrix_args = args
orig_right_tensor = right_tensor
lazy_tsr = self.representation_tree(*matrix_args)
self.is_vector = False
if right_tensor.ndimension() == 1:
right_tensor = right_tensor.unsqueeze(-1)
self.is_vector = True
# Perform solves (for inv_quad) and tridiagonalization (for estimating logdet)
if self.has_left:
rhs = torch.cat([left_tensor.transpose(-1, -2), right_tensor], -1)
solves = lazy_tsr._solve(rhs, self.preconditioner)
res = solves[..., left_tensor.size(-2) :]
res = left_tensor @ res
else:
solves = lazy_tsr._solve(right_tensor, self.preconditioner)
res = solves
if self.is_vector:
res = res.squeeze(-1)
if self.has_left:
args = [solves, left_tensor, orig_right_tensor] + list(matrix_args)
else:
args = [solves, orig_right_tensor] + list(matrix_args)
self.save_for_backward(*args)
if settings.memory_efficient.off():
self._lazy_tsr = lazy_tsr
return res
|
https://github.com/cornellius-gp/gpytorch/issues/501
|
$ python test_1D_grid_gp_regression.py
E.
======================================================================
ERROR: test_grid_gp_mean_abs_error (__main__.TestGridGPRegression)
----------------------------------------------------------------------
Traceback (most recent call last):
File "test_1D_grid_gp_regression.py", line 87, in test_grid_gp_mean_abs_error
loss.backward()
File "/usr/local/lib/python3.6/dist-packages/torch/tensor.py", line 102, in backward
torch.autograd.backward(self, gradient, retain_graph, create_graph)
File "/usr/local/lib/python3.6/dist-packages/torch/autograd/__init__.py", line 90, in backward
allow_unreachable=True) # allow_unreachable flag
File "/home/gpytorch/gpytorch/functions/_matmul.py", line 34, in backward
rhs = self.saved_tensors[0]
RuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation
----------------------------------------------------------------------
Ran 2 tests in 0.088s
FAILED (errors=1)
|
RuntimeError
|
def __init__(
self,
representation_tree,
dtype,
device,
matrix_shape,
batch_shape=torch.Size(),
inv_quad=False,
logdet=False,
probe_vectors=None,
probe_vector_norms=None,
):
if not (inv_quad or logdet):
raise RuntimeError("Either inv_quad or logdet must be true (or both)")
self.representation_tree = representation_tree
self.dtype = dtype
self.device = device
self.matrix_shape = matrix_shape
self.batch_shape = batch_shape
self.inv_quad = inv_quad
self.logdet = logdet
if (probe_vectors is None or probe_vector_norms is None) and logdet:
num_random_probes = settings.num_trace_samples.value()
probe_vectors = torch.empty(
matrix_shape[-1], num_random_probes, dtype=dtype, device=device
)
probe_vectors.bernoulli_().mul_(2).add_(-1)
probe_vector_norms = torch.norm(probe_vectors, 2, dim=-2, keepdim=True)
if batch_shape is not None:
probe_vectors = probe_vectors.expand(
*batch_shape, matrix_shape[-1], num_random_probes
)
probe_vector_norms = probe_vector_norms.expand(
*batch_shape, 1, num_random_probes
)
probe_vectors = probe_vectors.div(probe_vector_norms)
self.probe_vectors = probe_vectors
self.probe_vector_norms = probe_vector_norms
|
def __init__(
self,
representation_tree,
dtype,
device,
matrix_shape,
batch_shape=torch.Size(),
inv_quad=False,
logdet=False,
preconditioner=None,
logdet_correction=None,
probe_vectors=None,
probe_vector_norms=None,
):
if not (inv_quad or logdet):
raise RuntimeError("Either inv_quad or logdet must be true (or both)")
self.representation_tree = representation_tree
self.dtype = dtype
self.device = device
self.matrix_shape = matrix_shape
self.batch_shape = batch_shape
self.inv_quad = inv_quad
self.logdet = logdet
self.preconditioner = preconditioner
self.logdet_correction = logdet_correction
if (probe_vectors is None or probe_vector_norms is None) and logdet:
num_random_probes = settings.num_trace_samples.value()
probe_vectors = torch.empty(
matrix_shape[-1], num_random_probes, dtype=dtype, device=device
)
probe_vectors.bernoulli_().mul_(2).add_(-1)
probe_vector_norms = torch.norm(probe_vectors, 2, dim=-2, keepdim=True)
if batch_shape is not None:
probe_vectors = probe_vectors.expand(
*batch_shape, matrix_shape[-1], num_random_probes
)
probe_vector_norms = probe_vector_norms.expand(
*batch_shape, 1, num_random_probes
)
probe_vectors = probe_vectors.div(probe_vector_norms)
self.probe_vectors = probe_vectors
self.probe_vector_norms = probe_vector_norms
|
https://github.com/cornellius-gp/gpytorch/issues/501
|
$ python test_1D_grid_gp_regression.py
E.
======================================================================
ERROR: test_grid_gp_mean_abs_error (__main__.TestGridGPRegression)
----------------------------------------------------------------------
Traceback (most recent call last):
File "test_1D_grid_gp_regression.py", line 87, in test_grid_gp_mean_abs_error
loss.backward()
File "/usr/local/lib/python3.6/dist-packages/torch/tensor.py", line 102, in backward
torch.autograd.backward(self, gradient, retain_graph, create_graph)
File "/usr/local/lib/python3.6/dist-packages/torch/autograd/__init__.py", line 90, in backward
allow_unreachable=True) # allow_unreachable flag
File "/home/gpytorch/gpytorch/functions/_matmul.py", line 34, in backward
rhs = self.saved_tensors[0]
RuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation
----------------------------------------------------------------------
Ran 2 tests in 0.088s
FAILED (errors=1)
|
RuntimeError
|
def forward(self, *args):
"""
*args - The arguments representing the PSD matrix A (or batch of PSD matrices A)
If self.inv_quad is true, the first entry in *args is inv_quad_rhs (Tensor)
- the RHS of the matrix solves.
Returns:
- (Scalar) The inverse quadratic form (or None, if self.inv_quad is False)
- (Scalar) The log determinant (or None, self.if logdet is False)
"""
matrix_args = None
inv_quad_rhs = None
if self.inv_quad:
matrix_args = args[1:]
inv_quad_rhs = args[0]
else:
matrix_args = args
# Get closure for matmul
lazy_tsr = self.representation_tree(*matrix_args)
with torch.no_grad():
preconditioner, logdet_correction = lazy_tsr.detach()._preconditioner()
# Collect terms for LinearCG
# We use LinearCG for both matrix solves and for stochastically estimating the log det
rhs_list = []
num_random_probes = 0
num_inv_quad_solves = 0
# RHS for logdet
if self.logdet:
rhs_list.append(self.probe_vectors)
num_random_probes = self.probe_vectors.size(-1)
# RHS for inv_quad
self.is_vector = False
if self.inv_quad:
if inv_quad_rhs.ndimension() == 1:
inv_quad_rhs = inv_quad_rhs.unsqueeze(-1)
self.is_vector = True
rhs_list.append(inv_quad_rhs)
num_inv_quad_solves = inv_quad_rhs.size(-1)
# Perform solves (for inv_quad) and tridiagonalization (for estimating logdet)
rhs = torch.cat(rhs_list, -1)
t_mat = None
if self.logdet and settings.skip_logdet_forward.off():
solves, t_mat = lazy_tsr._solve(
rhs, preconditioner, num_tridiag=num_random_probes
)
else:
solves = lazy_tsr._solve(rhs, preconditioner, num_tridiag=0)
# Final values to return
logdet_term = torch.zeros(
lazy_tsr.batch_shape, dtype=self.dtype, device=self.device
)
inv_quad_term = torch.zeros(
lazy_tsr.batch_shape, dtype=self.dtype, device=self.device
)
# Compute logdet from tridiagonalization
if self.logdet and settings.skip_logdet_forward.off():
if torch.any(torch.isnan(t_mat)).item():
logdet_term = torch.tensor(
float("nan"), dtype=self.dtype, device=self.device
)
else:
if self.batch_shape is None:
t_mat = t_mat.unsqueeze(1)
eigenvalues, eigenvectors = lanczos_tridiag_to_diag(t_mat)
slq = StochasticLQ()
(logdet_term,) = slq.evaluate(
self.matrix_shape, eigenvalues, eigenvectors, [lambda x: x.log()]
)
# Add correction
if logdet_correction is not None:
logdet_term = logdet_term + logdet_correction
# Extract inv_quad solves from all the solves
if self.inv_quad:
inv_quad_solves = solves.narrow(-1, num_random_probes, num_inv_quad_solves)
inv_quad_term = (inv_quad_solves * inv_quad_rhs).sum(-2)
self.num_random_probes = num_random_probes
self.num_inv_quad_solves = num_inv_quad_solves
to_save = list(matrix_args) + [
solves,
]
self.save_for_backward(*to_save)
if settings.memory_efficient.off():
self._lazy_tsr = lazy_tsr
return inv_quad_term, logdet_term
|
def forward(self, *args):
"""
*args - The arguments representing the PSD matrix A (or batch of PSD matrices A)
If self.inv_quad is true, the first entry in *args is inv_quad_rhs (Tensor)
- the RHS of the matrix solves.
Returns:
- (Scalar) The inverse quadratic form (or None, if self.inv_quad is False)
- (Scalar) The log determinant (or None, self.if logdet is False)
"""
matrix_args = None
inv_quad_rhs = None
if self.inv_quad:
matrix_args = args[1:]
inv_quad_rhs = args[0]
else:
matrix_args = args
# Get closure for matmul
lazy_tsr = self.representation_tree(*matrix_args)
# Collect terms for LinearCG
# We use LinearCG for both matrix solves and for stochastically estimating the log det
rhs_list = []
num_random_probes = 0
num_inv_quad_solves = 0
# RHS for logdet
if self.logdet:
rhs_list.append(self.probe_vectors)
num_random_probes = self.probe_vectors.size(-1)
# RHS for inv_quad
self.is_vector = False
if self.inv_quad:
if inv_quad_rhs.ndimension() == 1:
inv_quad_rhs = inv_quad_rhs.unsqueeze(-1)
self.is_vector = True
rhs_list.append(inv_quad_rhs)
num_inv_quad_solves = inv_quad_rhs.size(-1)
# Perform solves (for inv_quad) and tridiagonalization (for estimating logdet)
rhs = torch.cat(rhs_list, -1)
t_mat = None
if self.logdet and settings.skip_logdet_forward.off():
solves, t_mat = lazy_tsr._solve(
rhs, self.preconditioner, num_tridiag=num_random_probes
)
else:
solves = lazy_tsr._solve(rhs, self.preconditioner, num_tridiag=0)
# Final values to return
logdet_term = torch.zeros(
lazy_tsr.batch_shape, dtype=self.dtype, device=self.device
)
inv_quad_term = torch.zeros(
lazy_tsr.batch_shape, dtype=self.dtype, device=self.device
)
# Compute logdet from tridiagonalization
if self.logdet and settings.skip_logdet_forward.off():
if torch.any(torch.isnan(t_mat)).item():
logdet_term = torch.tensor(
float("nan"), dtype=self.dtype, device=self.device
)
else:
if self.batch_shape is None:
t_mat = t_mat.unsqueeze(1)
eigenvalues, eigenvectors = lanczos_tridiag_to_diag(t_mat)
slq = StochasticLQ()
(logdet_term,) = slq.evaluate(
self.matrix_shape, eigenvalues, eigenvectors, [lambda x: x.log()]
)
# Add correction
if self.logdet_correction is not None:
logdet_term = logdet_term + self.logdet_correction
# Extract inv_quad solves from all the solves
if self.inv_quad:
inv_quad_solves = solves.narrow(-1, num_random_probes, num_inv_quad_solves)
inv_quad_term = (inv_quad_solves * inv_quad_rhs).sum(-2)
self.num_random_probes = num_random_probes
self.num_inv_quad_solves = num_inv_quad_solves
to_save = list(matrix_args) + [
solves,
]
self.save_for_backward(*to_save)
if settings.memory_efficient.off():
self._lazy_tsr = lazy_tsr
return inv_quad_term, logdet_term
|
https://github.com/cornellius-gp/gpytorch/issues/501
|
$ python test_1D_grid_gp_regression.py
E.
======================================================================
ERROR: test_grid_gp_mean_abs_error (__main__.TestGridGPRegression)
----------------------------------------------------------------------
Traceback (most recent call last):
File "test_1D_grid_gp_regression.py", line 87, in test_grid_gp_mean_abs_error
loss.backward()
File "/usr/local/lib/python3.6/dist-packages/torch/tensor.py", line 102, in backward
torch.autograd.backward(self, gradient, retain_graph, create_graph)
File "/usr/local/lib/python3.6/dist-packages/torch/autograd/__init__.py", line 90, in backward
allow_unreachable=True) # allow_unreachable flag
File "/home/gpytorch/gpytorch/functions/_matmul.py", line 34, in backward
rhs = self.saved_tensors[0]
RuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation
----------------------------------------------------------------------
Ran 2 tests in 0.088s
FAILED (errors=1)
|
RuntimeError
|
def inv_matmul(self, right_tensor, left_tensor=None):
"""
Computes a linear solve (w.r.t self = :math:`A`) with several right hand sides :math:`R`.
I.e. computes
... math::
\begin{equation}
A^{-1} R,
\end{equation}
where :math:`R` is :attr:`right_tensor` and :math:`A` is the LazyTensor.
If :attr:`left_tensor` is supplied, computes
... math::
\begin{equation}
L A^{-1} R,
\end{equation}
where :math:`L` is :attr:`left_tensor`. Supplying this can reduce the number of
CG calls required.
Args:
- :obj:`torch.tensor` (n x k) - Matrix :math:`R` right hand sides
- :obj:`torch.tensor` (m x n) - Optional matrix :math:`L` to perform left multiplication with
Returns:
- :obj:`torch.tensor` - :math:`A^{-1}R` or :math:`LA^{-1}R`.
"""
if not self.is_square:
raise RuntimeError(
"inv_matmul only operates on (batches of) square (positive semi-definite) LazyTensors. "
"Got a {} of size {}.".format(self.__class__.__name__, self.size())
)
if self.dim() == 2 and right_tensor.dim() == 1:
if self.shape[-1] != right_tensor.numel():
raise RuntimeError(
"LazyTensor (size={}) cannot be multiplied with right-hand-side Tensor (size={}).".format(
self.shape, right_tensor.shape
)
)
func = InvMatmul(
self.representation_tree(),
has_left=(left_tensor is not None),
)
if left_tensor is None:
return func(right_tensor, *self.representation())
else:
return func(left_tensor, right_tensor, *self.representation())
|
def inv_matmul(self, right_tensor, left_tensor=None):
"""
Computes a linear solve (w.r.t self = :math:`A`) with several right hand sides :math:`R`.
I.e. computes
... math::
\begin{equation}
A^{-1} R,
\end{equation}
where :math:`R` is :attr:`right_tensor` and :math:`A` is the LazyTensor.
If :attr:`left_tensor` is supplied, computes
... math::
\begin{equation}
L A^{-1} R,
\end{equation}
where :math:`L` is :attr:`left_tensor`. Supplying this can reduce the number of
CG calls required.
Args:
- :obj:`torch.tensor` (n x k) - Matrix :math:`R` right hand sides
- :obj:`torch.tensor` (m x n) - Optional matrix :math:`L` to perform left multiplication with
Returns:
- :obj:`torch.tensor` - :math:`A^{-1}R` or :math:`LA^{-1}R`.
"""
if not self.is_square:
raise RuntimeError(
"inv_matmul only operates on (batches of) square (positive semi-definite) LazyTensors. "
"Got a {} of size {}.".format(self.__class__.__name__, self.size())
)
if self.dim() == 2 and right_tensor.dim() == 1:
if self.shape[-1] != right_tensor.numel():
raise RuntimeError(
"LazyTensor (size={}) cannot be multiplied with right-hand-side Tensor (size={}).".format(
self.shape, right_tensor.shape
)
)
func = InvMatmul(
self.representation_tree(),
preconditioner=self._inv_matmul_preconditioner(),
has_left=(left_tensor is not None),
)
if left_tensor is None:
return func(right_tensor, *self.representation())
else:
return func(left_tensor, right_tensor, *self.representation())
|
https://github.com/cornellius-gp/gpytorch/issues/501
|
$ python test_1D_grid_gp_regression.py
E.
======================================================================
ERROR: test_grid_gp_mean_abs_error (__main__.TestGridGPRegression)
----------------------------------------------------------------------
Traceback (most recent call last):
File "test_1D_grid_gp_regression.py", line 87, in test_grid_gp_mean_abs_error
loss.backward()
File "/usr/local/lib/python3.6/dist-packages/torch/tensor.py", line 102, in backward
torch.autograd.backward(self, gradient, retain_graph, create_graph)
File "/usr/local/lib/python3.6/dist-packages/torch/autograd/__init__.py", line 90, in backward
allow_unreachable=True) # allow_unreachable flag
File "/home/gpytorch/gpytorch/functions/_matmul.py", line 34, in backward
rhs = self.saved_tensors[0]
RuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation
----------------------------------------------------------------------
Ran 2 tests in 0.088s
FAILED (errors=1)
|
RuntimeError
|
def inv_quad_logdet(self, inv_quad_rhs=None, logdet=False, reduce_inv_quad=True):
"""
Computes an inverse quadratic form (w.r.t self) with several right hand sides.
I.e. computes tr( tensor^T self^{-1} tensor )
In addition, computes an (approximate) log determinant of the the matrix
Args:
- tensor (tensor nxk) - Vector (or matrix) for inverse quad
Returns:
- scalar - tr( tensor^T (self)^{-1} tensor )
- scalar - log determinant
"""
if not self.is_square:
raise RuntimeError(
"inv_quad_logdet only operates on (batches of) square (positive semi-definite) LazyTensors. "
"Got a {} of size {}.".format(self.__class__.__name__, self.size())
)
if inv_quad_rhs is not None:
if self.dim() == 2 and inv_quad_rhs.dim() == 1:
if self.shape[-1] != inv_quad_rhs.numel():
raise RuntimeError(
"LazyTensor (size={}) cannot be multiplied with right-hand-side Tensor (size={}).".format(
self.shape, inv_quad_rhs.shape
)
)
elif self.dim() != inv_quad_rhs.dim():
raise RuntimeError(
"LazyTensor (size={}) and right-hand-side Tensor (size={}) should have the same number "
"of dimensions.".format(self.shape, inv_quad_rhs.shape)
)
elif (
self.batch_shape != inv_quad_rhs.shape[:-2]
or self.shape[-1] != inv_quad_rhs.shape[-2]
):
raise RuntimeError(
"LazyTensor (size={}) cannot be multiplied with right-hand-side Tensor (size={}).".format(
self.shape, inv_quad_rhs.shape
)
)
args = self.representation()
if inv_quad_rhs is not None:
args = [inv_quad_rhs] + list(args)
probe_vectors, probe_vector_norms = self._probe_vectors_and_norms()
inv_quad_term, logdet_term = InvQuadLogDet(
representation_tree=self.representation_tree(),
matrix_shape=self.matrix_shape,
batch_shape=self.batch_shape,
dtype=self.dtype,
device=self.device,
inv_quad=(inv_quad_rhs is not None),
logdet=logdet,
probe_vectors=probe_vectors,
probe_vector_norms=probe_vector_norms,
)(*args)
if inv_quad_term.numel() and reduce_inv_quad:
inv_quad_term = inv_quad_term.sum(-1)
return inv_quad_term, logdet_term
|
def inv_quad_logdet(self, inv_quad_rhs=None, logdet=False, reduce_inv_quad=True):
"""
Computes an inverse quadratic form (w.r.t self) with several right hand sides.
I.e. computes tr( tensor^T self^{-1} tensor )
In addition, computes an (approximate) log determinant of the the matrix
Args:
- tensor (tensor nxk) - Vector (or matrix) for inverse quad
Returns:
- scalar - tr( tensor^T (self)^{-1} tensor )
- scalar - log determinant
"""
if not self.is_square:
raise RuntimeError(
"inv_quad_logdet only operates on (batches of) square (positive semi-definite) LazyTensors. "
"Got a {} of size {}.".format(self.__class__.__name__, self.size())
)
if inv_quad_rhs is not None:
if self.dim() == 2 and inv_quad_rhs.dim() == 1:
if self.shape[-1] != inv_quad_rhs.numel():
raise RuntimeError(
"LazyTensor (size={}) cannot be multiplied with right-hand-side Tensor (size={}).".format(
self.shape, inv_quad_rhs.shape
)
)
elif self.dim() != inv_quad_rhs.dim():
raise RuntimeError(
"LazyTensor (size={}) and right-hand-side Tensor (size={}) should have the same number "
"of dimensions.".format(self.shape, inv_quad_rhs.shape)
)
elif (
self.batch_shape != inv_quad_rhs.shape[:-2]
or self.shape[-1] != inv_quad_rhs.shape[-2]
):
raise RuntimeError(
"LazyTensor (size={}) cannot be multiplied with right-hand-side Tensor (size={}).".format(
self.shape, inv_quad_rhs.shape
)
)
args = self.representation()
if inv_quad_rhs is not None:
args = [inv_quad_rhs] + list(args)
probe_vectors, probe_vector_norms = self._probe_vectors_and_norms()
inv_quad_term, logdet_term = InvQuadLogDet(
representation_tree=self.representation_tree(),
matrix_shape=self.matrix_shape,
batch_shape=self.batch_shape,
dtype=self.dtype,
device=self.device,
inv_quad=(inv_quad_rhs is not None),
logdet=logdet,
preconditioner=self._preconditioner()[0],
logdet_correction=self._preconditioner()[1],
probe_vectors=probe_vectors,
probe_vector_norms=probe_vector_norms,
)(*args)
if inv_quad_term.numel() and reduce_inv_quad:
inv_quad_term = inv_quad_term.sum(-1)
return inv_quad_term, logdet_term
|
https://github.com/cornellius-gp/gpytorch/issues/501
|
$ python test_1D_grid_gp_regression.py
E.
======================================================================
ERROR: test_grid_gp_mean_abs_error (__main__.TestGridGPRegression)
----------------------------------------------------------------------
Traceback (most recent call last):
File "test_1D_grid_gp_regression.py", line 87, in test_grid_gp_mean_abs_error
loss.backward()
File "/usr/local/lib/python3.6/dist-packages/torch/tensor.py", line 102, in backward
torch.autograd.backward(self, gradient, retain_graph, create_graph)
File "/usr/local/lib/python3.6/dist-packages/torch/autograd/__init__.py", line 90, in backward
allow_unreachable=True) # allow_unreachable flag
File "/home/gpytorch/gpytorch/functions/_matmul.py", line 34, in backward
rhs = self.saved_tensors[0]
RuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation
----------------------------------------------------------------------
Ran 2 tests in 0.088s
FAILED (errors=1)
|
RuntimeError
|
def __init__(
self,
active_dims=None,
batch_size=1,
period_length_prior=None,
eps=1e-6,
param_transform=softplus,
inv_param_transform=None,
**kwargs,
):
super(CosineKernel, self).__init__(
active_dims=active_dims,
param_transform=param_transform,
inv_param_transform=inv_param_transform,
)
self.eps = eps
self.register_parameter(
name="raw_period_length",
parameter=torch.nn.Parameter(torch.zeros(batch_size, 1, 1)),
)
if period_length_prior is not None:
self.register_prior(
"period_length_prior",
period_length_prior,
lambda: self.period_length,
lambda v: self._set_period_length(v),
)
|
def __init__(
self,
active_dims=None,
batch_size=1,
period_length_prior=None,
eps=1e-6,
param_transform=softplus,
inv_param_transform=None,
**kwargs,
):
period_length_prior = _deprecate_kwarg(
kwargs, "log_period_length_prior", "period_length_prior", period_length_prior
)
super(CosineKernel, self).__init__(
active_dims=active_dims,
param_transform=param_transform,
inv_param_transform=inv_param_transform,
)
self.eps = eps
self.register_parameter(
name="raw_period_length",
parameter=torch.nn.Parameter(torch.zeros(batch_size, 1, 1)),
)
if period_length_prior is not None:
self.register_prior(
"period_length_prior",
period_length_prior,
lambda: self.period_length,
lambda v: self._set_period_length(v),
)
|
https://github.com/cornellius-gp/gpytorch/issues/478
|
import gpytorch
gl = gpytorch.likelihoods.GaussianLikelihood()
gl.initialize(noise=1)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File ".../gpytorch/gpytorch/module.py", line 89, in initialize
setattr(self, name, val)
File "../lib/python3.6/site-packages/torch/nn/modules/module.py", line 579, in __setattr__
object.__setattr__(self, name, value)
File ".../gpytorch/gpytorch/likelihoods/gaussian_likelihood.py", line 63, in noise
self.noise_covar.initialize(value)
TypeError: initialize() takes 1 positional argument but 2 were given
|
TypeError
|
def __init__(
self,
has_lengthscale=False,
ard_num_dims=None,
batch_size=1,
active_dims=None,
lengthscale_prior=None,
param_transform=softplus,
inv_param_transform=None,
eps=1e-6,
**kwargs,
):
super(Kernel, self).__init__()
if active_dims is not None and not torch.is_tensor(active_dims):
active_dims = torch.tensor(active_dims, dtype=torch.long)
self.register_buffer("active_dims", active_dims)
self.ard_num_dims = ard_num_dims
self.batch_size = batch_size
self.__has_lengthscale = has_lengthscale
self._param_transform = param_transform
self._inv_param_transform = _get_inv_param_transform(
param_transform, inv_param_transform
)
if has_lengthscale:
self.eps = eps
lengthscale_num_dims = 1 if ard_num_dims is None else ard_num_dims
self.register_parameter(
name="raw_lengthscale",
parameter=torch.nn.Parameter(
torch.zeros(batch_size, 1, lengthscale_num_dims)
),
)
if lengthscale_prior is not None:
self.register_prior(
"lengthscale_prior",
lengthscale_prior,
lambda: self.lengthscale,
lambda v: self._set_lengthscale(v),
)
# TODO: Remove this on next official PyTorch release.
self.__pdist_supports_batch = True
|
def __init__(
self,
has_lengthscale=False,
ard_num_dims=None,
batch_size=1,
active_dims=None,
lengthscale_prior=None,
param_transform=softplus,
inv_param_transform=None,
eps=1e-6,
**kwargs,
):
lengthscale_prior = _deprecate_kwarg(
kwargs, "log_lengthscale_prior", "lengthscale_prior", lengthscale_prior
)
super(Kernel, self).__init__()
if active_dims is not None and not torch.is_tensor(active_dims):
active_dims = torch.tensor(active_dims, dtype=torch.long)
self.register_buffer("active_dims", active_dims)
self.ard_num_dims = ard_num_dims
self.batch_size = batch_size
self.__has_lengthscale = has_lengthscale
self._param_transform = param_transform
self._inv_param_transform = _get_inv_param_transform(
param_transform, inv_param_transform
)
if has_lengthscale:
self.eps = eps
lengthscale_num_dims = 1 if ard_num_dims is None else ard_num_dims
self.register_parameter(
name="raw_lengthscale",
parameter=torch.nn.Parameter(
torch.zeros(batch_size, 1, lengthscale_num_dims)
),
)
if lengthscale_prior is not None:
self.register_prior(
"lengthscale_prior",
lengthscale_prior,
lambda: self.lengthscale,
lambda v: self._set_lengthscale(v),
)
# TODO: Remove this on next official PyTorch release.
self.__pdist_supports_batch = True
|
https://github.com/cornellius-gp/gpytorch/issues/478
|
import gpytorch
gl = gpytorch.likelihoods.GaussianLikelihood()
gl.initialize(noise=1)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File ".../gpytorch/gpytorch/module.py", line 89, in initialize
setattr(self, name, val)
File "../lib/python3.6/site-packages/torch/nn/modules/module.py", line 579, in __setattr__
object.__setattr__(self, name, value)
File ".../gpytorch/gpytorch/likelihoods/gaussian_likelihood.py", line 63, in noise
self.noise_covar.initialize(value)
TypeError: initialize() takes 1 positional argument but 2 were given
|
TypeError
|
def __init__(
self,
nu=2.5,
ard_num_dims=None,
batch_size=1,
active_dims=None,
lengthscale_prior=None,
param_transform=softplus,
inv_param_transform=None,
eps=1e-6,
**kwargs,
):
if nu not in {0.5, 1.5, 2.5}:
raise RuntimeError("nu expected to be 0.5, 1.5, or 2.5")
super(MaternKernel, self).__init__(
has_lengthscale=True,
ard_num_dims=ard_num_dims,
batch_size=batch_size,
active_dims=active_dims,
lengthscale_prior=lengthscale_prior,
param_transform=param_transform,
inv_param_transform=inv_param_transform,
eps=eps,
)
self.nu = nu
|
def __init__(
self,
nu=2.5,
ard_num_dims=None,
batch_size=1,
active_dims=None,
lengthscale_prior=None,
param_transform=softplus,
inv_param_transform=None,
eps=1e-6,
**kwargs,
):
_deprecate_kwarg(
kwargs, "log_lengthscale_prior", "lengthscale_prior", lengthscale_prior
)
if nu not in {0.5, 1.5, 2.5}:
raise RuntimeError("nu expected to be 0.5, 1.5, or 2.5")
super(MaternKernel, self).__init__(
has_lengthscale=True,
ard_num_dims=ard_num_dims,
batch_size=batch_size,
active_dims=active_dims,
lengthscale_prior=lengthscale_prior,
param_transform=param_transform,
inv_param_transform=inv_param_transform,
eps=eps,
)
self.nu = nu
|
https://github.com/cornellius-gp/gpytorch/issues/478
|
import gpytorch
gl = gpytorch.likelihoods.GaussianLikelihood()
gl.initialize(noise=1)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File ".../gpytorch/gpytorch/module.py", line 89, in initialize
setattr(self, name, val)
File "../lib/python3.6/site-packages/torch/nn/modules/module.py", line 579, in __setattr__
object.__setattr__(self, name, value)
File ".../gpytorch/gpytorch/likelihoods/gaussian_likelihood.py", line 63, in noise
self.noise_covar.initialize(value)
TypeError: initialize() takes 1 positional argument but 2 were given
|
TypeError
|
def __init__(
self,
active_dims=None,
batch_size=1,
lengthscale_prior=None,
period_length_prior=None,
param_transform=softplus,
inv_param_transform=None,
eps=1e-6,
**kwargs,
):
super(PeriodicKernel, self).__init__(
has_lengthscale=True,
active_dims=active_dims,
batch_size=batch_size,
lengthscale_prior=lengthscale_prior,
param_transform=param_transform,
inv_param_transform=inv_param_transform,
eps=eps,
)
self.register_parameter(
name="raw_period_length",
parameter=torch.nn.Parameter(torch.zeros(batch_size, 1, 1)),
)
if period_length_prior is not None:
self.register_prior(
"period_length_prior",
period_length_prior,
lambda: self.period_length,
lambda v: self._set_period_length(v),
)
|
def __init__(
self,
active_dims=None,
batch_size=1,
lengthscale_prior=None,
period_length_prior=None,
param_transform=softplus,
inv_param_transform=None,
eps=1e-6,
**kwargs,
):
lengthscale_prior = _deprecate_kwarg(
kwargs, "log_lengthscale_prior", "lengthscale_prior", lengthscale_prior
)
period_length_prior = _deprecate_kwarg(
kwargs, "log_period_length_prior", "period_length_prior", period_length_prior
)
super(PeriodicKernel, self).__init__(
has_lengthscale=True,
active_dims=active_dims,
batch_size=batch_size,
lengthscale_prior=lengthscale_prior,
param_transform=param_transform,
inv_param_transform=inv_param_transform,
eps=eps,
)
self.register_parameter(
name="raw_period_length",
parameter=torch.nn.Parameter(torch.zeros(batch_size, 1, 1)),
)
if period_length_prior is not None:
self.register_prior(
"period_length_prior",
period_length_prior,
lambda: self.period_length,
lambda v: self._set_period_length(v),
)
|
https://github.com/cornellius-gp/gpytorch/issues/478
|
import gpytorch
gl = gpytorch.likelihoods.GaussianLikelihood()
gl.initialize(noise=1)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File ".../gpytorch/gpytorch/module.py", line 89, in initialize
setattr(self, name, val)
File "../lib/python3.6/site-packages/torch/nn/modules/module.py", line 579, in __setattr__
object.__setattr__(self, name, value)
File ".../gpytorch/gpytorch/likelihoods/gaussian_likelihood.py", line 63, in noise
self.noise_covar.initialize(value)
TypeError: initialize() takes 1 positional argument but 2 were given
|
TypeError
|
def __init__(
self,
ard_num_dims=None,
batch_size=1,
active_dims=None,
lengthscale_prior=None,
param_transform=softplus,
inv_param_transform=None,
eps=1e-6,
**kwargs,
):
super(RBFKernel, self).__init__(
has_lengthscale=True,
ard_num_dims=ard_num_dims,
batch_size=batch_size,
active_dims=active_dims,
lengthscale_prior=lengthscale_prior,
param_transform=param_transform,
inv_param_transform=inv_param_transform,
eps=eps,
)
|
def __init__(
self,
ard_num_dims=None,
batch_size=1,
active_dims=None,
lengthscale_prior=None,
param_transform=softplus,
inv_param_transform=None,
eps=1e-6,
**kwargs,
):
_deprecate_kwarg(
kwargs, "log_lengthscale_prior", "lengthscale_prior", lengthscale_prior
)
super(RBFKernel, self).__init__(
has_lengthscale=True,
ard_num_dims=ard_num_dims,
batch_size=batch_size,
active_dims=active_dims,
lengthscale_prior=lengthscale_prior,
param_transform=param_transform,
inv_param_transform=inv_param_transform,
eps=eps,
)
|
https://github.com/cornellius-gp/gpytorch/issues/478
|
import gpytorch
gl = gpytorch.likelihoods.GaussianLikelihood()
gl.initialize(noise=1)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File ".../gpytorch/gpytorch/module.py", line 89, in initialize
setattr(self, name, val)
File "../lib/python3.6/site-packages/torch/nn/modules/module.py", line 579, in __setattr__
object.__setattr__(self, name, value)
File ".../gpytorch/gpytorch/likelihoods/gaussian_likelihood.py", line 63, in noise
self.noise_covar.initialize(value)
TypeError: initialize() takes 1 positional argument but 2 were given
|
TypeError
|
def __init__(
self,
base_kernel,
batch_size=1,
outputscale_prior=None,
param_transform=softplus,
inv_param_transform=None,
**kwargs,
):
super(ScaleKernel, self).__init__(has_lengthscale=False, batch_size=batch_size)
self.base_kernel = base_kernel
self._param_transform = param_transform
self._inv_param_transform = _get_inv_param_transform(
param_transform, inv_param_transform
)
self.register_parameter(
name="raw_outputscale", parameter=torch.nn.Parameter(torch.zeros(batch_size))
)
if outputscale_prior is not None:
self.register_prior(
"outputscale_prior",
outputscale_prior,
lambda: self.outputscale,
lambda v: self._set_outputscale(v),
)
|
def __init__(
self,
base_kernel,
batch_size=1,
outputscale_prior=None,
param_transform=softplus,
inv_param_transform=None,
**kwargs,
):
outputscale_prior = _deprecate_kwarg(
kwargs, "log_outputscale_prior", "outputscale_prior", outputscale_prior
)
super(ScaleKernel, self).__init__(has_lengthscale=False, batch_size=batch_size)
self.base_kernel = base_kernel
self._param_transform = param_transform
self._inv_param_transform = _get_inv_param_transform(
param_transform, inv_param_transform
)
self.register_parameter(
name="raw_outputscale", parameter=torch.nn.Parameter(torch.zeros(batch_size))
)
if outputscale_prior is not None:
self.register_prior(
"outputscale_prior",
outputscale_prior,
lambda: self.outputscale,
lambda v: self._set_outputscale(v),
)
|
https://github.com/cornellius-gp/gpytorch/issues/478
|
import gpytorch
gl = gpytorch.likelihoods.GaussianLikelihood()
gl.initialize(noise=1)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File ".../gpytorch/gpytorch/module.py", line 89, in initialize
setattr(self, name, val)
File "../lib/python3.6/site-packages/torch/nn/modules/module.py", line 579, in __setattr__
object.__setattr__(self, name, value)
File ".../gpytorch/gpytorch/likelihoods/gaussian_likelihood.py", line 63, in noise
self.noise_covar.initialize(value)
TypeError: initialize() takes 1 positional argument but 2 were given
|
TypeError
|
def __init__(
self,
num_mixtures=None,
ard_num_dims=1,
batch_size=1,
active_dims=None,
eps=1e-6,
mixture_scales_prior=None,
mixture_means_prior=None,
mixture_weights_prior=None,
param_transform=softplus,
inv_param_transform=None,
**kwargs,
):
if num_mixtures is None:
raise RuntimeError("num_mixtures is a required argument")
if (
mixture_means_prior is not None
or mixture_scales_prior is not None
or mixture_weights_prior is not None
):
logger.warning("Priors not implemented for SpectralMixtureKernel")
# This kernel does not use the default lengthscale
super(SpectralMixtureKernel, self).__init__(
active_dims=active_dims,
param_transform=param_transform,
inv_param_transform=inv_param_transform,
)
self.num_mixtures = num_mixtures
self.batch_size = batch_size
self.ard_num_dims = ard_num_dims
self.eps = eps
self.register_parameter(
name="raw_mixture_weights",
parameter=torch.nn.Parameter(torch.zeros(self.batch_size, self.num_mixtures)),
)
ms_shape = torch.Size([self.batch_size, self.num_mixtures, 1, self.ard_num_dims])
self.register_parameter(
name="raw_mixture_means", parameter=torch.nn.Parameter(torch.zeros(ms_shape))
)
self.register_parameter(
name="raw_mixture_scales", parameter=torch.nn.Parameter(torch.zeros(ms_shape))
)
|
def __init__(
self,
num_mixtures=None,
ard_num_dims=1,
batch_size=1,
active_dims=None,
eps=1e-6,
mixture_scales_prior=None,
mixture_means_prior=None,
mixture_weights_prior=None,
param_transform=softplus,
inv_param_transform=None,
**kwargs,
):
mixture_scales_prior = _deprecate_kwarg(
kwargs, "log_mixture_scales_prior", "mixture_scales_prior", mixture_scales_prior
)
mixture_means_prior = _deprecate_kwarg(
kwargs, "log_mixture_means_prior", "mixture_means_prior", mixture_means_prior
)
mixture_weights_prior = _deprecate_kwarg(
kwargs,
"log_mixture_weights_prior",
"mixture_weights_prior",
mixture_weights_prior,
)
if num_mixtures is None:
raise RuntimeError("num_mixtures is a required argument")
if (
mixture_means_prior is not None
or mixture_scales_prior is not None
or mixture_weights_prior is not None
):
logger.warning("Priors not implemented for SpectralMixtureKernel")
# This kernel does not use the default lengthscale
super(SpectralMixtureKernel, self).__init__(
active_dims=active_dims,
param_transform=param_transform,
inv_param_transform=inv_param_transform,
)
self.num_mixtures = num_mixtures
self.batch_size = batch_size
self.ard_num_dims = ard_num_dims
self.eps = eps
self.register_parameter(
name="raw_mixture_weights",
parameter=torch.nn.Parameter(torch.zeros(self.batch_size, self.num_mixtures)),
)
ms_shape = torch.Size([self.batch_size, self.num_mixtures, 1, self.ard_num_dims])
self.register_parameter(
name="raw_mixture_means", parameter=torch.nn.Parameter(torch.zeros(ms_shape))
)
self.register_parameter(
name="raw_mixture_scales", parameter=torch.nn.Parameter(torch.zeros(ms_shape))
)
|
https://github.com/cornellius-gp/gpytorch/issues/478
|
import gpytorch
gl = gpytorch.likelihoods.GaussianLikelihood()
gl.initialize(noise=1)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File ".../gpytorch/gpytorch/module.py", line 89, in initialize
setattr(self, name, val)
File "../lib/python3.6/site-packages/torch/nn/modules/module.py", line 579, in __setattr__
object.__setattr__(self, name, value)
File ".../gpytorch/gpytorch/likelihoods/gaussian_likelihood.py", line 63, in noise
self.noise_covar.initialize(value)
TypeError: initialize() takes 1 positional argument but 2 were given
|
TypeError
|
def __init__(
self,
noise_prior=None,
batch_size=1,
param_transform=softplus,
inv_param_transform=None,
**kwargs,
):
noise_covar = HomoskedasticNoise(
noise_prior=noise_prior,
batch_size=batch_size,
param_transform=param_transform,
inv_param_transform=inv_param_transform,
)
super().__init__(noise_covar=noise_covar)
|
def __init__(
self,
noise_prior=None,
batch_size=1,
param_transform=softplus,
inv_param_transform=None,
**kwargs,
):
noise_prior = _deprecate_kwarg(
kwargs, "log_noise_prior", "noise_prior", noise_prior
)
noise_covar = HomoskedasticNoise(
noise_prior=noise_prior,
batch_size=batch_size,
param_transform=param_transform,
inv_param_transform=inv_param_transform,
)
super().__init__(noise_covar=noise_covar)
|
https://github.com/cornellius-gp/gpytorch/issues/478
|
import gpytorch
gl = gpytorch.likelihoods.GaussianLikelihood()
gl.initialize(noise=1)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File ".../gpytorch/gpytorch/module.py", line 89, in initialize
setattr(self, name, val)
File "../lib/python3.6/site-packages/torch/nn/modules/module.py", line 579, in __setattr__
object.__setattr__(self, name, value)
File ".../gpytorch/gpytorch/likelihoods/gaussian_likelihood.py", line 63, in noise
self.noise_covar.initialize(value)
TypeError: initialize() takes 1 positional argument but 2 were given
|
TypeError
|
def noise(self, value):
self.noise_covar.initialize(noise=value)
|
def noise(self, value):
self.noise_covar.initialize(value)
|
https://github.com/cornellius-gp/gpytorch/issues/478
|
import gpytorch
gl = gpytorch.likelihoods.GaussianLikelihood()
gl.initialize(noise=1)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File ".../gpytorch/gpytorch/module.py", line 89, in initialize
setattr(self, name, val)
File "../lib/python3.6/site-packages/torch/nn/modules/module.py", line 579, in __setattr__
object.__setattr__(self, name, value)
File ".../gpytorch/gpytorch/likelihoods/gaussian_likelihood.py", line 63, in noise
self.noise_covar.initialize(value)
TypeError: initialize() takes 1 positional argument but 2 were given
|
TypeError
|
def __init__(
self,
num_tasks,
rank=0,
task_correlation_prior=None,
batch_size=1,
noise_prior=None,
param_transform=softplus,
inv_param_transform=None,
**kwargs,
):
"""
Args:
num_tasks (int): Number of tasks.
rank (int): The rank of the task noise covariance matrix to fit. If `rank` is set to 0,
then a diagonal covariance matrix is fit.
task_correlation_prior (:obj:`gpytorch.priors.Prior`): Prior to use over the task noise correlaton matrix.
Only used when `rank` > 0.
"""
noise_covar = MultitaskHomoskedasticNoise(
num_tasks=num_tasks,
noise_prior=noise_prior,
batch_size=batch_size,
param_transform=param_transform,
inv_param_transform=inv_param_transform,
)
super().__init__(
num_tasks=num_tasks,
noise_covar=noise_covar,
rank=rank,
task_correlation_prior=task_correlation_prior,
batch_size=batch_size,
)
self._param_transform = param_transform
self._inv_param_transform = _get_inv_param_transform(
param_transform, inv_param_transform
)
self.register_parameter(
name="raw_noise", parameter=torch.nn.Parameter(torch.zeros(batch_size, 1))
)
|
def __init__(
self,
num_tasks,
rank=0,
task_correlation_prior=None,
batch_size=1,
noise_prior=None,
param_transform=softplus,
inv_param_transform=None,
**kwargs,
):
"""
Args:
num_tasks (int): Number of tasks.
rank (int): The rank of the task noise covariance matrix to fit. If `rank` is set to 0,
then a diagonal covariance matrix is fit.
task_correlation_prior (:obj:`gpytorch.priors.Prior`): Prior to use over the task noise correlaton matrix.
Only used when `rank` > 0.
"""
task_correlation_prior = _deprecate_kwarg(
kwargs, "task_prior", "task_correlation_prior", task_correlation_prior
)
noise_covar = MultitaskHomoskedasticNoise(
num_tasks=num_tasks,
noise_prior=noise_prior,
batch_size=batch_size,
param_transform=param_transform,
inv_param_transform=inv_param_transform,
)
super().__init__(
num_tasks=num_tasks,
noise_covar=noise_covar,
rank=rank,
task_correlation_prior=task_correlation_prior,
batch_size=batch_size,
)
self._param_transform = param_transform
self._inv_param_transform = _get_inv_param_transform(
param_transform, inv_param_transform
)
self.register_parameter(
name="raw_noise", parameter=torch.nn.Parameter(torch.zeros(batch_size, 1))
)
|
https://github.com/cornellius-gp/gpytorch/issues/478
|
import gpytorch
gl = gpytorch.likelihoods.GaussianLikelihood()
gl.initialize(noise=1)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File ".../gpytorch/gpytorch/module.py", line 89, in initialize
setattr(self, name, val)
File "../lib/python3.6/site-packages/torch/nn/modules/module.py", line 579, in __setattr__
object.__setattr__(self, name, value)
File ".../gpytorch/gpytorch/likelihoods/gaussian_likelihood.py", line 63, in noise
self.noise_covar.initialize(value)
TypeError: initialize() takes 1 positional argument but 2 were given
|
TypeError
|
def __init__(
self,
num_tasks,
rank=0,
task_prior=None,
batch_size=1,
noise_prior=None,
param_transform=softplus,
inv_param_transform=None,
**kwargs,
):
"""
Args:
num_tasks (int): Number of tasks.
rank (int): The rank of the task noise covariance matrix to fit. If `rank` is set to 0,
then a diagonal covariance matrix is fit.
task_prior (:obj:`gpytorch.priors.Prior`): Prior to use over the task noise covariance matrix if
`rank` > 0, or a prior over the log of just the diagonal elements, if `rank` == 0.
"""
super(Likelihood, self).__init__()
self._param_transform = param_transform
self._inv_param_transform = _get_inv_param_transform(
param_transform, inv_param_transform
)
self.register_parameter(
name="raw_noise", parameter=torch.nn.Parameter(torch.zeros(batch_size, 1))
)
if rank == 0:
self.register_parameter(
name="raw_task_noises",
parameter=torch.nn.Parameter(torch.zeros(batch_size, num_tasks)),
)
if task_prior is not None:
raise RuntimeError("Cannot set a `task_prior` if rank=0")
else:
self.register_parameter(
name="task_noise_covar_factor",
parameter=torch.nn.Parameter(torch.randn(batch_size, num_tasks, rank)),
)
if task_prior is not None:
self.register_prior(
"MultitaskErrorCovariancePrior", task_prior, self._eval_covar_matrix
)
self.num_tasks = num_tasks
self.rank = rank
|
def __init__(
self,
num_tasks,
rank=0,
task_prior=None,
batch_size=1,
noise_prior=None,
param_transform=softplus,
inv_param_transform=None,
**kwargs,
):
"""
Args:
num_tasks (int): Number of tasks.
rank (int): The rank of the task noise covariance matrix to fit. If `rank` is set to 0,
then a diagonal covariance matrix is fit.
task_prior (:obj:`gpytorch.priors.Prior`): Prior to use over the task noise covariance matrix if
`rank` > 0, or a prior over the log of just the diagonal elements, if `rank` == 0.
"""
noise_prior = _deprecate_kwarg(
kwargs, "log_noise_prior", "noise_prior", noise_prior
)
super(Likelihood, self).__init__()
self._param_transform = param_transform
self._inv_param_transform = _get_inv_param_transform(
param_transform, inv_param_transform
)
self.register_parameter(
name="raw_noise", parameter=torch.nn.Parameter(torch.zeros(batch_size, 1))
)
if rank == 0:
self.register_parameter(
name="raw_task_noises",
parameter=torch.nn.Parameter(torch.zeros(batch_size, num_tasks)),
)
if task_prior is not None:
raise RuntimeError("Cannot set a `task_prior` if rank=0")
else:
self.register_parameter(
name="task_noise_covar_factor",
parameter=torch.nn.Parameter(torch.randn(batch_size, num_tasks, rank)),
)
if task_prior is not None:
self.register_prior(
"MultitaskErrorCovariancePrior", task_prior, self._eval_covar_matrix
)
self.num_tasks = num_tasks
self.rank = rank
|
https://github.com/cornellius-gp/gpytorch/issues/478
|
import gpytorch
gl = gpytorch.likelihoods.GaussianLikelihood()
gl.initialize(noise=1)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File ".../gpytorch/gpytorch/module.py", line 89, in initialize
setattr(self, name, val)
File "../lib/python3.6/site-packages/torch/nn/modules/module.py", line 579, in __setattr__
object.__setattr__(self, name, value)
File ".../gpytorch/gpytorch/likelihoods/gaussian_likelihood.py", line 63, in noise
self.noise_covar.initialize(value)
TypeError: initialize() takes 1 positional argument but 2 were given
|
TypeError
|
def initialize(self, **kwargs):
"""
Set a value for a parameter
kwargs: (param_name, value) - parameter to initialize
Value can take the form of a tensor, a float, or an int
"""
for name, val in kwargs.items():
if isinstance(val, int):
val = float(val)
if not hasattr(self, name):
raise AttributeError(
"Unknown parameter {p} for {c}".format(
p=name, c=self.__class__.__name__
)
)
elif name not in self._parameters:
setattr(self, name, val)
elif torch.is_tensor(val):
try:
self.__getattr__(name).data.copy_(val.expand_as(self.__getattr__(name)))
except RuntimeError:
self.__getattr__(name).data.copy_(val.view_as(self.__getattr__(name)))
elif isinstance(val, float):
self.__getattr__(name).data.fill_(val)
else:
raise AttributeError(
"Type {t} not valid for initializing parameter {p}".format(
t=type(val), p=name
)
)
# Ensure value is contained in support of prior (if present)
prior_name = "_".join([name, "prior"])
if prior_name in self._priors:
prior, closure, _ = self._priors[prior_name]
try:
prior._validate_sample(closure())
except ValueError as e:
raise ValueError(
"Invalid input value for prior {}. Error:\n{}".format(prior_name, e)
)
return self
|
def initialize(self, **kwargs):
# TODO: Change to initialize actual parameter (e.g. lengthscale) rather than untransformed parameter.
"""
Set a value for a parameter
kwargs: (param_name, value) - parameter to initialize
Value can take the form of a tensor, a float, or an int
"""
from .utils.log_deprecation import MODULES_WITH_LOG_PARAMS
for name, val in kwargs.items():
if isinstance(val, int):
val = float(val)
if (
any(isinstance(self, mod_type) for mod_type in MODULES_WITH_LOG_PARAMS)
and "log_" in name
):
base_name = name.split("log_")[1]
name = "raw_" + base_name
if not torch.is_tensor(val):
val = self._inv_param_transform(torch.tensor(val).exp()).item()
else:
val = self._inv_param_transform(val.exp())
if not hasattr(self, name):
raise AttributeError(
"Unknown parameter {p} for {c}".format(
p=name, c=self.__class__.__name__
)
)
elif name not in self._parameters:
setattr(self, name, val)
elif torch.is_tensor(val):
try:
self.__getattr__(name).data.copy_(val.expand_as(self.__getattr__(name)))
except RuntimeError:
self.__getattr__(name).data.copy_(val.view_as(self.__getattr__(name)))
elif isinstance(val, float):
self.__getattr__(name).data.fill_(val)
else:
raise AttributeError(
"Type {t} not valid for initializing parameter {p}".format(
t=type(val), p=name
)
)
# Ensure value is contained in support of prior (if present)
prior_name = "_".join([name, "prior"])
if prior_name in self._priors:
prior, closure, _ = self._priors[prior_name]
try:
prior._validate_sample(closure())
except ValueError as e:
raise ValueError(
"Invalid input value for prior {}. Error:\n{}".format(prior_name, e)
)
return self
|
https://github.com/cornellius-gp/gpytorch/issues/478
|
import gpytorch
gl = gpytorch.likelihoods.GaussianLikelihood()
gl.initialize(noise=1)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File ".../gpytorch/gpytorch/module.py", line 89, in initialize
setattr(self, name, val)
File "../lib/python3.6/site-packages/torch/nn/modules/module.py", line 579, in __setattr__
object.__setattr__(self, name, value)
File ".../gpytorch/gpytorch/likelihoods/gaussian_likelihood.py", line 63, in noise
self.noise_covar.initialize(value)
TypeError: initialize() takes 1 positional argument but 2 were given
|
TypeError
|
def __getattr__(self, name):
try:
return super().__getattr__(name)
except AttributeError as e:
try:
return super().__getattribute__(name)
except AttributeError:
raise e
|
def __getattr__(self, name):
try:
return super().__getattr__(name)
except AttributeError as e:
from .utils.log_deprecation import LOG_DEPRECATION_MSG, MODULES_WITH_LOG_PARAMS
if (
any(isinstance(self, mod_type) for mod_type in MODULES_WITH_LOG_PARAMS)
and "log_" in name
):
base_name = name.split("log_")[1] # e.g. log_lengthscale -> lengthscale
raw_name = "raw_" + base_name
warnings.warn(
LOG_DEPRECATION_MSG.format(log_name=name, name=raw_name),
DeprecationWarning,
)
return (
super().__getattribute__(base_name).log()
) # Get real param value and transform to log
else:
try:
return super().__getattribute__(name)
except AttributeError:
raise e
|
https://github.com/cornellius-gp/gpytorch/issues/478
|
import gpytorch
gl = gpytorch.likelihoods.GaussianLikelihood()
gl.initialize(noise=1)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File ".../gpytorch/gpytorch/module.py", line 89, in initialize
setattr(self, name, val)
File "../lib/python3.6/site-packages/torch/nn/modules/module.py", line 579, in __setattr__
object.__setattr__(self, name, value)
File ".../gpytorch/gpytorch/likelihoods/gaussian_likelihood.py", line 63, in noise
self.noise_covar.initialize(value)
TypeError: initialize() takes 1 positional argument but 2 were given
|
TypeError
|
def interpolate(self, x_grid, x_target, interp_points=range(-2, 2)):
# Do some boundary checking
grid_mins = x_grid.min(0)[0]
grid_maxs = x_grid.max(0)[0]
x_target_min = x_target.min(0)[0]
x_target_max = x_target.min(0)[0]
lt_min_mask = (x_target_min - grid_mins).lt(-1e-7)
gt_max_mask = (x_target_max - grid_maxs).gt(1e-7)
if lt_min_mask.sum().item():
first_out_of_range = lt_min_mask.nonzero().squeeze(1)[0].item()
raise RuntimeError(
(
"Received data that was out of bounds for the specified grid. "
"Grid bounds were ({0:.3f}, {0:.3f}), but min = {0:.3f}, "
"max = {0:.3f}"
).format(
grid_mins[first_out_of_range].item(),
grid_maxs[first_out_of_range].item(),
x_target_min[first_out_of_range].item(),
x_target_max[first_out_of_range].item(),
)
)
if gt_max_mask.sum().item():
first_out_of_range = gt_max_mask.nonzero().squeeze(1)[0].item()
raise RuntimeError(
(
"Received data that was out of bounds for the specified grid. "
"Grid bounds were ({0:.3f}, {0:.3f}), but min = {0:.3f}, "
"max = {0:.3f}"
).format(
grid_mins[first_out_of_range].item(),
grid_maxs[first_out_of_range].item(),
x_target_min[first_out_of_range].item(),
x_target_max[first_out_of_range].item(),
)
)
# Now do interpolation
interp_points = torch.tensor(
interp_points, dtype=x_grid.dtype, device=x_grid.device
)
interp_points_flip = interp_points.flip(0)
num_grid_points = x_grid.size(0)
num_target_points = x_target.size(0)
num_dim = x_target.size(-1)
num_coefficients = len(interp_points)
interp_values = torch.ones(
num_target_points,
num_coefficients**num_dim,
dtype=x_grid.dtype,
device=x_grid.device,
)
interp_indices = torch.zeros(
num_target_points,
num_coefficients**num_dim,
dtype=torch.long,
device=x_grid.device,
)
for i in range(num_dim):
grid_delta = x_grid[1, i] - x_grid[0, i]
lower_grid_pt_idxs = torch.floor(
(x_target[:, i] - x_grid[0, i]) / grid_delta
).squeeze()
lower_pt_rel_dists = (
x_target[:, i] - x_grid[0, i]
) / grid_delta - lower_grid_pt_idxs
lower_grid_pt_idxs = lower_grid_pt_idxs - interp_points.max()
lower_grid_pt_idxs.detach_()
if len(lower_grid_pt_idxs.shape) == 0:
lower_grid_pt_idxs = lower_grid_pt_idxs.unsqueeze(0)
scaled_dist = lower_pt_rel_dists.unsqueeze(-1) + interp_points_flip.unsqueeze(
-2
)
dim_interp_values = self._cubic_interpolation_kernel(scaled_dist)
# Find points who's closest lower grid point is the first grid point
# This corresponds to a boundary condition that we must fix manually.
left_boundary_pts = torch.nonzero(lower_grid_pt_idxs < 1)
num_left = len(left_boundary_pts)
if num_left > 0:
left_boundary_pts.squeeze_(1)
x_grid_first = (
x_grid[:num_coefficients, i]
.unsqueeze(1)
.t()
.expand(num_left, num_coefficients)
)
grid_targets = (
x_target.select(1, i)[left_boundary_pts]
.unsqueeze(1)
.expand(num_left, num_coefficients)
)
dists = torch.abs(x_grid_first - grid_targets)
closest_from_first = torch.min(dists, 1)[1]
for j in range(num_left):
dim_interp_values[left_boundary_pts[j], :] = 0
dim_interp_values[left_boundary_pts[j], closest_from_first[j]] = 1
lower_grid_pt_idxs[left_boundary_pts[j]] = 0
right_boundary_pts = torch.nonzero(
lower_grid_pt_idxs > num_grid_points - num_coefficients
)
num_right = len(right_boundary_pts)
if num_right > 0:
right_boundary_pts.squeeze_(1)
x_grid_last = (
x_grid[-num_coefficients:, i]
.unsqueeze(1)
.t()
.expand(num_right, num_coefficients)
)
grid_targets = x_target.select(1, i)[right_boundary_pts].unsqueeze(1)
grid_targets = grid_targets.expand(num_right, num_coefficients)
dists = torch.abs(x_grid_last - grid_targets)
closest_from_last = torch.min(dists, 1)[1]
for j in range(num_right):
dim_interp_values[right_boundary_pts[j], :] = 0
dim_interp_values[right_boundary_pts[j], closest_from_last[j]] = 1
lower_grid_pt_idxs[right_boundary_pts[j]] = (
num_grid_points - num_coefficients
)
offset = (interp_points - interp_points.min()).long().unsqueeze(-2)
dim_interp_indices = lower_grid_pt_idxs.long().unsqueeze(-1) + offset
n_inner_repeat = num_coefficients**i
n_outer_repeat = num_coefficients ** (num_dim - i - 1)
index_coeff = num_grid_points ** (num_dim - i - 1)
dim_interp_indices = dim_interp_indices.unsqueeze(-1).repeat(
1, n_inner_repeat, n_outer_repeat
)
dim_interp_values = dim_interp_values.unsqueeze(-1).repeat(
1, n_inner_repeat, n_outer_repeat
)
interp_indices = interp_indices.add(
dim_interp_indices.view(num_target_points, -1).mul(index_coeff)
)
interp_values = interp_values.mul(dim_interp_values.view(num_target_points, -1))
return interp_indices, interp_values
|
def interpolate(self, x_grid, x_target, interp_points=range(-2, 2)):
# Do some boundary checking
grid_mins = x_grid.min(0)[0]
grid_maxs = x_grid.max(0)[0]
x_target_min = x_target.min(0)[0]
x_target_max = x_target.min(0)[0]
lt_min_mask = (x_target_min - grid_mins).lt(-1e-7)
gt_max_mask = (x_target_max - grid_maxs).gt(1e-7)
if lt_min_mask.sum().item():
first_out_of_range = lt_min_mask.nonzero().squeeze(1)[0].item()
raise RuntimeError(
(
"Received data that was out of bounds for the specified grid. "
"Grid bounds were ({0:.3f}, {0:.3f}), but min = {0:.3f}, "
"max = {0:.3f}"
).format(
grid_mins[first_out_of_range].item(),
grid_maxs[first_out_of_range].item(),
x_target_min[first_out_of_range].item(),
x_target_max[first_out_of_range].item(),
)
)
if gt_max_mask.sum().item():
first_out_of_range = gt_max_mask.nonzero().squeeze(1)[0].item()
raise RuntimeError(
(
"Received data that was out of bounds for the specified grid. "
"Grid bounds were ({0:.3f}, {0:.3f}), but min = {0:.3f}, "
"max = {0:.3f}"
).format(
grid_mins[first_out_of_range].item(),
grid_maxs[first_out_of_range].item(),
x_target_min[first_out_of_range].item(),
x_target_max[first_out_of_range].item(),
)
)
# Now do interpolation
interp_points = torch.tensor(
interp_points, dtype=x_grid.dtype, device=x_grid.device
)
interp_points_flip = interp_points.flip(0)
num_grid_points = x_grid.size(0)
num_target_points = x_target.size(0)
num_dim = x_target.size(-1)
num_coefficients = len(interp_points)
interp_values = torch.ones(
num_target_points,
num_coefficients**num_dim,
dtype=x_grid.dtype,
device=x_grid.device,
)
interp_indices = torch.zeros(
num_target_points,
num_coefficients**num_dim,
dtype=torch.long,
device=x_grid.device,
)
for i in range(num_dim):
grid_delta = x_grid[1, i] - x_grid[0, i]
lower_grid_pt_idxs = torch.floor(
(x_target[:, i] - x_grid[0, i]) / grid_delta
).squeeze()
lower_pt_rel_dists = (
x_target[:, i] - x_grid[0, i]
) / grid_delta - lower_grid_pt_idxs
lower_grid_pt_idxs = lower_grid_pt_idxs - interp_points.max()
lower_grid_pt_idxs.detach_()
scaled_dist = lower_pt_rel_dists.unsqueeze(-1) + interp_points_flip.unsqueeze(
-2
)
dim_interp_values = self._cubic_interpolation_kernel(scaled_dist)
# Find points who's closest lower grid point is the first grid point
# This corresponds to a boundary condition that we must fix manually.
left_boundary_pts = torch.nonzero(lower_grid_pt_idxs < 1)
num_left = len(left_boundary_pts)
if num_left > 0:
left_boundary_pts.squeeze_(1)
x_grid_first = (
x_grid[:num_coefficients, i]
.unsqueeze(1)
.t()
.expand(num_left, num_coefficients)
)
grid_targets = (
x_target.select(1, i)[left_boundary_pts]
.unsqueeze(1)
.expand(num_left, num_coefficients)
)
dists = torch.abs(x_grid_first - grid_targets)
closest_from_first = torch.min(dists, 1)[1]
for j in range(num_left):
dim_interp_values[left_boundary_pts[j], :] = 0
dim_interp_values[left_boundary_pts[j], closest_from_first[j]] = 1
lower_grid_pt_idxs[left_boundary_pts[j]] = 0
right_boundary_pts = torch.nonzero(
lower_grid_pt_idxs > num_grid_points - num_coefficients
)
num_right = len(right_boundary_pts)
if num_right > 0:
right_boundary_pts.squeeze_(1)
x_grid_last = (
x_grid[-num_coefficients:, i]
.unsqueeze(1)
.t()
.expand(num_right, num_coefficients)
)
grid_targets = x_target.select(1, i)[right_boundary_pts].unsqueeze(1)
grid_targets = grid_targets.expand(num_right, num_coefficients)
dists = torch.abs(x_grid_last - grid_targets)
closest_from_last = torch.min(dists, 1)[1]
for j in range(num_right):
dim_interp_values[right_boundary_pts[j], :] = 0
dim_interp_values[right_boundary_pts[j], closest_from_last[j]] = 1
lower_grid_pt_idxs[right_boundary_pts[j]] = (
num_grid_points - num_coefficients
)
offset = (interp_points - interp_points.min()).long().unsqueeze(-2)
dim_interp_indices = lower_grid_pt_idxs.long().unsqueeze(-1) + offset
n_inner_repeat = num_coefficients**i
n_outer_repeat = num_coefficients ** (num_dim - i - 1)
index_coeff = num_grid_points ** (num_dim - i - 1)
dim_interp_indices = dim_interp_indices.unsqueeze(-1).repeat(
1, n_inner_repeat, n_outer_repeat
)
dim_interp_values = dim_interp_values.unsqueeze(-1).repeat(
1, n_inner_repeat, n_outer_repeat
)
interp_indices = interp_indices.add(
dim_interp_indices.view(num_target_points, -1).mul(index_coeff)
)
interp_values = interp_values.mul(dim_interp_values.view(num_target_points, -1))
return interp_indices, interp_values
|
https://github.com/cornellius-gp/gpytorch/issues/250
|
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-14-dd2c9e04445d> in <module>()
1 # Does not work
2 test_x = torch.FloatTensor([-0.1]).unsqueeze(1)
----> 3 model(test_x)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/models/exact_gp.py in __call__(self, *args, **kwargs)
142 n_train=n_train,
143 likelihood=self.likelihood,
--> 144 precomputed_cache=self.mean_cache,
145 )
146 predictive_covar, covar_cache = exact_predictive_covar(
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/functions/__init__.py in exact_predictive_mean(full_covar, full_mean, train_labels, n_train, likelihood, precomputed_cache)
89
90 full_covar = NonLazyVariable(full_covar)
---> 91 return full_covar.exact_predictive_mean(full_mean, train_labels, n_train, likelihood, precomputed_cache)
92
93
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in exact_predictive_mean(self, full_mean, train_labels, n_train, likelihood, precomputed_cache)
124 else:
125 return super(LazyEvaluatedKernelVariable, self).exact_predictive_mean(
--> 126 full_mean, train_labels, n_train, likelihood, precomputed_cache
127 )
128
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in exact_predictive_mean(self, full_mean, train_labels, n_train, likelihood, precomputed_cache)
427 else:
428 test_train_covar = self[n_train:, :n_train]
--> 429 res = test_train_covar.matmul(precomputed_cache)
430 if res.ndimension() == 3:
431 res = res.squeeze(-1)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in matmul(self, tensor)
632 raise RuntimeError
633
--> 634 func = Matmul(self.representation_tree())
635 return func(tensor, *self.representation())
636
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in representation_tree(self)
112
113 def representation_tree(self):
--> 114 return LazyVariableRepresentationTree(self.evaluate_kernel())
115
116 def evaluate(self):
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in evaluate_kernel(self)
96 x2 = self.x2
97
---> 98 self._cached_kernel_eval = super(Kernel, self.kernel).__call__(x1, x2, **self.params)
99 if self.squeeze_row:
100 self._cached_kernel_eval.squeeze_(-2)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
160
161 def __call__(self, *inputs, **kwargs):
--> 162 outputs = self.forward(*inputs, **kwargs)
163 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):
164 return outputs
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/multitask_kernel.py in forward(self, x1, x2)
61 def forward(self, x1, x2):
62 covar_i = self.task_covar_module.covar_matrix
---> 63 covar_x = self.data_covar_module.forward(x1, x2)
64 if covar_x.size(0) == 1:
65 covar_x = covar_x[0]
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_interpolation_kernel.py in forward(self, x1, x2, **kwargs)
56 base_lazy_var = base_lazy_var.repeat(x1.size(0), 1, 1)
57
---> 58 left_interp_indices, left_interp_values = self._compute_grid(x1)
59 if torch.equal(x1.data, x2.data):
60 right_interp_indices = left_interp_indices
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_interpolation_kernel.py in _compute_grid(self, inputs)
39 batch_size, n_data, n_dimensions = inputs.size()
40 inputs = inputs.view(batch_size * n_data, n_dimensions)
---> 41 interp_indices, interp_values = Interpolation().interpolate(Variable(self.grid), inputs)
42 interp_indices = interp_indices.view(batch_size, n_data, -1)
43 interp_values = interp_values.view(batch_size, n_data, -1)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/utils/interpolation.py in interpolate(self, x_grid, x_target, interp_points)
114 dim_interp_values[left_boundary_pts[j], :] = 0
115 dim_interp_values[left_boundary_pts[j], closest_from_first[j]] = 1
--> 116 lower_grid_pt_idxs[left_boundary_pts[j]] = 0
117
118 right_boundary_pts = torch.nonzero(lower_grid_pt_idxs > num_grid_points - num_coefficients)
IndexError: too many indices for tensor of dimension 0
|
IndexError
|
def __init__(
self,
has_lengthscale=False,
ard_num_dims=None,
batch_size=1,
active_dims=None,
log_lengthscale_bounds=None,
log_lengthscale_prior=None,
eps=1e-6,
):
super(Kernel, self).__init__()
if active_dims is not None and not torch.is_tensor(active_dims):
active_dims = torch.tensor(active_dims, dtype=torch.long)
self.active_dims = active_dims
self.ard_num_dims = ard_num_dims
self.batch_size = batch_size
self.__has_lengthscale = has_lengthscale
if has_lengthscale:
lengthscale_num_dims = 1 if ard_num_dims is None else ard_num_dims
log_lengthscale_prior = _bounds_to_prior(
prior=log_lengthscale_prior, bounds=log_lengthscale_bounds
)
self.register_parameter(
name="log_lengthscale",
parameter=torch.nn.Parameter(
torch.zeros(batch_size, 1, lengthscale_num_dims)
),
prior=log_lengthscale_prior,
)
|
def __init__(
self,
has_lengthscale=False,
ard_num_dims=None,
log_lengthscale_prior=None,
active_dims=None,
batch_size=1,
log_lengthscale_bounds=None,
):
"""
The base Kernel class handles both lengthscales and ARD.
Args:
has_lengthscale (bool): If True, we will register a :obj:`torch.nn.Parameter` named `log_lengthscale`
ard_num_dims (int): If not None, the `log_lengthscale` parameter will have this many entries.
log_lengthscale_prior (:obj:`gpytorch.priors.Prior`): Prior over the log lengthscale
active_dims (list): List of data dimensions to evaluate this Kernel on.
batch_size (int): If training or testing multiple GPs simultaneously, this is how many lengthscales to
register.
log_lengthscale_bounds (tuple): Deprecated min and max values for the lengthscales. If supplied, this
now registers a :obj:`gpytorch.priors.SmoothedBoxPrior`
"""
super(Kernel, self).__init__()
if active_dims is not None and not torch.is_tensor(active_dims):
active_dims = torch.tensor(active_dims, dtype=torch.long)
self.active_dims = active_dims
self.ard_num_dims = ard_num_dims
self.has_lengthscale = has_lengthscale
if has_lengthscale:
lengthscale_num_dims = 1 if ard_num_dims is None else ard_num_dims
log_lengthscale_prior = _bounds_to_prior(
prior=log_lengthscale_prior, bounds=log_lengthscale_bounds
)
self.register_parameter(
name="log_lengthscale",
parameter=torch.nn.Parameter(
torch.zeros(batch_size, 1, lengthscale_num_dims)
),
prior=log_lengthscale_prior,
)
|
https://github.com/cornellius-gp/gpytorch/issues/249
|
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-144-12b763efd692> in <module>()
19 output = model(train_x)
20 # TODO: Fix this view call!!
---> 21 loss = -mll(output, train_y)
22 loss.backward()
23 print('Iter %d/%d - Loss: %.3f' % (i + 1, n_iter, loss.item()))
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
160
161 def __call__(self, *inputs, **kwargs):
--> 162 outputs = self.forward(*inputs, **kwargs)
163 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):
164 return outputs
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/mlls/exact_marginal_log_likelihood.py in forward(self, output, target)
49
50 # Get log determininat and first part of quadratic form
---> 51 inv_quad, log_det = covar.inv_quad_log_det(inv_quad_rhs=(target - mean).unsqueeze(-1), log_det=True)
52
53 # Add terms for SGPR / when inducing points are learned
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in inv_quad_log_det(self, inv_quad_rhs, log_det)
577 matrix_size = self.size(-1)
578 batch_size = self.size(0) if self.ndimension() == 3 else None
--> 579 tensor_cls = self.tensor_cls
580
581 args = lazy_var.representation()
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in tensor_cls(self)
882 def tensor_cls(self):
883 if not hasattr(self, "_tensor_cls"):
--> 884 first_item = self.representation()[0]
885 if isinstance(first_item, Variable):
886 first_item = first_item.data
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in representation(self)
733 representation.append(arg)
734 elif isinstance(arg, LazyVariable):
--> 735 representation += list(arg.representation())
736 else:
737 raise RuntimeError("Representation of a LazyVariable should consist only of Variables")
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in representation(self)
733 representation.append(arg)
734 elif isinstance(arg, LazyVariable):
--> 735 representation += list(arg.representation())
736 else:
737 raise RuntimeError("Representation of a LazyVariable should consist only of Variables")
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in representation(self)
109
110 def representation(self):
--> 111 return self.evaluate_kernel().representation()
112
113 def representation_tree(self):
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in evaluate_kernel(self)
96 x2 = self.x2
97
---> 98 self._cached_kernel_eval = super(Kernel, self.kernel).__call__(x1, x2, **self.params)
99 if self.squeeze_row:
100 self._cached_kernel_eval.squeeze_(-2)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
160
161 def __call__(self, *inputs, **kwargs):
--> 162 outputs = self.forward(*inputs, **kwargs)
163 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):
164 return outputs
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/multitask_kernel.py in forward(self, x1, x2)
61 def forward(self, x1, x2):
62 covar_i = self.task_covar_module.covar_matrix
---> 63 covar_x = self.data_covar_module.forward(x1, x2)
64 if covar_x.size(0) == 1:
65 covar_x = covar_x[0]
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_interpolation_kernel.py in forward(self, x1, x2, **kwargs)
52
53 def forward(self, x1, x2, **kwargs):
---> 54 base_lazy_var = self._inducing_forward()
55 if x1.size(0) > 1:
56 base_lazy_var = base_lazy_var.repeat(x1.size(0), 1, 1)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_interpolation_kernel.py in _inducing_forward(self)
46 def _inducing_forward(self):
47 inducing_points_var = Variable(self.inducing_points)
---> 48 return super(GridInterpolationKernel, self).forward(inducing_points_var, inducing_points_var)
49
50 def forward_diag(self, x1, x2, **kwargs):
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_kernel.py in forward(self, x1, x2, **kwargs)
38 if settings.use_toeplitz.on():
39 first_item = grid_var[:, 0:1].contiguous()
---> 40 covar_columns = self.base_kernel_module(first_item, grid_var, **kwargs).evaluate()
41 covars = [ToeplitzLazyVariable(covar_columns[i : i + 1].squeeze(-2)) for i in range(n_dim)]
42 else:
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in evaluate(self)
115
116 def evaluate(self):
--> 117 return self.evaluate_kernel().evaluate()
118
119 def exact_predictive_mean(self, full_mean, train_labels, n_train, likelihood, precomputed_cache=None):
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in evaluate_kernel(self)
96 x2 = self.x2
97
---> 98 self._cached_kernel_eval = super(Kernel, self.kernel).__call__(x1, x2, **self.params)
99 if self.squeeze_row:
100 self._cached_kernel_eval.squeeze_(-2)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
160
161 def __call__(self, *inputs, **kwargs):
--> 162 outputs = self.forward(*inputs, **kwargs)
163 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):
164 return outputs
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/rbf_kernel.py in forward(self, x1, x2)
104 def forward(self, x1, x2):
105 lengthscales = self.log_lengthscale.exp().mul(math.sqrt(2)).clamp(self.eps, 1e5)
--> 106 diff = (x1.unsqueeze(2) - x2.unsqueeze(1)).div_(lengthscales.unsqueeze(1))
107 return diff.pow_(2).sum(-1).mul_(-1).exp_()
RuntimeError: The expanded size of the tensor (1) must match the existing size (2) at non-singleton dimension 3
|
RuntimeError
|
def lengthscale(self):
if self.has_lengthscale:
return self.log_lengthscale.exp().clamp(self.eps, 1e5)
else:
return None
|
def lengthscale(self):
if "log_lengthscale" in self.named_parameters().keys():
return self.log_lengthscale.exp()
else:
return None
|
https://github.com/cornellius-gp/gpytorch/issues/249
|
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-144-12b763efd692> in <module>()
19 output = model(train_x)
20 # TODO: Fix this view call!!
---> 21 loss = -mll(output, train_y)
22 loss.backward()
23 print('Iter %d/%d - Loss: %.3f' % (i + 1, n_iter, loss.item()))
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
160
161 def __call__(self, *inputs, **kwargs):
--> 162 outputs = self.forward(*inputs, **kwargs)
163 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):
164 return outputs
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/mlls/exact_marginal_log_likelihood.py in forward(self, output, target)
49
50 # Get log determininat and first part of quadratic form
---> 51 inv_quad, log_det = covar.inv_quad_log_det(inv_quad_rhs=(target - mean).unsqueeze(-1), log_det=True)
52
53 # Add terms for SGPR / when inducing points are learned
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in inv_quad_log_det(self, inv_quad_rhs, log_det)
577 matrix_size = self.size(-1)
578 batch_size = self.size(0) if self.ndimension() == 3 else None
--> 579 tensor_cls = self.tensor_cls
580
581 args = lazy_var.representation()
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in tensor_cls(self)
882 def tensor_cls(self):
883 if not hasattr(self, "_tensor_cls"):
--> 884 first_item = self.representation()[0]
885 if isinstance(first_item, Variable):
886 first_item = first_item.data
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in representation(self)
733 representation.append(arg)
734 elif isinstance(arg, LazyVariable):
--> 735 representation += list(arg.representation())
736 else:
737 raise RuntimeError("Representation of a LazyVariable should consist only of Variables")
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in representation(self)
733 representation.append(arg)
734 elif isinstance(arg, LazyVariable):
--> 735 representation += list(arg.representation())
736 else:
737 raise RuntimeError("Representation of a LazyVariable should consist only of Variables")
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in representation(self)
109
110 def representation(self):
--> 111 return self.evaluate_kernel().representation()
112
113 def representation_tree(self):
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in evaluate_kernel(self)
96 x2 = self.x2
97
---> 98 self._cached_kernel_eval = super(Kernel, self.kernel).__call__(x1, x2, **self.params)
99 if self.squeeze_row:
100 self._cached_kernel_eval.squeeze_(-2)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
160
161 def __call__(self, *inputs, **kwargs):
--> 162 outputs = self.forward(*inputs, **kwargs)
163 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):
164 return outputs
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/multitask_kernel.py in forward(self, x1, x2)
61 def forward(self, x1, x2):
62 covar_i = self.task_covar_module.covar_matrix
---> 63 covar_x = self.data_covar_module.forward(x1, x2)
64 if covar_x.size(0) == 1:
65 covar_x = covar_x[0]
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_interpolation_kernel.py in forward(self, x1, x2, **kwargs)
52
53 def forward(self, x1, x2, **kwargs):
---> 54 base_lazy_var = self._inducing_forward()
55 if x1.size(0) > 1:
56 base_lazy_var = base_lazy_var.repeat(x1.size(0), 1, 1)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_interpolation_kernel.py in _inducing_forward(self)
46 def _inducing_forward(self):
47 inducing_points_var = Variable(self.inducing_points)
---> 48 return super(GridInterpolationKernel, self).forward(inducing_points_var, inducing_points_var)
49
50 def forward_diag(self, x1, x2, **kwargs):
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_kernel.py in forward(self, x1, x2, **kwargs)
38 if settings.use_toeplitz.on():
39 first_item = grid_var[:, 0:1].contiguous()
---> 40 covar_columns = self.base_kernel_module(first_item, grid_var, **kwargs).evaluate()
41 covars = [ToeplitzLazyVariable(covar_columns[i : i + 1].squeeze(-2)) for i in range(n_dim)]
42 else:
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in evaluate(self)
115
116 def evaluate(self):
--> 117 return self.evaluate_kernel().evaluate()
118
119 def exact_predictive_mean(self, full_mean, train_labels, n_train, likelihood, precomputed_cache=None):
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in evaluate_kernel(self)
96 x2 = self.x2
97
---> 98 self._cached_kernel_eval = super(Kernel, self.kernel).__call__(x1, x2, **self.params)
99 if self.squeeze_row:
100 self._cached_kernel_eval.squeeze_(-2)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
160
161 def __call__(self, *inputs, **kwargs):
--> 162 outputs = self.forward(*inputs, **kwargs)
163 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):
164 return outputs
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/rbf_kernel.py in forward(self, x1, x2)
104 def forward(self, x1, x2):
105 lengthscales = self.log_lengthscale.exp().mul(math.sqrt(2)).clamp(self.eps, 1e5)
--> 106 diff = (x1.unsqueeze(2) - x2.unsqueeze(1)).div_(lengthscales.unsqueeze(1))
107 return diff.pow_(2).sum(-1).mul_(-1).exp_()
RuntimeError: The expanded size of the tensor (1) must match the existing size (2) at non-singleton dimension 3
|
RuntimeError
|
def forward(self, x1, x2, **params):
"""
Computes the covariance between x1 and x2.
This method should be imlemented by all Kernel subclasses.
.. note::
All non-compositional kernels should use the :meth:`gpytorch.kernels.Kernel._create_input_grid`
method to create a meshgrid between x1 and x2 (if necessary).
Do not manually create the grid - this is inefficient and will cause erroneous behavior in certain
evaluation modes.
Args:
:attr:`x1` (Tensor `n x d` or `b x n x d`)
:attr:`x2` (Tensor `m x d` or `b x m x d`) - for diag mode, these must be the same inputs
Returns:
:class:`Tensor` or :class:`gpytorch.lazy.LazyTensor`.
The exact size depends on the kernel's evaluation mode:
* `full_covar`: `n x m` or `b x n x m`
* `full_covar` with `dim_groups=k`: `k x n x m` or `b x k x n x m`
* `diag`: `n` or `b x n`
* `diag` with `dim_groups=k`: `k x n` or `b x k x n`
"""
raise NotImplementedError()
|
def forward(self, x1, x2, **params):
raise NotImplementedError()
|
https://github.com/cornellius-gp/gpytorch/issues/249
|
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-144-12b763efd692> in <module>()
19 output = model(train_x)
20 # TODO: Fix this view call!!
---> 21 loss = -mll(output, train_y)
22 loss.backward()
23 print('Iter %d/%d - Loss: %.3f' % (i + 1, n_iter, loss.item()))
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
160
161 def __call__(self, *inputs, **kwargs):
--> 162 outputs = self.forward(*inputs, **kwargs)
163 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):
164 return outputs
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/mlls/exact_marginal_log_likelihood.py in forward(self, output, target)
49
50 # Get log determininat and first part of quadratic form
---> 51 inv_quad, log_det = covar.inv_quad_log_det(inv_quad_rhs=(target - mean).unsqueeze(-1), log_det=True)
52
53 # Add terms for SGPR / when inducing points are learned
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in inv_quad_log_det(self, inv_quad_rhs, log_det)
577 matrix_size = self.size(-1)
578 batch_size = self.size(0) if self.ndimension() == 3 else None
--> 579 tensor_cls = self.tensor_cls
580
581 args = lazy_var.representation()
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in tensor_cls(self)
882 def tensor_cls(self):
883 if not hasattr(self, "_tensor_cls"):
--> 884 first_item = self.representation()[0]
885 if isinstance(first_item, Variable):
886 first_item = first_item.data
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in representation(self)
733 representation.append(arg)
734 elif isinstance(arg, LazyVariable):
--> 735 representation += list(arg.representation())
736 else:
737 raise RuntimeError("Representation of a LazyVariable should consist only of Variables")
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in representation(self)
733 representation.append(arg)
734 elif isinstance(arg, LazyVariable):
--> 735 representation += list(arg.representation())
736 else:
737 raise RuntimeError("Representation of a LazyVariable should consist only of Variables")
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in representation(self)
109
110 def representation(self):
--> 111 return self.evaluate_kernel().representation()
112
113 def representation_tree(self):
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in evaluate_kernel(self)
96 x2 = self.x2
97
---> 98 self._cached_kernel_eval = super(Kernel, self.kernel).__call__(x1, x2, **self.params)
99 if self.squeeze_row:
100 self._cached_kernel_eval.squeeze_(-2)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
160
161 def __call__(self, *inputs, **kwargs):
--> 162 outputs = self.forward(*inputs, **kwargs)
163 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):
164 return outputs
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/multitask_kernel.py in forward(self, x1, x2)
61 def forward(self, x1, x2):
62 covar_i = self.task_covar_module.covar_matrix
---> 63 covar_x = self.data_covar_module.forward(x1, x2)
64 if covar_x.size(0) == 1:
65 covar_x = covar_x[0]
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_interpolation_kernel.py in forward(self, x1, x2, **kwargs)
52
53 def forward(self, x1, x2, **kwargs):
---> 54 base_lazy_var = self._inducing_forward()
55 if x1.size(0) > 1:
56 base_lazy_var = base_lazy_var.repeat(x1.size(0), 1, 1)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_interpolation_kernel.py in _inducing_forward(self)
46 def _inducing_forward(self):
47 inducing_points_var = Variable(self.inducing_points)
---> 48 return super(GridInterpolationKernel, self).forward(inducing_points_var, inducing_points_var)
49
50 def forward_diag(self, x1, x2, **kwargs):
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_kernel.py in forward(self, x1, x2, **kwargs)
38 if settings.use_toeplitz.on():
39 first_item = grid_var[:, 0:1].contiguous()
---> 40 covar_columns = self.base_kernel_module(first_item, grid_var, **kwargs).evaluate()
41 covars = [ToeplitzLazyVariable(covar_columns[i : i + 1].squeeze(-2)) for i in range(n_dim)]
42 else:
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in evaluate(self)
115
116 def evaluate(self):
--> 117 return self.evaluate_kernel().evaluate()
118
119 def exact_predictive_mean(self, full_mean, train_labels, n_train, likelihood, precomputed_cache=None):
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in evaluate_kernel(self)
96 x2 = self.x2
97
---> 98 self._cached_kernel_eval = super(Kernel, self.kernel).__call__(x1, x2, **self.params)
99 if self.squeeze_row:
100 self._cached_kernel_eval.squeeze_(-2)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
160
161 def __call__(self, *inputs, **kwargs):
--> 162 outputs = self.forward(*inputs, **kwargs)
163 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):
164 return outputs
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/rbf_kernel.py in forward(self, x1, x2)
104 def forward(self, x1, x2):
105 lengthscales = self.log_lengthscale.exp().mul(math.sqrt(2)).clamp(self.eps, 1e5)
--> 106 diff = (x1.unsqueeze(2) - x2.unsqueeze(1)).div_(lengthscales.unsqueeze(1))
107 return diff.pow_(2).sum(-1).mul_(-1).exp_()
RuntimeError: The expanded size of the tensor (1) must match the existing size (2) at non-singleton dimension 3
|
RuntimeError
|
def __call__(self, x1, x2=None, **params):
x1_, x2_ = x1, x2
# Select the active dimensions
if self.active_dims is not None:
x1_ = x1_.index_select(-1, self.active_dims)
if x2_ is not None:
x2_ = x2_.index_select(-1, self.active_dims)
# Give x1_ and x2_ a last dimension, if necessary
if x1_.ndimension() == 1:
x1_ = x1_.unsqueeze(1)
if x2_ is not None:
if x2_.ndimension() == 1:
x2_ = x2_.unsqueeze(1)
if not x1_.size(-1) == x2_.size(-1):
raise RuntimeError("x1_ and x2_ must have the same number of dimensions!")
if x2_ is None:
x2_ = x1_
return LazyEvaluatedKernelTensor(self, x1_, x2_, **params)
|
def __call__(self, x1_, x2_=None, **params):
x1, x2 = x1_, x2_
if self.active_dims is not None:
x1 = x1_.index_select(-1, self.active_dims)
if x2_ is not None:
x2 = x2_.index_select(-1, self.active_dims)
if x2 is None:
x2 = x1
# Give x1 and x2 a last dimension, if necessary
if x1.ndimension() == 1:
x1 = x1.unsqueeze(1)
if x2.ndimension() == 1:
x2 = x2.unsqueeze(1)
if not x1.size(-1) == x2.size(-1):
raise RuntimeError("x1 and x2 must have the same number of dimensions!")
return LazyEvaluatedKernelTensor(self, x1, x2)
|
https://github.com/cornellius-gp/gpytorch/issues/249
|
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-144-12b763efd692> in <module>()
19 output = model(train_x)
20 # TODO: Fix this view call!!
---> 21 loss = -mll(output, train_y)
22 loss.backward()
23 print('Iter %d/%d - Loss: %.3f' % (i + 1, n_iter, loss.item()))
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
160
161 def __call__(self, *inputs, **kwargs):
--> 162 outputs = self.forward(*inputs, **kwargs)
163 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):
164 return outputs
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/mlls/exact_marginal_log_likelihood.py in forward(self, output, target)
49
50 # Get log determininat and first part of quadratic form
---> 51 inv_quad, log_det = covar.inv_quad_log_det(inv_quad_rhs=(target - mean).unsqueeze(-1), log_det=True)
52
53 # Add terms for SGPR / when inducing points are learned
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in inv_quad_log_det(self, inv_quad_rhs, log_det)
577 matrix_size = self.size(-1)
578 batch_size = self.size(0) if self.ndimension() == 3 else None
--> 579 tensor_cls = self.tensor_cls
580
581 args = lazy_var.representation()
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in tensor_cls(self)
882 def tensor_cls(self):
883 if not hasattr(self, "_tensor_cls"):
--> 884 first_item = self.representation()[0]
885 if isinstance(first_item, Variable):
886 first_item = first_item.data
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in representation(self)
733 representation.append(arg)
734 elif isinstance(arg, LazyVariable):
--> 735 representation += list(arg.representation())
736 else:
737 raise RuntimeError("Representation of a LazyVariable should consist only of Variables")
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in representation(self)
733 representation.append(arg)
734 elif isinstance(arg, LazyVariable):
--> 735 representation += list(arg.representation())
736 else:
737 raise RuntimeError("Representation of a LazyVariable should consist only of Variables")
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in representation(self)
109
110 def representation(self):
--> 111 return self.evaluate_kernel().representation()
112
113 def representation_tree(self):
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in evaluate_kernel(self)
96 x2 = self.x2
97
---> 98 self._cached_kernel_eval = super(Kernel, self.kernel).__call__(x1, x2, **self.params)
99 if self.squeeze_row:
100 self._cached_kernel_eval.squeeze_(-2)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
160
161 def __call__(self, *inputs, **kwargs):
--> 162 outputs = self.forward(*inputs, **kwargs)
163 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):
164 return outputs
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/multitask_kernel.py in forward(self, x1, x2)
61 def forward(self, x1, x2):
62 covar_i = self.task_covar_module.covar_matrix
---> 63 covar_x = self.data_covar_module.forward(x1, x2)
64 if covar_x.size(0) == 1:
65 covar_x = covar_x[0]
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_interpolation_kernel.py in forward(self, x1, x2, **kwargs)
52
53 def forward(self, x1, x2, **kwargs):
---> 54 base_lazy_var = self._inducing_forward()
55 if x1.size(0) > 1:
56 base_lazy_var = base_lazy_var.repeat(x1.size(0), 1, 1)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_interpolation_kernel.py in _inducing_forward(self)
46 def _inducing_forward(self):
47 inducing_points_var = Variable(self.inducing_points)
---> 48 return super(GridInterpolationKernel, self).forward(inducing_points_var, inducing_points_var)
49
50 def forward_diag(self, x1, x2, **kwargs):
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_kernel.py in forward(self, x1, x2, **kwargs)
38 if settings.use_toeplitz.on():
39 first_item = grid_var[:, 0:1].contiguous()
---> 40 covar_columns = self.base_kernel_module(first_item, grid_var, **kwargs).evaluate()
41 covars = [ToeplitzLazyVariable(covar_columns[i : i + 1].squeeze(-2)) for i in range(n_dim)]
42 else:
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in evaluate(self)
115
116 def evaluate(self):
--> 117 return self.evaluate_kernel().evaluate()
118
119 def exact_predictive_mean(self, full_mean, train_labels, n_train, likelihood, precomputed_cache=None):
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in evaluate_kernel(self)
96 x2 = self.x2
97
---> 98 self._cached_kernel_eval = super(Kernel, self.kernel).__call__(x1, x2, **self.params)
99 if self.squeeze_row:
100 self._cached_kernel_eval.squeeze_(-2)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
160
161 def __call__(self, *inputs, **kwargs):
--> 162 outputs = self.forward(*inputs, **kwargs)
163 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):
164 return outputs
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/rbf_kernel.py in forward(self, x1, x2)
104 def forward(self, x1, x2):
105 lengthscales = self.log_lengthscale.exp().mul(math.sqrt(2)).clamp(self.eps, 1e5)
--> 106 diff = (x1.unsqueeze(2) - x2.unsqueeze(1)).div_(lengthscales.unsqueeze(1))
107 return diff.pow_(2).sum(-1).mul_(-1).exp_()
RuntimeError: The expanded size of the tensor (1) must match the existing size (2) at non-singleton dimension 3
|
RuntimeError
|
def __init__(
self,
num_dimensions,
variance_prior=None,
offset_prior=None,
active_dims=None,
variance_bounds=None,
offset_bounds=None,
):
super(LinearKernel, self).__init__(active_dims=active_dims)
variance_prior = _bounds_to_prior(
prior=variance_prior, bounds=variance_bounds, log_transform=False
)
self.register_parameter(
name="variance",
parameter=torch.nn.Parameter(torch.zeros(1)),
prior=variance_prior,
)
offset_prior = _bounds_to_prior(
prior=offset_prior, bounds=offset_bounds, log_transform=False
)
self.register_parameter(
name="offset",
parameter=torch.nn.Parameter(torch.zeros(1, 1, num_dimensions)),
prior=offset_prior,
)
|
def __init__(
self,
num_dimensions,
variance_prior=None,
offset_prior=None,
active_dims=None,
variance_bounds=None,
offset_bounds=None,
):
"""
Args:
num_dimensions (int): Number of data dimensions to expect. This is necessary to create the offset parameter.
variance_prior (:obj:`gpytorch.priors.Prior`): Prior over the variance parameter (default `None`).
offset_prior (:obj:`gpytorch.priors.Prior`): Prior over the offset parameter (default `None`).
active_dims (list): List of data dimensions to operate on. `len(active_dims)` should equal `num_dimensions`.
variance_bounds (tuple, deprecated): Min and max value for the variance parameter. Deprecated, and now
creates a :obj:`gpytorch.priors.SmoothedBoxPrior`.
offset_bounds (tuple, deprecated): Min and max value for the offset parameter. Deprecated, and now creates a
:obj:'gpytorch.priors.SmoothedBoxPrior'.
"""
super(LinearKernel, self).__init__(active_dims=active_dims)
variance_prior = _bounds_to_prior(
prior=variance_prior, bounds=variance_bounds, log_transform=False
)
self.register_parameter(
name="variance",
parameter=torch.nn.Parameter(torch.zeros(1)),
prior=variance_prior,
)
offset_prior = _bounds_to_prior(
prior=offset_prior, bounds=offset_bounds, log_transform=False
)
self.register_parameter(
name="offset",
parameter=torch.nn.Parameter(torch.zeros(1, 1, num_dimensions)),
prior=offset_prior,
)
|
https://github.com/cornellius-gp/gpytorch/issues/249
|
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-144-12b763efd692> in <module>()
19 output = model(train_x)
20 # TODO: Fix this view call!!
---> 21 loss = -mll(output, train_y)
22 loss.backward()
23 print('Iter %d/%d - Loss: %.3f' % (i + 1, n_iter, loss.item()))
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
160
161 def __call__(self, *inputs, **kwargs):
--> 162 outputs = self.forward(*inputs, **kwargs)
163 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):
164 return outputs
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/mlls/exact_marginal_log_likelihood.py in forward(self, output, target)
49
50 # Get log determininat and first part of quadratic form
---> 51 inv_quad, log_det = covar.inv_quad_log_det(inv_quad_rhs=(target - mean).unsqueeze(-1), log_det=True)
52
53 # Add terms for SGPR / when inducing points are learned
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in inv_quad_log_det(self, inv_quad_rhs, log_det)
577 matrix_size = self.size(-1)
578 batch_size = self.size(0) if self.ndimension() == 3 else None
--> 579 tensor_cls = self.tensor_cls
580
581 args = lazy_var.representation()
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in tensor_cls(self)
882 def tensor_cls(self):
883 if not hasattr(self, "_tensor_cls"):
--> 884 first_item = self.representation()[0]
885 if isinstance(first_item, Variable):
886 first_item = first_item.data
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in representation(self)
733 representation.append(arg)
734 elif isinstance(arg, LazyVariable):
--> 735 representation += list(arg.representation())
736 else:
737 raise RuntimeError("Representation of a LazyVariable should consist only of Variables")
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in representation(self)
733 representation.append(arg)
734 elif isinstance(arg, LazyVariable):
--> 735 representation += list(arg.representation())
736 else:
737 raise RuntimeError("Representation of a LazyVariable should consist only of Variables")
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in representation(self)
109
110 def representation(self):
--> 111 return self.evaluate_kernel().representation()
112
113 def representation_tree(self):
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in evaluate_kernel(self)
96 x2 = self.x2
97
---> 98 self._cached_kernel_eval = super(Kernel, self.kernel).__call__(x1, x2, **self.params)
99 if self.squeeze_row:
100 self._cached_kernel_eval.squeeze_(-2)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
160
161 def __call__(self, *inputs, **kwargs):
--> 162 outputs = self.forward(*inputs, **kwargs)
163 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):
164 return outputs
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/multitask_kernel.py in forward(self, x1, x2)
61 def forward(self, x1, x2):
62 covar_i = self.task_covar_module.covar_matrix
---> 63 covar_x = self.data_covar_module.forward(x1, x2)
64 if covar_x.size(0) == 1:
65 covar_x = covar_x[0]
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_interpolation_kernel.py in forward(self, x1, x2, **kwargs)
52
53 def forward(self, x1, x2, **kwargs):
---> 54 base_lazy_var = self._inducing_forward()
55 if x1.size(0) > 1:
56 base_lazy_var = base_lazy_var.repeat(x1.size(0), 1, 1)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_interpolation_kernel.py in _inducing_forward(self)
46 def _inducing_forward(self):
47 inducing_points_var = Variable(self.inducing_points)
---> 48 return super(GridInterpolationKernel, self).forward(inducing_points_var, inducing_points_var)
49
50 def forward_diag(self, x1, x2, **kwargs):
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_kernel.py in forward(self, x1, x2, **kwargs)
38 if settings.use_toeplitz.on():
39 first_item = grid_var[:, 0:1].contiguous()
---> 40 covar_columns = self.base_kernel_module(first_item, grid_var, **kwargs).evaluate()
41 covars = [ToeplitzLazyVariable(covar_columns[i : i + 1].squeeze(-2)) for i in range(n_dim)]
42 else:
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in evaluate(self)
115
116 def evaluate(self):
--> 117 return self.evaluate_kernel().evaluate()
118
119 def exact_predictive_mean(self, full_mean, train_labels, n_train, likelihood, precomputed_cache=None):
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in evaluate_kernel(self)
96 x2 = self.x2
97
---> 98 self._cached_kernel_eval = super(Kernel, self.kernel).__call__(x1, x2, **self.params)
99 if self.squeeze_row:
100 self._cached_kernel_eval.squeeze_(-2)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
160
161 def __call__(self, *inputs, **kwargs):
--> 162 outputs = self.forward(*inputs, **kwargs)
163 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):
164 return outputs
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/rbf_kernel.py in forward(self, x1, x2)
104 def forward(self, x1, x2):
105 lengthscales = self.log_lengthscale.exp().mul(math.sqrt(2)).clamp(self.eps, 1e5)
--> 106 diff = (x1.unsqueeze(2) - x2.unsqueeze(1)).div_(lengthscales.unsqueeze(1))
107 return diff.pow_(2).sum(-1).mul_(-1).exp_()
RuntimeError: The expanded size of the tensor (1) must match the existing size (2) at non-singleton dimension 3
|
RuntimeError
|
def __init__(
self,
nu=2.5,
ard_num_dims=None,
batch_size=1,
active_dims=None,
eps=1e-6,
log_lengthscale_prior=None,
log_lengthscale_bounds=None,
):
if nu not in {0.5, 1.5, 2.5}:
raise RuntimeError("nu expected to be 0.5, 1.5, or 2.5")
super(MaternKernel, self).__init__(
has_lengthscale=True,
ard_num_dims=ard_num_dims,
log_lengthscale_prior=log_lengthscale_prior,
active_dims=active_dims,
batch_size=batch_size,
log_lengthscale_bounds=log_lengthscale_bounds,
)
self.nu = nu
self.eps = eps
|
def __init__(
self,
nu=2.5,
ard_num_dims=None,
log_lengthscale_prior=None,
active_dims=None,
eps=1e-8,
batch_size=1,
log_lengthscale_bounds=None,
):
if nu not in {0.5, 1.5, 2.5}:
raise RuntimeError("nu expected to be 0.5, 1.5, or 2.5")
super(MaternKernel, self).__init__(
has_lengthscale=True,
ard_num_dims=ard_num_dims,
log_lengthscale_prior=log_lengthscale_prior,
active_dims=active_dims,
batch_size=batch_size,
log_lengthscale_bounds=log_lengthscale_bounds,
)
self.nu = nu
self.eps = eps
|
https://github.com/cornellius-gp/gpytorch/issues/249
|
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-144-12b763efd692> in <module>()
19 output = model(train_x)
20 # TODO: Fix this view call!!
---> 21 loss = -mll(output, train_y)
22 loss.backward()
23 print('Iter %d/%d - Loss: %.3f' % (i + 1, n_iter, loss.item()))
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
160
161 def __call__(self, *inputs, **kwargs):
--> 162 outputs = self.forward(*inputs, **kwargs)
163 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):
164 return outputs
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/mlls/exact_marginal_log_likelihood.py in forward(self, output, target)
49
50 # Get log determininat and first part of quadratic form
---> 51 inv_quad, log_det = covar.inv_quad_log_det(inv_quad_rhs=(target - mean).unsqueeze(-1), log_det=True)
52
53 # Add terms for SGPR / when inducing points are learned
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in inv_quad_log_det(self, inv_quad_rhs, log_det)
577 matrix_size = self.size(-1)
578 batch_size = self.size(0) if self.ndimension() == 3 else None
--> 579 tensor_cls = self.tensor_cls
580
581 args = lazy_var.representation()
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in tensor_cls(self)
882 def tensor_cls(self):
883 if not hasattr(self, "_tensor_cls"):
--> 884 first_item = self.representation()[0]
885 if isinstance(first_item, Variable):
886 first_item = first_item.data
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in representation(self)
733 representation.append(arg)
734 elif isinstance(arg, LazyVariable):
--> 735 representation += list(arg.representation())
736 else:
737 raise RuntimeError("Representation of a LazyVariable should consist only of Variables")
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in representation(self)
733 representation.append(arg)
734 elif isinstance(arg, LazyVariable):
--> 735 representation += list(arg.representation())
736 else:
737 raise RuntimeError("Representation of a LazyVariable should consist only of Variables")
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in representation(self)
109
110 def representation(self):
--> 111 return self.evaluate_kernel().representation()
112
113 def representation_tree(self):
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in evaluate_kernel(self)
96 x2 = self.x2
97
---> 98 self._cached_kernel_eval = super(Kernel, self.kernel).__call__(x1, x2, **self.params)
99 if self.squeeze_row:
100 self._cached_kernel_eval.squeeze_(-2)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
160
161 def __call__(self, *inputs, **kwargs):
--> 162 outputs = self.forward(*inputs, **kwargs)
163 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):
164 return outputs
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/multitask_kernel.py in forward(self, x1, x2)
61 def forward(self, x1, x2):
62 covar_i = self.task_covar_module.covar_matrix
---> 63 covar_x = self.data_covar_module.forward(x1, x2)
64 if covar_x.size(0) == 1:
65 covar_x = covar_x[0]
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_interpolation_kernel.py in forward(self, x1, x2, **kwargs)
52
53 def forward(self, x1, x2, **kwargs):
---> 54 base_lazy_var = self._inducing_forward()
55 if x1.size(0) > 1:
56 base_lazy_var = base_lazy_var.repeat(x1.size(0), 1, 1)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_interpolation_kernel.py in _inducing_forward(self)
46 def _inducing_forward(self):
47 inducing_points_var = Variable(self.inducing_points)
---> 48 return super(GridInterpolationKernel, self).forward(inducing_points_var, inducing_points_var)
49
50 def forward_diag(self, x1, x2, **kwargs):
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_kernel.py in forward(self, x1, x2, **kwargs)
38 if settings.use_toeplitz.on():
39 first_item = grid_var[:, 0:1].contiguous()
---> 40 covar_columns = self.base_kernel_module(first_item, grid_var, **kwargs).evaluate()
41 covars = [ToeplitzLazyVariable(covar_columns[i : i + 1].squeeze(-2)) for i in range(n_dim)]
42 else:
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in evaluate(self)
115
116 def evaluate(self):
--> 117 return self.evaluate_kernel().evaluate()
118
119 def exact_predictive_mean(self, full_mean, train_labels, n_train, likelihood, precomputed_cache=None):
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in evaluate_kernel(self)
96 x2 = self.x2
97
---> 98 self._cached_kernel_eval = super(Kernel, self.kernel).__call__(x1, x2, **self.params)
99 if self.squeeze_row:
100 self._cached_kernel_eval.squeeze_(-2)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
160
161 def __call__(self, *inputs, **kwargs):
--> 162 outputs = self.forward(*inputs, **kwargs)
163 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):
164 return outputs
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/rbf_kernel.py in forward(self, x1, x2)
104 def forward(self, x1, x2):
105 lengthscales = self.log_lengthscale.exp().mul(math.sqrt(2)).clamp(self.eps, 1e5)
--> 106 diff = (x1.unsqueeze(2) - x2.unsqueeze(1)).div_(lengthscales.unsqueeze(1))
107 return diff.pow_(2).sum(-1).mul_(-1).exp_()
RuntimeError: The expanded size of the tensor (1) must match the existing size (2) at non-singleton dimension 3
|
RuntimeError
|
def forward_diag(self, x1, x2):
mean = x1.mean(1, keepdim=True).mean(0, keepdim=True)
x1_normed = x1 - mean.unsqueeze(0).unsqueeze(1)
x2_normed = x2 - mean.unsqueeze(0).unsqueeze(1)
diff = x1_normed - x2_normed
distance_over_rho = diff.pow_(2).sum(-1).sqrt()
exp_component = torch.exp(-math.sqrt(self.nu * 2) * distance_over_rho)
if self.nu == 0.5:
constant_component = 1
elif self.nu == 1.5:
constant_component = (math.sqrt(3) * distance_over_rho).add(1)
elif self.nu == 2.5:
constant_component = (
(math.sqrt(5) * distance_over_rho)
.add(1)
.add(5.0 / 3.0 * distance_over_rho**2)
)
return constant_component * exp_component
|
def forward_diag(self, x1, x2):
lengthscale = self.log_lengthscale.exp()
mean = x1.mean(1).mean(0)
x1_normed = (x1 - mean.unsqueeze(0).unsqueeze(1)).div(lengthscale)
x2_normed = (x2 - mean.unsqueeze(0).unsqueeze(1)).div(lengthscale)
diff = x1_normed - x2_normed
distance_over_rho = diff.pow_(2).sum(-1).sqrt()
exp_component = torch.exp(-math.sqrt(self.nu * 2) * distance_over_rho)
if self.nu == 0.5:
constant_component = 1
elif self.nu == 1.5:
constant_component = (math.sqrt(3) * distance_over_rho).add(1)
elif self.nu == 2.5:
constant_component = (
(math.sqrt(5) * distance_over_rho)
.add(1)
.add(5.0 / 3.0 * distance_over_rho**2)
)
return constant_component * exp_component
|
https://github.com/cornellius-gp/gpytorch/issues/249
|
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-144-12b763efd692> in <module>()
19 output = model(train_x)
20 # TODO: Fix this view call!!
---> 21 loss = -mll(output, train_y)
22 loss.backward()
23 print('Iter %d/%d - Loss: %.3f' % (i + 1, n_iter, loss.item()))
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
160
161 def __call__(self, *inputs, **kwargs):
--> 162 outputs = self.forward(*inputs, **kwargs)
163 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):
164 return outputs
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/mlls/exact_marginal_log_likelihood.py in forward(self, output, target)
49
50 # Get log determininat and first part of quadratic form
---> 51 inv_quad, log_det = covar.inv_quad_log_det(inv_quad_rhs=(target - mean).unsqueeze(-1), log_det=True)
52
53 # Add terms for SGPR / when inducing points are learned
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in inv_quad_log_det(self, inv_quad_rhs, log_det)
577 matrix_size = self.size(-1)
578 batch_size = self.size(0) if self.ndimension() == 3 else None
--> 579 tensor_cls = self.tensor_cls
580
581 args = lazy_var.representation()
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in tensor_cls(self)
882 def tensor_cls(self):
883 if not hasattr(self, "_tensor_cls"):
--> 884 first_item = self.representation()[0]
885 if isinstance(first_item, Variable):
886 first_item = first_item.data
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in representation(self)
733 representation.append(arg)
734 elif isinstance(arg, LazyVariable):
--> 735 representation += list(arg.representation())
736 else:
737 raise RuntimeError("Representation of a LazyVariable should consist only of Variables")
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in representation(self)
733 representation.append(arg)
734 elif isinstance(arg, LazyVariable):
--> 735 representation += list(arg.representation())
736 else:
737 raise RuntimeError("Representation of a LazyVariable should consist only of Variables")
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in representation(self)
109
110 def representation(self):
--> 111 return self.evaluate_kernel().representation()
112
113 def representation_tree(self):
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in evaluate_kernel(self)
96 x2 = self.x2
97
---> 98 self._cached_kernel_eval = super(Kernel, self.kernel).__call__(x1, x2, **self.params)
99 if self.squeeze_row:
100 self._cached_kernel_eval.squeeze_(-2)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
160
161 def __call__(self, *inputs, **kwargs):
--> 162 outputs = self.forward(*inputs, **kwargs)
163 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):
164 return outputs
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/multitask_kernel.py in forward(self, x1, x2)
61 def forward(self, x1, x2):
62 covar_i = self.task_covar_module.covar_matrix
---> 63 covar_x = self.data_covar_module.forward(x1, x2)
64 if covar_x.size(0) == 1:
65 covar_x = covar_x[0]
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_interpolation_kernel.py in forward(self, x1, x2, **kwargs)
52
53 def forward(self, x1, x2, **kwargs):
---> 54 base_lazy_var = self._inducing_forward()
55 if x1.size(0) > 1:
56 base_lazy_var = base_lazy_var.repeat(x1.size(0), 1, 1)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_interpolation_kernel.py in _inducing_forward(self)
46 def _inducing_forward(self):
47 inducing_points_var = Variable(self.inducing_points)
---> 48 return super(GridInterpolationKernel, self).forward(inducing_points_var, inducing_points_var)
49
50 def forward_diag(self, x1, x2, **kwargs):
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_kernel.py in forward(self, x1, x2, **kwargs)
38 if settings.use_toeplitz.on():
39 first_item = grid_var[:, 0:1].contiguous()
---> 40 covar_columns = self.base_kernel_module(first_item, grid_var, **kwargs).evaluate()
41 covars = [ToeplitzLazyVariable(covar_columns[i : i + 1].squeeze(-2)) for i in range(n_dim)]
42 else:
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in evaluate(self)
115
116 def evaluate(self):
--> 117 return self.evaluate_kernel().evaluate()
118
119 def exact_predictive_mean(self, full_mean, train_labels, n_train, likelihood, precomputed_cache=None):
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in evaluate_kernel(self)
96 x2 = self.x2
97
---> 98 self._cached_kernel_eval = super(Kernel, self.kernel).__call__(x1, x2, **self.params)
99 if self.squeeze_row:
100 self._cached_kernel_eval.squeeze_(-2)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
160
161 def __call__(self, *inputs, **kwargs):
--> 162 outputs = self.forward(*inputs, **kwargs)
163 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):
164 return outputs
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/rbf_kernel.py in forward(self, x1, x2)
104 def forward(self, x1, x2):
105 lengthscales = self.log_lengthscale.exp().mul(math.sqrt(2)).clamp(self.eps, 1e5)
--> 106 diff = (x1.unsqueeze(2) - x2.unsqueeze(1)).div_(lengthscales.unsqueeze(1))
107 return diff.pow_(2).sum(-1).mul_(-1).exp_()
RuntimeError: The expanded size of the tensor (1) must match the existing size (2) at non-singleton dimension 3
|
RuntimeError
|
def forward(self, x1, x2):
mean = x1.view(-1, 1, *list(x1.size())[2:]).mean(0, keepdim=True)
x1_, x2_ = self._create_input_grid(x1 - mean, x2 - mean)
x1_ = x1_.div(self.lengthscale)
x2_ = x2_.div(self.lengthscale)
distance = (x1_ - x2_).norm(2, dim=-1)
exp_component = torch.exp(-math.sqrt(self.nu * 2) * distance)
if self.nu == 0.5:
constant_component = 1
elif self.nu == 1.5:
constant_component = (math.sqrt(3) * distance).add(1)
elif self.nu == 2.5:
constant_component = (
(math.sqrt(5) * distance).add(1).add(5.0 / 3.0 * distance**2)
)
return constant_component * exp_component
|
def forward(self, x1, x2):
lengthscale = self.log_lengthscale.exp()
mean = x1.mean(1).mean(0)
x1_normed = (x1 - mean.unsqueeze(0).unsqueeze(1)).div(lengthscale)
x2_normed = (x2 - mean.unsqueeze(0).unsqueeze(1)).div(lengthscale)
x1_squared = x1_normed.norm(2, -1).pow(2)
x2_squared = x2_normed.norm(2, -1).pow(2)
x1_t_x_2 = torch.matmul(x1_normed, x2_normed.transpose(-1, -2))
distance_over_rho = (
x1_squared.unsqueeze(-1) + x2_squared.unsqueeze(-2) - x1_t_x_2.mul(2)
)
distance_over_rho = distance_over_rho.clamp(self.eps, 1e10).sqrt()
exp_component = torch.exp(-math.sqrt(self.nu * 2) * distance_over_rho)
if self.nu == 0.5:
constant_component = 1
elif self.nu == 1.5:
constant_component = (math.sqrt(3) * distance_over_rho).add(1)
elif self.nu == 2.5:
constant_component = (
(math.sqrt(5) * distance_over_rho)
.add(1)
.add(5.0 / 3.0 * distance_over_rho**2)
)
return constant_component * exp_component
|
https://github.com/cornellius-gp/gpytorch/issues/249
|
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-144-12b763efd692> in <module>()
19 output = model(train_x)
20 # TODO: Fix this view call!!
---> 21 loss = -mll(output, train_y)
22 loss.backward()
23 print('Iter %d/%d - Loss: %.3f' % (i + 1, n_iter, loss.item()))
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
160
161 def __call__(self, *inputs, **kwargs):
--> 162 outputs = self.forward(*inputs, **kwargs)
163 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):
164 return outputs
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/mlls/exact_marginal_log_likelihood.py in forward(self, output, target)
49
50 # Get log determininat and first part of quadratic form
---> 51 inv_quad, log_det = covar.inv_quad_log_det(inv_quad_rhs=(target - mean).unsqueeze(-1), log_det=True)
52
53 # Add terms for SGPR / when inducing points are learned
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in inv_quad_log_det(self, inv_quad_rhs, log_det)
577 matrix_size = self.size(-1)
578 batch_size = self.size(0) if self.ndimension() == 3 else None
--> 579 tensor_cls = self.tensor_cls
580
581 args = lazy_var.representation()
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in tensor_cls(self)
882 def tensor_cls(self):
883 if not hasattr(self, "_tensor_cls"):
--> 884 first_item = self.representation()[0]
885 if isinstance(first_item, Variable):
886 first_item = first_item.data
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in representation(self)
733 representation.append(arg)
734 elif isinstance(arg, LazyVariable):
--> 735 representation += list(arg.representation())
736 else:
737 raise RuntimeError("Representation of a LazyVariable should consist only of Variables")
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in representation(self)
733 representation.append(arg)
734 elif isinstance(arg, LazyVariable):
--> 735 representation += list(arg.representation())
736 else:
737 raise RuntimeError("Representation of a LazyVariable should consist only of Variables")
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in representation(self)
109
110 def representation(self):
--> 111 return self.evaluate_kernel().representation()
112
113 def representation_tree(self):
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in evaluate_kernel(self)
96 x2 = self.x2
97
---> 98 self._cached_kernel_eval = super(Kernel, self.kernel).__call__(x1, x2, **self.params)
99 if self.squeeze_row:
100 self._cached_kernel_eval.squeeze_(-2)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
160
161 def __call__(self, *inputs, **kwargs):
--> 162 outputs = self.forward(*inputs, **kwargs)
163 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):
164 return outputs
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/multitask_kernel.py in forward(self, x1, x2)
61 def forward(self, x1, x2):
62 covar_i = self.task_covar_module.covar_matrix
---> 63 covar_x = self.data_covar_module.forward(x1, x2)
64 if covar_x.size(0) == 1:
65 covar_x = covar_x[0]
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_interpolation_kernel.py in forward(self, x1, x2, **kwargs)
52
53 def forward(self, x1, x2, **kwargs):
---> 54 base_lazy_var = self._inducing_forward()
55 if x1.size(0) > 1:
56 base_lazy_var = base_lazy_var.repeat(x1.size(0), 1, 1)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_interpolation_kernel.py in _inducing_forward(self)
46 def _inducing_forward(self):
47 inducing_points_var = Variable(self.inducing_points)
---> 48 return super(GridInterpolationKernel, self).forward(inducing_points_var, inducing_points_var)
49
50 def forward_diag(self, x1, x2, **kwargs):
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_kernel.py in forward(self, x1, x2, **kwargs)
38 if settings.use_toeplitz.on():
39 first_item = grid_var[:, 0:1].contiguous()
---> 40 covar_columns = self.base_kernel_module(first_item, grid_var, **kwargs).evaluate()
41 covars = [ToeplitzLazyVariable(covar_columns[i : i + 1].squeeze(-2)) for i in range(n_dim)]
42 else:
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in evaluate(self)
115
116 def evaluate(self):
--> 117 return self.evaluate_kernel().evaluate()
118
119 def exact_predictive_mean(self, full_mean, train_labels, n_train, likelihood, precomputed_cache=None):
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in evaluate_kernel(self)
96 x2 = self.x2
97
---> 98 self._cached_kernel_eval = super(Kernel, self.kernel).__call__(x1, x2, **self.params)
99 if self.squeeze_row:
100 self._cached_kernel_eval.squeeze_(-2)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
160
161 def __call__(self, *inputs, **kwargs):
--> 162 outputs = self.forward(*inputs, **kwargs)
163 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):
164 return outputs
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/rbf_kernel.py in forward(self, x1, x2)
104 def forward(self, x1, x2):
105 lengthscales = self.log_lengthscale.exp().mul(math.sqrt(2)).clamp(self.eps, 1e5)
--> 106 diff = (x1.unsqueeze(2) - x2.unsqueeze(1)).div_(lengthscales.unsqueeze(1))
107 return diff.pow_(2).sum(-1).mul_(-1).exp_()
RuntimeError: The expanded size of the tensor (1) must match the existing size (2) at non-singleton dimension 3
|
RuntimeError
|
def forward(self, x1, x2):
covar_i = self.task_covar_module.covar_matrix
covar_x = self.data_covar_module(x1, x2)
if covar_x.size(0) == 1:
covar_x = covar_x[0]
if not isinstance(covar_x, LazyTensor):
covar_x = NonLazyTensor(covar_x)
res = KroneckerProductLazyTensor(covar_i, covar_x)
return res
|
def forward(self, x1, x2):
covar_i = self.task_covar_module.covar_matrix
covar_x = self.data_covar_module.forward(x1, x2)
if covar_x.size(0) == 1:
covar_x = covar_x[0]
if not isinstance(covar_x, LazyTensor):
covar_x = NonLazyTensor(covar_x)
res = KroneckerProductLazyTensor(covar_i, covar_x)
return res
|
https://github.com/cornellius-gp/gpytorch/issues/249
|
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-144-12b763efd692> in <module>()
19 output = model(train_x)
20 # TODO: Fix this view call!!
---> 21 loss = -mll(output, train_y)
22 loss.backward()
23 print('Iter %d/%d - Loss: %.3f' % (i + 1, n_iter, loss.item()))
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
160
161 def __call__(self, *inputs, **kwargs):
--> 162 outputs = self.forward(*inputs, **kwargs)
163 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):
164 return outputs
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/mlls/exact_marginal_log_likelihood.py in forward(self, output, target)
49
50 # Get log determininat and first part of quadratic form
---> 51 inv_quad, log_det = covar.inv_quad_log_det(inv_quad_rhs=(target - mean).unsqueeze(-1), log_det=True)
52
53 # Add terms for SGPR / when inducing points are learned
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in inv_quad_log_det(self, inv_quad_rhs, log_det)
577 matrix_size = self.size(-1)
578 batch_size = self.size(0) if self.ndimension() == 3 else None
--> 579 tensor_cls = self.tensor_cls
580
581 args = lazy_var.representation()
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in tensor_cls(self)
882 def tensor_cls(self):
883 if not hasattr(self, "_tensor_cls"):
--> 884 first_item = self.representation()[0]
885 if isinstance(first_item, Variable):
886 first_item = first_item.data
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in representation(self)
733 representation.append(arg)
734 elif isinstance(arg, LazyVariable):
--> 735 representation += list(arg.representation())
736 else:
737 raise RuntimeError("Representation of a LazyVariable should consist only of Variables")
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in representation(self)
733 representation.append(arg)
734 elif isinstance(arg, LazyVariable):
--> 735 representation += list(arg.representation())
736 else:
737 raise RuntimeError("Representation of a LazyVariable should consist only of Variables")
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in representation(self)
109
110 def representation(self):
--> 111 return self.evaluate_kernel().representation()
112
113 def representation_tree(self):
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in evaluate_kernel(self)
96 x2 = self.x2
97
---> 98 self._cached_kernel_eval = super(Kernel, self.kernel).__call__(x1, x2, **self.params)
99 if self.squeeze_row:
100 self._cached_kernel_eval.squeeze_(-2)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
160
161 def __call__(self, *inputs, **kwargs):
--> 162 outputs = self.forward(*inputs, **kwargs)
163 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):
164 return outputs
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/multitask_kernel.py in forward(self, x1, x2)
61 def forward(self, x1, x2):
62 covar_i = self.task_covar_module.covar_matrix
---> 63 covar_x = self.data_covar_module.forward(x1, x2)
64 if covar_x.size(0) == 1:
65 covar_x = covar_x[0]
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_interpolation_kernel.py in forward(self, x1, x2, **kwargs)
52
53 def forward(self, x1, x2, **kwargs):
---> 54 base_lazy_var = self._inducing_forward()
55 if x1.size(0) > 1:
56 base_lazy_var = base_lazy_var.repeat(x1.size(0), 1, 1)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_interpolation_kernel.py in _inducing_forward(self)
46 def _inducing_forward(self):
47 inducing_points_var = Variable(self.inducing_points)
---> 48 return super(GridInterpolationKernel, self).forward(inducing_points_var, inducing_points_var)
49
50 def forward_diag(self, x1, x2, **kwargs):
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_kernel.py in forward(self, x1, x2, **kwargs)
38 if settings.use_toeplitz.on():
39 first_item = grid_var[:, 0:1].contiguous()
---> 40 covar_columns = self.base_kernel_module(first_item, grid_var, **kwargs).evaluate()
41 covars = [ToeplitzLazyVariable(covar_columns[i : i + 1].squeeze(-2)) for i in range(n_dim)]
42 else:
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in evaluate(self)
115
116 def evaluate(self):
--> 117 return self.evaluate_kernel().evaluate()
118
119 def exact_predictive_mean(self, full_mean, train_labels, n_train, likelihood, precomputed_cache=None):
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in evaluate_kernel(self)
96 x2 = self.x2
97
---> 98 self._cached_kernel_eval = super(Kernel, self.kernel).__call__(x1, x2, **self.params)
99 if self.squeeze_row:
100 self._cached_kernel_eval.squeeze_(-2)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
160
161 def __call__(self, *inputs, **kwargs):
--> 162 outputs = self.forward(*inputs, **kwargs)
163 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):
164 return outputs
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/rbf_kernel.py in forward(self, x1, x2)
104 def forward(self, x1, x2):
105 lengthscales = self.log_lengthscale.exp().mul(math.sqrt(2)).clamp(self.eps, 1e5)
--> 106 diff = (x1.unsqueeze(2) - x2.unsqueeze(1)).div_(lengthscales.unsqueeze(1))
107 return diff.pow_(2).sum(-1).mul_(-1).exp_()
RuntimeError: The expanded size of the tensor (1) must match the existing size (2) at non-singleton dimension 3
|
RuntimeError
|
def __init__(
self,
active_dims=None,
batch_size=1,
eps=1e-6,
log_lengthscale_prior=None,
log_period_length_prior=None,
log_lengthscale_bounds=None,
log_period_length_bounds=None,
):
log_period_length_prior = _bounds_to_prior(
prior=log_period_length_prior, bounds=log_period_length_bounds
)
super(PeriodicKernel, self).__init__(
has_lengthscale=True,
active_dims=active_dims,
log_lengthscale_prior=log_lengthscale_prior,
log_lengthscale_bounds=log_lengthscale_bounds,
)
self.eps = eps
self.register_parameter(
name="log_period_length",
parameter=torch.nn.Parameter(torch.zeros(batch_size, 1, 1)),
prior=log_period_length_prior,
)
|
def __init__(
self,
log_lengthscale_prior=None,
log_period_length_prior=None,
eps=1e-5,
active_dims=None,
log_lengthscale_bounds=None,
log_period_length_bounds=None,
):
log_period_length_prior = _bounds_to_prior(
prior=log_period_length_prior, bounds=log_period_length_bounds
)
super(PeriodicKernel, self).__init__(
has_lengthscale=True,
active_dims=active_dims,
log_lengthscale_prior=log_lengthscale_prior,
log_lengthscale_bounds=log_lengthscale_bounds,
)
self.eps = eps
self.register_parameter(
name="log_period_length",
parameter=torch.nn.Parameter(torch.zeros(1, 1, 1)),
prior=log_period_length_prior,
)
|
https://github.com/cornellius-gp/gpytorch/issues/249
|
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-144-12b763efd692> in <module>()
19 output = model(train_x)
20 # TODO: Fix this view call!!
---> 21 loss = -mll(output, train_y)
22 loss.backward()
23 print('Iter %d/%d - Loss: %.3f' % (i + 1, n_iter, loss.item()))
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
160
161 def __call__(self, *inputs, **kwargs):
--> 162 outputs = self.forward(*inputs, **kwargs)
163 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):
164 return outputs
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/mlls/exact_marginal_log_likelihood.py in forward(self, output, target)
49
50 # Get log determininat and first part of quadratic form
---> 51 inv_quad, log_det = covar.inv_quad_log_det(inv_quad_rhs=(target - mean).unsqueeze(-1), log_det=True)
52
53 # Add terms for SGPR / when inducing points are learned
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in inv_quad_log_det(self, inv_quad_rhs, log_det)
577 matrix_size = self.size(-1)
578 batch_size = self.size(0) if self.ndimension() == 3 else None
--> 579 tensor_cls = self.tensor_cls
580
581 args = lazy_var.representation()
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in tensor_cls(self)
882 def tensor_cls(self):
883 if not hasattr(self, "_tensor_cls"):
--> 884 first_item = self.representation()[0]
885 if isinstance(first_item, Variable):
886 first_item = first_item.data
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in representation(self)
733 representation.append(arg)
734 elif isinstance(arg, LazyVariable):
--> 735 representation += list(arg.representation())
736 else:
737 raise RuntimeError("Representation of a LazyVariable should consist only of Variables")
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in representation(self)
733 representation.append(arg)
734 elif isinstance(arg, LazyVariable):
--> 735 representation += list(arg.representation())
736 else:
737 raise RuntimeError("Representation of a LazyVariable should consist only of Variables")
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in representation(self)
109
110 def representation(self):
--> 111 return self.evaluate_kernel().representation()
112
113 def representation_tree(self):
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in evaluate_kernel(self)
96 x2 = self.x2
97
---> 98 self._cached_kernel_eval = super(Kernel, self.kernel).__call__(x1, x2, **self.params)
99 if self.squeeze_row:
100 self._cached_kernel_eval.squeeze_(-2)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
160
161 def __call__(self, *inputs, **kwargs):
--> 162 outputs = self.forward(*inputs, **kwargs)
163 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):
164 return outputs
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/multitask_kernel.py in forward(self, x1, x2)
61 def forward(self, x1, x2):
62 covar_i = self.task_covar_module.covar_matrix
---> 63 covar_x = self.data_covar_module.forward(x1, x2)
64 if covar_x.size(0) == 1:
65 covar_x = covar_x[0]
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_interpolation_kernel.py in forward(self, x1, x2, **kwargs)
52
53 def forward(self, x1, x2, **kwargs):
---> 54 base_lazy_var = self._inducing_forward()
55 if x1.size(0) > 1:
56 base_lazy_var = base_lazy_var.repeat(x1.size(0), 1, 1)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_interpolation_kernel.py in _inducing_forward(self)
46 def _inducing_forward(self):
47 inducing_points_var = Variable(self.inducing_points)
---> 48 return super(GridInterpolationKernel, self).forward(inducing_points_var, inducing_points_var)
49
50 def forward_diag(self, x1, x2, **kwargs):
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_kernel.py in forward(self, x1, x2, **kwargs)
38 if settings.use_toeplitz.on():
39 first_item = grid_var[:, 0:1].contiguous()
---> 40 covar_columns = self.base_kernel_module(first_item, grid_var, **kwargs).evaluate()
41 covars = [ToeplitzLazyVariable(covar_columns[i : i + 1].squeeze(-2)) for i in range(n_dim)]
42 else:
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in evaluate(self)
115
116 def evaluate(self):
--> 117 return self.evaluate_kernel().evaluate()
118
119 def exact_predictive_mean(self, full_mean, train_labels, n_train, likelihood, precomputed_cache=None):
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in evaluate_kernel(self)
96 x2 = self.x2
97
---> 98 self._cached_kernel_eval = super(Kernel, self.kernel).__call__(x1, x2, **self.params)
99 if self.squeeze_row:
100 self._cached_kernel_eval.squeeze_(-2)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
160
161 def __call__(self, *inputs, **kwargs):
--> 162 outputs = self.forward(*inputs, **kwargs)
163 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):
164 return outputs
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/rbf_kernel.py in forward(self, x1, x2)
104 def forward(self, x1, x2):
105 lengthscales = self.log_lengthscale.exp().mul(math.sqrt(2)).clamp(self.eps, 1e5)
--> 106 diff = (x1.unsqueeze(2) - x2.unsqueeze(1)).div_(lengthscales.unsqueeze(1))
107 return diff.pow_(2).sum(-1).mul_(-1).exp_()
RuntimeError: The expanded size of the tensor (1) must match the existing size (2) at non-singleton dimension 3
|
RuntimeError
|
def forward(self, x1, x2):
x1_, x2_ = self._create_input_grid(x1, x2)
x1_ = x1_.div(self.period_length)
x2_ = x2_.div(self.period_length)
diff = torch.sum((x1_ - x2_).abs(), -1)
res = torch.sin(diff.mul(math.pi)).pow(2).mul(-2 / self.lengthscale).exp_()
return res
|
def forward(self, x1, x2):
lengthscale = (self.log_lengthscale.exp() + self.eps).sqrt_()
period_length = (self.log_period_length.exp() + self.eps).sqrt_()
diff = torch.sum((x1.unsqueeze(2) - x2.unsqueeze(1)).abs(), -1)
res = -2 * torch.sin(math.pi * diff / period_length).pow(2) / lengthscale
return res.exp()
|
https://github.com/cornellius-gp/gpytorch/issues/249
|
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-144-12b763efd692> in <module>()
19 output = model(train_x)
20 # TODO: Fix this view call!!
---> 21 loss = -mll(output, train_y)
22 loss.backward()
23 print('Iter %d/%d - Loss: %.3f' % (i + 1, n_iter, loss.item()))
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
160
161 def __call__(self, *inputs, **kwargs):
--> 162 outputs = self.forward(*inputs, **kwargs)
163 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):
164 return outputs
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/mlls/exact_marginal_log_likelihood.py in forward(self, output, target)
49
50 # Get log determininat and first part of quadratic form
---> 51 inv_quad, log_det = covar.inv_quad_log_det(inv_quad_rhs=(target - mean).unsqueeze(-1), log_det=True)
52
53 # Add terms for SGPR / when inducing points are learned
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in inv_quad_log_det(self, inv_quad_rhs, log_det)
577 matrix_size = self.size(-1)
578 batch_size = self.size(0) if self.ndimension() == 3 else None
--> 579 tensor_cls = self.tensor_cls
580
581 args = lazy_var.representation()
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in tensor_cls(self)
882 def tensor_cls(self):
883 if not hasattr(self, "_tensor_cls"):
--> 884 first_item = self.representation()[0]
885 if isinstance(first_item, Variable):
886 first_item = first_item.data
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in representation(self)
733 representation.append(arg)
734 elif isinstance(arg, LazyVariable):
--> 735 representation += list(arg.representation())
736 else:
737 raise RuntimeError("Representation of a LazyVariable should consist only of Variables")
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in representation(self)
733 representation.append(arg)
734 elif isinstance(arg, LazyVariable):
--> 735 representation += list(arg.representation())
736 else:
737 raise RuntimeError("Representation of a LazyVariable should consist only of Variables")
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in representation(self)
109
110 def representation(self):
--> 111 return self.evaluate_kernel().representation()
112
113 def representation_tree(self):
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in evaluate_kernel(self)
96 x2 = self.x2
97
---> 98 self._cached_kernel_eval = super(Kernel, self.kernel).__call__(x1, x2, **self.params)
99 if self.squeeze_row:
100 self._cached_kernel_eval.squeeze_(-2)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
160
161 def __call__(self, *inputs, **kwargs):
--> 162 outputs = self.forward(*inputs, **kwargs)
163 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):
164 return outputs
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/multitask_kernel.py in forward(self, x1, x2)
61 def forward(self, x1, x2):
62 covar_i = self.task_covar_module.covar_matrix
---> 63 covar_x = self.data_covar_module.forward(x1, x2)
64 if covar_x.size(0) == 1:
65 covar_x = covar_x[0]
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_interpolation_kernel.py in forward(self, x1, x2, **kwargs)
52
53 def forward(self, x1, x2, **kwargs):
---> 54 base_lazy_var = self._inducing_forward()
55 if x1.size(0) > 1:
56 base_lazy_var = base_lazy_var.repeat(x1.size(0), 1, 1)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_interpolation_kernel.py in _inducing_forward(self)
46 def _inducing_forward(self):
47 inducing_points_var = Variable(self.inducing_points)
---> 48 return super(GridInterpolationKernel, self).forward(inducing_points_var, inducing_points_var)
49
50 def forward_diag(self, x1, x2, **kwargs):
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_kernel.py in forward(self, x1, x2, **kwargs)
38 if settings.use_toeplitz.on():
39 first_item = grid_var[:, 0:1].contiguous()
---> 40 covar_columns = self.base_kernel_module(first_item, grid_var, **kwargs).evaluate()
41 covars = [ToeplitzLazyVariable(covar_columns[i : i + 1].squeeze(-2)) for i in range(n_dim)]
42 else:
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in evaluate(self)
115
116 def evaluate(self):
--> 117 return self.evaluate_kernel().evaluate()
118
119 def exact_predictive_mean(self, full_mean, train_labels, n_train, likelihood, precomputed_cache=None):
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in evaluate_kernel(self)
96 x2 = self.x2
97
---> 98 self._cached_kernel_eval = super(Kernel, self.kernel).__call__(x1, x2, **self.params)
99 if self.squeeze_row:
100 self._cached_kernel_eval.squeeze_(-2)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
160
161 def __call__(self, *inputs, **kwargs):
--> 162 outputs = self.forward(*inputs, **kwargs)
163 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):
164 return outputs
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/rbf_kernel.py in forward(self, x1, x2)
104 def forward(self, x1, x2):
105 lengthscales = self.log_lengthscale.exp().mul(math.sqrt(2)).clamp(self.eps, 1e5)
--> 106 diff = (x1.unsqueeze(2) - x2.unsqueeze(1)).div_(lengthscales.unsqueeze(1))
107 return diff.pow_(2).sum(-1).mul_(-1).exp_()
RuntimeError: The expanded size of the tensor (1) must match the existing size (2) at non-singleton dimension 3
|
RuntimeError
|
def forward(self, x1, x2):
x1_, x2_ = self._create_input_grid(x1, x2)
x1_ = x1_.div(self.lengthscale)
x2_ = x2_.div(self.lengthscale)
diff = (x1_ - x2_).norm(2, dim=-1)
return diff.pow(2).div_(-2).exp_()
|
def forward(self, x1, x2):
lengthscales = self.log_lengthscale.exp().mul(math.sqrt(2)).clamp(self.eps, 1e5)
diff = (x1.unsqueeze(2) - x2.unsqueeze(1)).div_(lengthscales.unsqueeze(1))
return diff.pow_(2).sum(-1).mul_(-1).exp_()
|
https://github.com/cornellius-gp/gpytorch/issues/249
|
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-144-12b763efd692> in <module>()
19 output = model(train_x)
20 # TODO: Fix this view call!!
---> 21 loss = -mll(output, train_y)
22 loss.backward()
23 print('Iter %d/%d - Loss: %.3f' % (i + 1, n_iter, loss.item()))
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
160
161 def __call__(self, *inputs, **kwargs):
--> 162 outputs = self.forward(*inputs, **kwargs)
163 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):
164 return outputs
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/mlls/exact_marginal_log_likelihood.py in forward(self, output, target)
49
50 # Get log determininat and first part of quadratic form
---> 51 inv_quad, log_det = covar.inv_quad_log_det(inv_quad_rhs=(target - mean).unsqueeze(-1), log_det=True)
52
53 # Add terms for SGPR / when inducing points are learned
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in inv_quad_log_det(self, inv_quad_rhs, log_det)
577 matrix_size = self.size(-1)
578 batch_size = self.size(0) if self.ndimension() == 3 else None
--> 579 tensor_cls = self.tensor_cls
580
581 args = lazy_var.representation()
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in tensor_cls(self)
882 def tensor_cls(self):
883 if not hasattr(self, "_tensor_cls"):
--> 884 first_item = self.representation()[0]
885 if isinstance(first_item, Variable):
886 first_item = first_item.data
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in representation(self)
733 representation.append(arg)
734 elif isinstance(arg, LazyVariable):
--> 735 representation += list(arg.representation())
736 else:
737 raise RuntimeError("Representation of a LazyVariable should consist only of Variables")
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in representation(self)
733 representation.append(arg)
734 elif isinstance(arg, LazyVariable):
--> 735 representation += list(arg.representation())
736 else:
737 raise RuntimeError("Representation of a LazyVariable should consist only of Variables")
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in representation(self)
109
110 def representation(self):
--> 111 return self.evaluate_kernel().representation()
112
113 def representation_tree(self):
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in evaluate_kernel(self)
96 x2 = self.x2
97
---> 98 self._cached_kernel_eval = super(Kernel, self.kernel).__call__(x1, x2, **self.params)
99 if self.squeeze_row:
100 self._cached_kernel_eval.squeeze_(-2)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
160
161 def __call__(self, *inputs, **kwargs):
--> 162 outputs = self.forward(*inputs, **kwargs)
163 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):
164 return outputs
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/multitask_kernel.py in forward(self, x1, x2)
61 def forward(self, x1, x2):
62 covar_i = self.task_covar_module.covar_matrix
---> 63 covar_x = self.data_covar_module.forward(x1, x2)
64 if covar_x.size(0) == 1:
65 covar_x = covar_x[0]
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_interpolation_kernel.py in forward(self, x1, x2, **kwargs)
52
53 def forward(self, x1, x2, **kwargs):
---> 54 base_lazy_var = self._inducing_forward()
55 if x1.size(0) > 1:
56 base_lazy_var = base_lazy_var.repeat(x1.size(0), 1, 1)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_interpolation_kernel.py in _inducing_forward(self)
46 def _inducing_forward(self):
47 inducing_points_var = Variable(self.inducing_points)
---> 48 return super(GridInterpolationKernel, self).forward(inducing_points_var, inducing_points_var)
49
50 def forward_diag(self, x1, x2, **kwargs):
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_kernel.py in forward(self, x1, x2, **kwargs)
38 if settings.use_toeplitz.on():
39 first_item = grid_var[:, 0:1].contiguous()
---> 40 covar_columns = self.base_kernel_module(first_item, grid_var, **kwargs).evaluate()
41 covars = [ToeplitzLazyVariable(covar_columns[i : i + 1].squeeze(-2)) for i in range(n_dim)]
42 else:
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in evaluate(self)
115
116 def evaluate(self):
--> 117 return self.evaluate_kernel().evaluate()
118
119 def exact_predictive_mean(self, full_mean, train_labels, n_train, likelihood, precomputed_cache=None):
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in evaluate_kernel(self)
96 x2 = self.x2
97
---> 98 self._cached_kernel_eval = super(Kernel, self.kernel).__call__(x1, x2, **self.params)
99 if self.squeeze_row:
100 self._cached_kernel_eval.squeeze_(-2)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
160
161 def __call__(self, *inputs, **kwargs):
--> 162 outputs = self.forward(*inputs, **kwargs)
163 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):
164 return outputs
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/rbf_kernel.py in forward(self, x1, x2)
104 def forward(self, x1, x2):
105 lengthscales = self.log_lengthscale.exp().mul(math.sqrt(2)).clamp(self.eps, 1e5)
--> 106 diff = (x1.unsqueeze(2) - x2.unsqueeze(1)).div_(lengthscales.unsqueeze(1))
107 return diff.pow_(2).sum(-1).mul_(-1).exp_()
RuntimeError: The expanded size of the tensor (1) must match the existing size (2) at non-singleton dimension 3
|
RuntimeError
|
def __init__(
self,
num_mixtures=None,
ard_num_dims=1,
batch_size=1,
active_dims=None,
eps=1e-6,
log_mixture_scales_prior=None,
log_mixture_means_prior=None,
log_mixture_weights_prior=None,
n_mixtures=None,
n_dims=None,
):
if n_mixtures is not None:
warnings.warn(
"n_mixtures is deprecated. Use num_mixtures instead.", DeprecationWarning
)
num_mixtures = n_mixtures
if num_mixtures is None:
raise RuntimeError("num_mixtures is a required argument")
if n_dims is not None:
warnings.warn(
"n_dims is deprecated. Use ard_num_dims instead.", DeprecationWarning
)
ard_num_dims = n_dims
if (
log_mixture_means_prior is not None
or log_mixture_scales_prior is not None
or log_mixture_weights_prior is not None
):
logger.warning("Priors not implemented for SpectralMixtureKernel")
# This kernel does not use the default lengthscale
super(SpectralMixtureKernel, self).__init__(active_dims=active_dims)
self.num_mixtures = num_mixtures
self.batch_size = batch_size
self.ard_num_dims = ard_num_dims
self.eps = eps
self.register_parameter(
name="log_mixture_weights",
parameter=torch.nn.Parameter(torch.zeros(self.batch_size, self.num_mixtures)),
)
self.register_parameter(
name="log_mixture_means",
parameter=torch.nn.Parameter(
torch.zeros(self.batch_size, self.num_mixtures, 1, self.ard_num_dims)
),
)
self.register_parameter(
name="log_mixture_scales",
parameter=torch.nn.Parameter(
torch.zeros(self.batch_size, self.num_mixtures, 1, self.ard_num_dims)
),
)
|
def __init__(
self,
n_mixtures,
n_dims=1,
log_mixture_weight_prior=None,
log_mixture_mean_prior=None,
log_mixture_scale_prior=None,
active_dims=None,
):
self.n_mixtures = n_mixtures
self.n_dims = n_dims
if (
log_mixture_mean_prior is not None
or log_mixture_scale_prior is not None
or log_mixture_weight_prior is not None
):
logger.warning("Priors not implemented for SpectralMixtureKernel")
super(SpectralMixtureKernel, self).__init__(active_dims=active_dims)
self.register_parameter(
name="log_mixture_weights",
parameter=torch.nn.Parameter(torch.zeros(self.n_mixtures)),
)
self.register_parameter(
name="log_mixture_means",
parameter=torch.nn.Parameter(torch.zeros(self.n_mixtures, self.n_dims)),
)
self.register_parameter(
name="log_mixture_scales",
parameter=torch.nn.Parameter(torch.zeros(self.n_mixtures, self.n_dims)),
)
|
https://github.com/cornellius-gp/gpytorch/issues/249
|
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-144-12b763efd692> in <module>()
19 output = model(train_x)
20 # TODO: Fix this view call!!
---> 21 loss = -mll(output, train_y)
22 loss.backward()
23 print('Iter %d/%d - Loss: %.3f' % (i + 1, n_iter, loss.item()))
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
160
161 def __call__(self, *inputs, **kwargs):
--> 162 outputs = self.forward(*inputs, **kwargs)
163 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):
164 return outputs
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/mlls/exact_marginal_log_likelihood.py in forward(self, output, target)
49
50 # Get log determininat and first part of quadratic form
---> 51 inv_quad, log_det = covar.inv_quad_log_det(inv_quad_rhs=(target - mean).unsqueeze(-1), log_det=True)
52
53 # Add terms for SGPR / when inducing points are learned
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in inv_quad_log_det(self, inv_quad_rhs, log_det)
577 matrix_size = self.size(-1)
578 batch_size = self.size(0) if self.ndimension() == 3 else None
--> 579 tensor_cls = self.tensor_cls
580
581 args = lazy_var.representation()
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in tensor_cls(self)
882 def tensor_cls(self):
883 if not hasattr(self, "_tensor_cls"):
--> 884 first_item = self.representation()[0]
885 if isinstance(first_item, Variable):
886 first_item = first_item.data
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in representation(self)
733 representation.append(arg)
734 elif isinstance(arg, LazyVariable):
--> 735 representation += list(arg.representation())
736 else:
737 raise RuntimeError("Representation of a LazyVariable should consist only of Variables")
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in representation(self)
733 representation.append(arg)
734 elif isinstance(arg, LazyVariable):
--> 735 representation += list(arg.representation())
736 else:
737 raise RuntimeError("Representation of a LazyVariable should consist only of Variables")
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in representation(self)
109
110 def representation(self):
--> 111 return self.evaluate_kernel().representation()
112
113 def representation_tree(self):
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in evaluate_kernel(self)
96 x2 = self.x2
97
---> 98 self._cached_kernel_eval = super(Kernel, self.kernel).__call__(x1, x2, **self.params)
99 if self.squeeze_row:
100 self._cached_kernel_eval.squeeze_(-2)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
160
161 def __call__(self, *inputs, **kwargs):
--> 162 outputs = self.forward(*inputs, **kwargs)
163 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):
164 return outputs
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/multitask_kernel.py in forward(self, x1, x2)
61 def forward(self, x1, x2):
62 covar_i = self.task_covar_module.covar_matrix
---> 63 covar_x = self.data_covar_module.forward(x1, x2)
64 if covar_x.size(0) == 1:
65 covar_x = covar_x[0]
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_interpolation_kernel.py in forward(self, x1, x2, **kwargs)
52
53 def forward(self, x1, x2, **kwargs):
---> 54 base_lazy_var = self._inducing_forward()
55 if x1.size(0) > 1:
56 base_lazy_var = base_lazy_var.repeat(x1.size(0), 1, 1)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_interpolation_kernel.py in _inducing_forward(self)
46 def _inducing_forward(self):
47 inducing_points_var = Variable(self.inducing_points)
---> 48 return super(GridInterpolationKernel, self).forward(inducing_points_var, inducing_points_var)
49
50 def forward_diag(self, x1, x2, **kwargs):
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_kernel.py in forward(self, x1, x2, **kwargs)
38 if settings.use_toeplitz.on():
39 first_item = grid_var[:, 0:1].contiguous()
---> 40 covar_columns = self.base_kernel_module(first_item, grid_var, **kwargs).evaluate()
41 covars = [ToeplitzLazyVariable(covar_columns[i : i + 1].squeeze(-2)) for i in range(n_dim)]
42 else:
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in evaluate(self)
115
116 def evaluate(self):
--> 117 return self.evaluate_kernel().evaluate()
118
119 def exact_predictive_mean(self, full_mean, train_labels, n_train, likelihood, precomputed_cache=None):
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in evaluate_kernel(self)
96 x2 = self.x2
97
---> 98 self._cached_kernel_eval = super(Kernel, self.kernel).__call__(x1, x2, **self.params)
99 if self.squeeze_row:
100 self._cached_kernel_eval.squeeze_(-2)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
160
161 def __call__(self, *inputs, **kwargs):
--> 162 outputs = self.forward(*inputs, **kwargs)
163 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):
164 return outputs
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/rbf_kernel.py in forward(self, x1, x2)
104 def forward(self, x1, x2):
105 lengthscales = self.log_lengthscale.exp().mul(math.sqrt(2)).clamp(self.eps, 1e5)
--> 106 diff = (x1.unsqueeze(2) - x2.unsqueeze(1)).div_(lengthscales.unsqueeze(1))
107 return diff.pow_(2).sum(-1).mul_(-1).exp_()
RuntimeError: The expanded size of the tensor (1) must match the existing size (2) at non-singleton dimension 3
|
RuntimeError
|
def initialize_from_data(self, train_x, train_y, **kwargs):
if not torch.is_tensor(train_x) or not torch.is_tensor(train_y):
raise RuntimeError("train_x and train_y should be tensors")
if train_x.ndimension() == 1:
train_x = train_x.unsqueeze(-1)
if train_x.ndimension() == 2:
train_x = train_x.unsqueeze(0)
train_x_sort = train_x.sort(1)[0]
max_dist = train_x_sort[:, -1, :] - train_x_sort[:, 0, :]
min_dist_sort = (train_x_sort[:, 1:, :] - train_x_sort[:, :-1, :]).squeeze(0)
min_dist = torch.zeros(1, self.ard_num_dims)
for ind in range(self.ard_num_dims):
min_dist[:, ind] = min_dist_sort[(torch.nonzero(min_dist_sort[:, ind]))[0], ind]
# Inverse of lengthscales should be drawn from truncated Gaussian | N(0, max_dist^2) |
self.log_mixture_scales.data.normal_().mul_(max_dist).abs_().pow_(-1).log_()
# Draw means from Unif(0, 0.5 / minimum distance between two points)
self.log_mixture_means.data.uniform_().mul_(0.5).div_(min_dist).log_()
# Mixture weights should be roughly the stdv of the y values divided by the number of mixtures
self.log_mixture_weights.data.fill_(train_y.std() / self.num_mixtures).log_()
|
def initialize_from_data(self, train_x, train_y, **kwargs):
if not torch.is_tensor(train_x) or not torch.is_tensor(train_y):
raise RuntimeError("train_x and train_y should be tensors")
if train_x.ndimension() == 1:
train_x = train_x.unsqueeze(-1)
if train_x.ndimension() == 2:
train_x = train_x.unsqueeze(0)
train_x_sort = train_x.sort(1)[0]
max_dist = train_x_sort[:, -1, :] - train_x_sort[:, 0, :]
min_dist_sort = (train_x_sort[:, 1:, :] - train_x_sort[:, :-1, :]).squeeze(0)
min_dist = torch.zeros(1, self.n_dims)
for ind in range(self.n_dims):
min_dist[:, ind] = min_dist_sort[(torch.nonzero(min_dist_sort[:, ind]))[0], ind]
# Inverse of lengthscales should be drawn from truncated Gaussian | N(0, max_dist^2) |
self.log_mixture_scales.data.normal_().mul_(max_dist).abs_().pow_(-1).log_()
# Draw means from Unif(0, 0.5 / minimum distance between two points)
self.log_mixture_means.data.uniform_().mul_(0.5).div_(min_dist).log_()
# Mixture weights should be roughly the stdv of the y values divided by the number of mixtures
self.log_mixture_weights.data.fill_(train_y.std() / self.n_mixtures).log_()
|
https://github.com/cornellius-gp/gpytorch/issues/249
|
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-144-12b763efd692> in <module>()
19 output = model(train_x)
20 # TODO: Fix this view call!!
---> 21 loss = -mll(output, train_y)
22 loss.backward()
23 print('Iter %d/%d - Loss: %.3f' % (i + 1, n_iter, loss.item()))
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
160
161 def __call__(self, *inputs, **kwargs):
--> 162 outputs = self.forward(*inputs, **kwargs)
163 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):
164 return outputs
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/mlls/exact_marginal_log_likelihood.py in forward(self, output, target)
49
50 # Get log determininat and first part of quadratic form
---> 51 inv_quad, log_det = covar.inv_quad_log_det(inv_quad_rhs=(target - mean).unsqueeze(-1), log_det=True)
52
53 # Add terms for SGPR / when inducing points are learned
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in inv_quad_log_det(self, inv_quad_rhs, log_det)
577 matrix_size = self.size(-1)
578 batch_size = self.size(0) if self.ndimension() == 3 else None
--> 579 tensor_cls = self.tensor_cls
580
581 args = lazy_var.representation()
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in tensor_cls(self)
882 def tensor_cls(self):
883 if not hasattr(self, "_tensor_cls"):
--> 884 first_item = self.representation()[0]
885 if isinstance(first_item, Variable):
886 first_item = first_item.data
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in representation(self)
733 representation.append(arg)
734 elif isinstance(arg, LazyVariable):
--> 735 representation += list(arg.representation())
736 else:
737 raise RuntimeError("Representation of a LazyVariable should consist only of Variables")
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in representation(self)
733 representation.append(arg)
734 elif isinstance(arg, LazyVariable):
--> 735 representation += list(arg.representation())
736 else:
737 raise RuntimeError("Representation of a LazyVariable should consist only of Variables")
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in representation(self)
109
110 def representation(self):
--> 111 return self.evaluate_kernel().representation()
112
113 def representation_tree(self):
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in evaluate_kernel(self)
96 x2 = self.x2
97
---> 98 self._cached_kernel_eval = super(Kernel, self.kernel).__call__(x1, x2, **self.params)
99 if self.squeeze_row:
100 self._cached_kernel_eval.squeeze_(-2)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
160
161 def __call__(self, *inputs, **kwargs):
--> 162 outputs = self.forward(*inputs, **kwargs)
163 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):
164 return outputs
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/multitask_kernel.py in forward(self, x1, x2)
61 def forward(self, x1, x2):
62 covar_i = self.task_covar_module.covar_matrix
---> 63 covar_x = self.data_covar_module.forward(x1, x2)
64 if covar_x.size(0) == 1:
65 covar_x = covar_x[0]
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_interpolation_kernel.py in forward(self, x1, x2, **kwargs)
52
53 def forward(self, x1, x2, **kwargs):
---> 54 base_lazy_var = self._inducing_forward()
55 if x1.size(0) > 1:
56 base_lazy_var = base_lazy_var.repeat(x1.size(0), 1, 1)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_interpolation_kernel.py in _inducing_forward(self)
46 def _inducing_forward(self):
47 inducing_points_var = Variable(self.inducing_points)
---> 48 return super(GridInterpolationKernel, self).forward(inducing_points_var, inducing_points_var)
49
50 def forward_diag(self, x1, x2, **kwargs):
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_kernel.py in forward(self, x1, x2, **kwargs)
38 if settings.use_toeplitz.on():
39 first_item = grid_var[:, 0:1].contiguous()
---> 40 covar_columns = self.base_kernel_module(first_item, grid_var, **kwargs).evaluate()
41 covars = [ToeplitzLazyVariable(covar_columns[i : i + 1].squeeze(-2)) for i in range(n_dim)]
42 else:
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in evaluate(self)
115
116 def evaluate(self):
--> 117 return self.evaluate_kernel().evaluate()
118
119 def exact_predictive_mean(self, full_mean, train_labels, n_train, likelihood, precomputed_cache=None):
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in evaluate_kernel(self)
96 x2 = self.x2
97
---> 98 self._cached_kernel_eval = super(Kernel, self.kernel).__call__(x1, x2, **self.params)
99 if self.squeeze_row:
100 self._cached_kernel_eval.squeeze_(-2)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
160
161 def __call__(self, *inputs, **kwargs):
--> 162 outputs = self.forward(*inputs, **kwargs)
163 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):
164 return outputs
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/rbf_kernel.py in forward(self, x1, x2)
104 def forward(self, x1, x2):
105 lengthscales = self.log_lengthscale.exp().mul(math.sqrt(2)).clamp(self.eps, 1e5)
--> 106 diff = (x1.unsqueeze(2) - x2.unsqueeze(1)).div_(lengthscales.unsqueeze(1))
107 return diff.pow_(2).sum(-1).mul_(-1).exp_()
RuntimeError: The expanded size of the tensor (1) must match the existing size (2) at non-singleton dimension 3
|
RuntimeError
|
def forward(self, x1, x2):
batch_size, n, num_dims = x1.size()
_, m, _ = x2.size()
if not num_dims == self.ard_num_dims:
raise RuntimeError(
"The SpectralMixtureKernel expected the input to have {} dimensionality "
"(based on the ard_num_dims argument). Got {}.".format(
self.ard_num_dims, num_dims
)
)
if not batch_size == self.batch_size:
raise RuntimeError(
"The SpectralMixtureKernel expected the input to have a batch_size of {} "
"(based on the batch_size argument). Got {}.".format(
self.batch_size, batch_size
)
)
# Expand x1 and x2 to account for the number of mixtures
# Should make x1/x2 (b x k x n x d) for k mixtures
x1_ = x1.unsqueeze(1)
x2_ = x2.unsqueeze(1)
# Compute distances - scaled by appropriate parameters
x1_exp = x1_ * self.mixture_scales
x2_exp = x2_ * self.mixture_scales
x1_cos = x1_ * self.mixture_means
x2_cos = x2_ * self.mixture_means
# Create grids
x1_exp_, x2_exp_ = self._create_input_grid(x1_exp, x2_exp)
x1_cos_, x2_cos_ = self._create_input_grid(x1_cos, x2_cos)
# Compute the exponential and cosine terms
exp_term = (x1_exp_ - x2_exp_).pow_(2).mul_(-2 * math.pi**2)
cos_term = (x1_cos_ - x2_cos_).mul_(2 * math.pi)
res = exp_term.exp_() * cos_term.cos_()
# Product omer dimensions
res = res.prod(-1)
# Sum over mixtures
mixture_weights = self.mixture_weights
while mixture_weights.dim() < res.dim():
mixture_weights.unsqueeze_(-1)
res = (res * mixture_weights).sum(1)
return res
|
def forward(self, x1, x2):
batch_size, n, n_dims = x1.size()
_, m, _ = x2.size()
if not n_dims == self.n_dims:
raise RuntimeError("The number of dimensions doesn't match what was supplied!")
mixture_weights = self.log_mixture_weights.view(self.n_mixtures, 1, 1, 1).exp()
mixture_means = self.log_mixture_means.view(
self.n_mixtures, 1, 1, 1, self.n_dims
).exp()
mixture_scales = self.log_mixture_scales.view(
self.n_mixtures, 1, 1, 1, self.n_dims
).exp()
distance = (x1.unsqueeze(-2) - x2.unsqueeze(-3)).abs() # distance = x^(i) - z^(i)
exp_term = (distance * mixture_scales).pow_(2).mul_(-2 * math.pi**2)
cos_term = (distance * mixture_means).mul_(2 * math.pi)
res = exp_term.exp_() * cos_term.cos_()
# Product over dimensions
res = res.prod(-1)
# Sum over mixtures
res = (res * mixture_weights).sum(0)
return res
|
https://github.com/cornellius-gp/gpytorch/issues/249
|
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-144-12b763efd692> in <module>()
19 output = model(train_x)
20 # TODO: Fix this view call!!
---> 21 loss = -mll(output, train_y)
22 loss.backward()
23 print('Iter %d/%d - Loss: %.3f' % (i + 1, n_iter, loss.item()))
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
160
161 def __call__(self, *inputs, **kwargs):
--> 162 outputs = self.forward(*inputs, **kwargs)
163 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):
164 return outputs
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/mlls/exact_marginal_log_likelihood.py in forward(self, output, target)
49
50 # Get log determininat and first part of quadratic form
---> 51 inv_quad, log_det = covar.inv_quad_log_det(inv_quad_rhs=(target - mean).unsqueeze(-1), log_det=True)
52
53 # Add terms for SGPR / when inducing points are learned
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in inv_quad_log_det(self, inv_quad_rhs, log_det)
577 matrix_size = self.size(-1)
578 batch_size = self.size(0) if self.ndimension() == 3 else None
--> 579 tensor_cls = self.tensor_cls
580
581 args = lazy_var.representation()
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in tensor_cls(self)
882 def tensor_cls(self):
883 if not hasattr(self, "_tensor_cls"):
--> 884 first_item = self.representation()[0]
885 if isinstance(first_item, Variable):
886 first_item = first_item.data
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in representation(self)
733 representation.append(arg)
734 elif isinstance(arg, LazyVariable):
--> 735 representation += list(arg.representation())
736 else:
737 raise RuntimeError("Representation of a LazyVariable should consist only of Variables")
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_variable.py in representation(self)
733 representation.append(arg)
734 elif isinstance(arg, LazyVariable):
--> 735 representation += list(arg.representation())
736 else:
737 raise RuntimeError("Representation of a LazyVariable should consist only of Variables")
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in representation(self)
109
110 def representation(self):
--> 111 return self.evaluate_kernel().representation()
112
113 def representation_tree(self):
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in evaluate_kernel(self)
96 x2 = self.x2
97
---> 98 self._cached_kernel_eval = super(Kernel, self.kernel).__call__(x1, x2, **self.params)
99 if self.squeeze_row:
100 self._cached_kernel_eval.squeeze_(-2)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
160
161 def __call__(self, *inputs, **kwargs):
--> 162 outputs = self.forward(*inputs, **kwargs)
163 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):
164 return outputs
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/multitask_kernel.py in forward(self, x1, x2)
61 def forward(self, x1, x2):
62 covar_i = self.task_covar_module.covar_matrix
---> 63 covar_x = self.data_covar_module.forward(x1, x2)
64 if covar_x.size(0) == 1:
65 covar_x = covar_x[0]
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_interpolation_kernel.py in forward(self, x1, x2, **kwargs)
52
53 def forward(self, x1, x2, **kwargs):
---> 54 base_lazy_var = self._inducing_forward()
55 if x1.size(0) > 1:
56 base_lazy_var = base_lazy_var.repeat(x1.size(0), 1, 1)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_interpolation_kernel.py in _inducing_forward(self)
46 def _inducing_forward(self):
47 inducing_points_var = Variable(self.inducing_points)
---> 48 return super(GridInterpolationKernel, self).forward(inducing_points_var, inducing_points_var)
49
50 def forward_diag(self, x1, x2, **kwargs):
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/grid_kernel.py in forward(self, x1, x2, **kwargs)
38 if settings.use_toeplitz.on():
39 first_item = grid_var[:, 0:1].contiguous()
---> 40 covar_columns = self.base_kernel_module(first_item, grid_var, **kwargs).evaluate()
41 covars = [ToeplitzLazyVariable(covar_columns[i : i + 1].squeeze(-2)) for i in range(n_dim)]
42 else:
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in evaluate(self)
115
116 def evaluate(self):
--> 117 return self.evaluate_kernel().evaluate()
118
119 def exact_predictive_mean(self, full_mean, train_labels, n_train, likelihood, precomputed_cache=None):
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/lazy/lazy_evaluated_kernel_variable.py in evaluate_kernel(self)
96 x2 = self.x2
97
---> 98 self._cached_kernel_eval = super(Kernel, self.kernel).__call__(x1, x2, **self.params)
99 if self.squeeze_row:
100 self._cached_kernel_eval.squeeze_(-2)
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/module.py in __call__(self, *inputs, **kwargs)
160
161 def __call__(self, *inputs, **kwargs):
--> 162 outputs = self.forward(*inputs, **kwargs)
163 if torch.is_tensor(outputs) or isinstance(outputs, RandomVariable) or isinstance(outputs, LazyVariable):
164 return outputs
~/anaconda/envs/py3/lib/python3.6/site-packages/gpytorch/kernels/rbf_kernel.py in forward(self, x1, x2)
104 def forward(self, x1, x2):
105 lengthscales = self.log_lengthscale.exp().mul(math.sqrt(2)).clamp(self.eps, 1e5)
--> 106 diff = (x1.unsqueeze(2) - x2.unsqueeze(1)).div_(lengthscales.unsqueeze(1))
107 return diff.pow_(2).sum(-1).mul_(-1).exp_()
RuntimeError: The expanded size of the tensor (1) must match the existing size (2) at non-singleton dimension 3
|
RuntimeError
|
def __call__(self, *args, **kwargs):
train_inputs = tuple(Variable(train_input) for train_input in self.train_inputs)
# Training mode: optimizing
if self.training:
if not all(
[
torch.equal(train_input, input)
for train_input, input in zip(train_inputs, args)
]
):
raise RuntimeError("You must train on the training inputs!")
return super(ExactGP, self).__call__(*args, **kwargs)
# Posterior mode
else:
if all(
[
torch.equal(train_input, input)
for train_input, input in zip(train_inputs, args)
]
):
logging.warning(
"The input matches the stored training data. "
"Did you forget to call model.train()?"
)
# Exact inference
n_train = train_inputs[0].size(0)
full_inputs = tuple(
torch.cat([train_input, input])
for train_input, input in zip(train_inputs, args)
)
full_output = super(ExactGP, self).__call__(*full_inputs, **kwargs)
if not isinstance(full_output, GaussianRandomVariable):
raise RuntimeError("ExactGP.forward must return a GaussianRandomVariable")
full_mean, full_covar = full_output.representation()
train_mean = full_mean[:n_train]
test_mean = full_mean[n_train:]
train_train_covar = gpytorch.add_diag(
full_covar[:n_train, :n_train], self.likelihood.log_noise.exp()
)
train_test_covar = full_covar[:n_train, n_train:]
test_train_covar = full_covar[n_train:, :n_train]
test_test_covar = full_covar[n_train:, n_train:]
# Calculate alpha cache
if not self.has_computed_alpha:
train_residual = Variable(self.train_targets) - train_mean
alpha = gpytorch.inv_matmul(train_train_covar, train_residual)
if isinstance(full_covar, InterpolatedLazyVariable):
# We can get a better alpha cache with InterpolatedLazyVariables (Kiss-GP)
# This allows for constant time predictions
right_interp = InterpolatedLazyVariable(
test_train_covar.base_lazy_variable,
left_interp_indices=None,
left_interp_values=None,
right_interp_indices=test_train_covar.right_interp_indices,
right_interp_values=test_train_covar.right_interp_values,
)
alpha = right_interp.matmul(alpha)
self.alpha = alpha
self.has_computed_alpha = True
# Calculate root inverse cache, if necessary
# This enables fast predictive variances
if not self.has_computed_root_inv and contexts.fast_pred_var.on():
if not isinstance(train_train_covar, LazyVariable):
train_train_covar = NonLazyVariable(train_train_covar)
root_inv = train_train_covar.root_inv_decomposition().root.evaluate()
if isinstance(full_covar, InterpolatedLazyVariable):
# We can get a better root_inv cache with InterpolatedLazyVariables (Kiss-GP)
# This allows for constant time predictive variances
right_interp = InterpolatedLazyVariable(
test_train_covar.base_lazy_variable,
left_interp_indices=None,
left_interp_values=None,
right_interp_indices=test_train_covar.right_interp_indices,
right_interp_values=test_train_covar.right_interp_values,
)
root_inv = right_interp.matmul(root_inv)
self.root_inv = root_inv
self.has_computed_root_inv = True
# Calculate mean
if isinstance(full_covar, InterpolatedLazyVariable):
# Constant time predictions with InterpolatedLazyVariables (Kiss-GP)
left_interp_indices = test_train_covar.left_interp_indices
left_interp_values = test_train_covar.left_interp_values
predictive_mean = (
left_interp(left_interp_indices, left_interp_values, self.alpha)
+ test_mean
)
else:
# O(n) predictions with normal LazyVariables
predictive_mean = test_train_covar.matmul(self.alpha) + test_mean
# Calculate covar
if contexts.fast_pred_var.on():
# Compute low-rank approximation of covariance matrix - much faster!
if not isinstance(test_test_covar, LazyVariable):
test_test_covar = NonLazyVariable(test_test_covar)
if isinstance(full_covar, InterpolatedLazyVariable):
# Constant time predictive var with InterpolatedLazyVariables (Kiss-GP)
left_interp_indices = test_train_covar.left_interp_indices
left_interp_values = test_train_covar.left_interp_values
covar_correction_root = left_interp(
left_interp_indices, left_interp_values, self.root_inv
)
predictive_covar = test_test_covar + RootLazyVariable(
covar_correction_root
).mul(-1)
else:
# O(n) predictions with normal LazyVariables
covar_correction_root = test_train_covar.matmul(self.root_inv)
covar_correction = RootLazyVariable(covar_correction_root).mul(-1)
predictive_covar = test_test_covar + covar_correction
else:
# Compute full covariance matrix - much slower
if isinstance(train_test_covar, LazyVariable):
train_test_covar = train_test_covar.evaluate()
if isinstance(test_train_covar, LazyVariable):
test_train_covar = train_test_covar.t()
if not isinstance(test_test_covar, LazyVariable):
test_test_covar = NonLazyVariable(test_test_covar)
covar_correction_rhs = gpytorch.inv_matmul(
train_train_covar, train_test_covar
).mul(-1)
predictive_covar = test_test_covar + MatmulLazyVariable(
test_train_covar, covar_correction_rhs
)
return GaussianRandomVariable(predictive_mean, predictive_covar)
|
def __call__(self, *args, **kwargs):
train_inputs = tuple(Variable(train_input) for train_input in self.train_inputs)
# Training mode: optimizing
if self.training:
if not all(
[
torch.equal(train_input, input)
for train_input, input in zip(train_inputs, args)
]
):
raise RuntimeError("You must train on the training inputs!")
return super(ExactGP, self).__call__(*args, **kwargs)
# Posterior mode
else:
if all(
[
torch.equal(train_input, input)
for train_input, input in zip(train_inputs, args)
]
):
logging.warning(
"The input matches the stored training data. "
"Did you forget to call model.train()?"
)
# Exact inference
n_train = train_inputs[0].size(0)
full_inputs = tuple(
torch.cat([train_input, input])
for train_input, input in zip(train_inputs, args)
)
full_output = super(ExactGP, self).__call__(*full_inputs, **kwargs)
if not isinstance(full_output, GaussianRandomVariable):
raise RuntimeError("ExactGP.forward must return a GaussianRandomVariable")
full_mean, full_covar = full_output.representation()
train_mean = full_mean[:n_train]
test_mean = full_mean[n_train:]
train_train_covar = gpytorch.add_diag(
full_covar[:n_train, :n_train], self.likelihood.log_noise.exp()
)
train_test_covar = full_covar[:n_train, n_train:]
test_train_covar = full_covar[n_train:, :n_train]
test_test_covar = full_covar[n_train:, n_train:]
# Calculate alpha cache
if not self.has_computed_alpha:
train_residual = Variable(self.train_targets) - train_mean
alpha = gpytorch.inv_matmul(train_train_covar, train_residual)
if isinstance(full_covar, InterpolatedLazyVariable):
# We can get a better alpha cache with InterpolatedLazyVariables (Kiss-GP)
# This allows for constant time predictions
right_interp = InterpolatedLazyVariable(
test_train_covar.base_lazy_variable,
left_interp_indices=None,
left_interp_values=None,
right_interp_indices=test_train_covar.right_interp_indices,
right_interp_values=test_train_covar.right_interp_values,
)
alpha = right_interp.matmul(alpha)
self.alpha = alpha
self.has_computed_alpha = True
# Calculate root inverse cache, if necessary
# This enables fast predictive variances
if not self.has_computed_root_inv and contexts.fast_pred_var.on():
if not isinstance(train_train_covar, LazyVariable):
train_train_covar = NonLazyVariable(train_train_covar)
root_inv = train_train_covar.root_inv_decomposition().root.evaluate()
if isinstance(full_covar, InterpolatedLazyVariable):
# We can get a better root_inv cache with InterpolatedLazyVariables (Kiss-GP)
# This allows for constant time predictive variances
right_interp = InterpolatedLazyVariable(
test_train_covar.base_lazy_variable,
left_interp_indices=None,
left_interp_values=None,
right_interp_indices=test_train_covar.right_interp_indices,
right_interp_values=test_train_covar.right_interp_values,
)
root_inv = right_interp.matmul(root_inv)
self.root_inv = root_inv
self.has_computed_root_inv = True
# Calculate mean
if isinstance(full_covar, InterpolatedLazyVariable):
# Constant time predictions with InterpolatedLazyVariables (Kiss-GP)
left_interp_indices = test_train_covar.left_interp_indices
left_interp_values = test_train_covar.left_interp_values
predictive_mean = (
left_interp(left_interp_indices, left_interp_values, self.alpha)
+ test_mean
)
else:
# O(n) predictions with normal LazyVariables
predictive_mean = test_train_covar.matmul(self.alpha) + test_mean
# Calculate covar
if contexts.fast_pred_var.on():
# Compute low-rank approximation of covariance matrix - much faster!
if not isinstance(test_test_covar, LazyVariable):
test_test_covar = NonLazyVariable(test_test_covar)
if isinstance(full_covar, InterpolatedLazyVariable):
# Constant time predictive var with InterpolatedLazyVariables (Kiss-GP)
left_interp_indices = test_train_covar.left_interp_indices
left_interp_values = test_train_covar.left_interp_values
covar_correction_root = left_interp(
left_interp_indices, left_interp_values, self.root_inv
)
predictive_covar = test_test_covar + RootLazyVariable(
covar_correction_root
).mul(-1)
else:
# O(n) predictions with normal LazyVariables
covar_correction_root = test_train_covar.matmul(self.root_inv)
covar_correction = RootLazyVariable(covar_correction_root).mul(-1)
predictive_covar = test_test_covar + covar_correction
else:
# Compute full covariance matrix - much slower
if isinstance(train_test_covar, LazyVariable):
train_test_covar = train_test_covar.evaluate()
if isinstance(test_train_covar, LazyVariable):
test_train_covar = train_test_covar.t()
if not isinstance(test_test_covar, LazyVariable):
test_test_covar = NonLazyVariable(test_test_covar)
covar_correction_rhs = gpytorch.inv_matmul(
train_train_covar, train_test_covar
).mul_(-1)
predictive_covar = test_test_covar + MatmulLazyVariable(
test_train_covar, covar_correction_rhs
)
return GaussianRandomVariable(predictive_mean, predictive_covar)
|
https://github.com/cornellius-gp/gpytorch/issues/67
|
test_x = Variable(torch.rand(10), requires_grad=True)
output = model(test_x)
# this works just fine
sum_of_means = output.mean().sum()
sum_of_means.backward()
test_x.grad
Variable containing:
3.4206
-3.2818
1.8668
3.5644
-0.7677
0.7666
6.4394
5.1365
-5.0451
6.0161
[torch.FloatTensor of size 10]
# this fails with said error
sum_of_vars = output.var().sum()
sum_of_vars.backward()
test_x.grad
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-18-dad61ee95fc5> in <module>()
1 sum_of_vars = output.var().sum()
----> 2 sum_of_vars.backward()
3 test_x.grad
/data/users/balandat/fbsource/fbcode/buck-out/dev/gen/bento/kernels/bento_kernel_ae_dev#link-tree/torch/autograd/variable.py in backward(self, gradient, retain_graph, create_graph, retain_variables)
165 Variable.
166 """
--> 167 torch.autograd.backward(self, gradient, retain_graph, create_graph, retain_variables)
168
169 def register_hook(self, hook):
/data/users/balandat/fbsource/fbcode/buck-out/dev/gen/bento/kernels/bento_kernel_ae_dev#link-tree/torch/autograd/__init__.py in backward(variables, grad_variables, retain_graph, create_graph, retain_variables)
97
98 Variable._execution_engine.run_backward(
---> 99 variables, grad_variables, retain_graph)
100
101
RuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation
|
RuntimeError
|
def handle(self, *args, **options):
self.style = color_style()
self.options = options
if options["requirements"]:
req_files = options["requirements"]
elif os.path.exists("requirements.txt"):
req_files = ["requirements.txt"]
elif os.path.exists("requirements"):
req_files = [
"requirements/{0}".format(f)
for f in os.listdir("requirements")
if os.path.isfile(os.path.join("requirements", f))
and f.lower().endswith(".txt")
]
elif os.path.exists("requirements-dev.txt"):
req_files = ["requirements-dev.txt"]
elif os.path.exists("requirements-prod.txt"):
req_files = ["requirements-prod.txt"]
else:
raise CommandError("Requirements file(s) not found")
self.reqs = {}
with PipSession() as session:
for filename in req_files:
for req in parse_requirements(filename, session=session):
name = req.name if req.name else req.link.filename
# url attribute changed to link in pip version 6.1.0 and above
if LooseVersion(pip.__version__) > LooseVersion("6.0.8"):
self.reqs[name] = {
"pip_req": req,
"url": req.link,
}
else:
self.reqs[name] = {
"pip_req": req,
"url": req.url,
}
if options["github_api_token"]:
self.github_api_token = options["github_api_token"]
elif os.environ.get("GITHUB_API_TOKEN"):
self.github_api_token = os.environ.get("GITHUB_API_TOKEN")
else:
self.github_api_token = None # only 50 requests per hour
self.check_pypi()
if HAS_REQUESTS:
self.check_github()
else:
self.stdout.write(
self.style.ERROR(
"Cannot check github urls. The requests library is not installed. ( pip install requests )"
)
)
self.check_other()
|
def handle(self, *args, **options):
self.style = color_style()
self.options = options
if options["requirements"]:
req_files = options["requirements"]
elif os.path.exists("requirements.txt"):
req_files = ["requirements.txt"]
elif os.path.exists("requirements"):
req_files = [
"requirements/{0}".format(f)
for f in os.listdir("requirements")
if os.path.isfile(os.path.join("requirements", f))
and f.lower().endswith(".txt")
]
elif os.path.exists("requirements-dev.txt"):
req_files = ["requirements-dev.txt"]
elif os.path.exists("requirements-prod.txt"):
req_files = ["requirements-prod.txt"]
else:
raise CommandError("Requirements file(s) not found")
self.reqs = {}
with PipSession() as session:
for filename in req_files:
for req in parse_requirements(filename, session=session):
# url attribute changed to link in pip version 6.1.0 and above
if LooseVersion(pip.__version__) > LooseVersion("6.0.8"):
self.reqs[req.name] = {
"pip_req": req,
"url": req.link,
}
else:
self.reqs[req.name] = {
"pip_req": req,
"url": req.url,
}
if options["github_api_token"]:
self.github_api_token = options["github_api_token"]
elif os.environ.get("GITHUB_API_TOKEN"):
self.github_api_token = os.environ.get("GITHUB_API_TOKEN")
else:
self.github_api_token = None # only 50 requests per hour
self.check_pypi()
if HAS_REQUESTS:
self.check_github()
else:
print(
self.style.ERROR(
"Cannot check github urls. The requests library is not installed. ( pip install requests )"
)
)
self.check_other()
|
https://github.com/django-extensions/django-extensions/issues/1265
|
Package Version
----------------------------- ----------
django-extensions 2.1.3
$ cat r.txt
git+https://github.com/jmrivas86/django-json-widget
$ venv/bin/python -B manage.py pipchecker -r r.txt
Traceback (most recent call last):
File "manage.py", line 22, in <module>
execute_from_command_line(sys.argv)
File ".../venv/lib/python3.6/site-packages/django/core/management/__init__.py", line 381, in execute_from_command_line
utility.execute()
File ".../venv/lib/python3.6/site-packages/django/core/management/__init__.py", line 375, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File ".../venv/lib/python3.6/site-packages/django/core/management/base.py", line 316, in run_from_argv
self.execute(*args, **cmd_options)
File ".../venv/lib/python3.6/site-packages/django/core/management/base.py", line 353, in execute
output = self.handle(*args, **options)
File ".../venv/lib/python3.6/site-packages/django_extensions/management/utils.py", line 59, in inner
ret = func(self, *args, **kwargs)
File ".../venv/lib/python3.6/site-packages/django_extensions/management/commands/pipchecker.py", line 109, in handle
self.check_github()
File ".../venv/lib/python3.6/site-packages/django_extensions/management/commands/pipchecker.py", line 294, in check_github
print("{pkg_info:40} {msg}".format(pkg_info=pkg_info, msg=msg))
TypeError: unsupported format string passed to NoneType.__format__
|
TypeError
|
def check_pypi(self):
"""
If the requirement is frozen to pypi, check for a new version.
"""
for dist in get_installed_distributions():
name = dist.project_name
if name in self.reqs.keys():
self.reqs[name]["dist"] = dist
pypi = ServerProxy("https://pypi.python.org/pypi")
for name, req in list(self.reqs.items()):
if req["url"]:
continue # skipping github packages.
elif "dist" in req:
dist = req["dist"]
dist_version = LooseVersion(dist.version)
available = pypi.package_releases(
req["pip_req"].name, True
) or pypi.package_releases(req["pip_req"].name.replace("-", "_"), True)
available_version = self._available_version(dist_version, available)
if not available_version:
msg = self.style.WARN(
"release is not on pypi (check capitalization and/or --extra-index-url)"
)
elif self.options["show_newer"] and dist_version > available_version:
msg = self.style.INFO(
"{0} available (newer installed)".format(available_version)
)
elif available_version > dist_version:
msg = self.style.INFO("{0} available".format(available_version))
else:
msg = "up to date"
del self.reqs[name]
continue
pkg_info = self.style.BOLD(
"{dist.project_name} {dist.version}".format(dist=dist)
)
else:
msg = "not installed"
pkg_info = name
self.stdout.write("{pkg_info:40} {msg}".format(pkg_info=pkg_info, msg=msg))
del self.reqs[name]
|
def check_pypi(self):
"""
If the requirement is frozen to pypi, check for a new version.
"""
for dist in get_installed_distributions():
name = dist.project_name
if name in self.reqs.keys():
self.reqs[name]["dist"] = dist
pypi = ServerProxy("https://pypi.python.org/pypi")
for name, req in list(self.reqs.items()):
if req["url"]:
continue # skipping github packages.
elif "dist" in req:
dist = req["dist"]
dist_version = LooseVersion(dist.version)
available = pypi.package_releases(
req["pip_req"].name, True
) or pypi.package_releases(req["pip_req"].name.replace("-", "_"), True)
available_version = self._available_version(dist_version, available)
if not available_version:
msg = self.style.WARN(
"release is not on pypi (check capitalization and/or --extra-index-url)"
)
elif self.options["show_newer"] and dist_version > available_version:
msg = self.style.INFO(
"{0} available (newer installed)".format(available_version)
)
elif available_version > dist_version:
msg = self.style.INFO("{0} available".format(available_version))
else:
msg = "up to date"
del self.reqs[name]
continue
pkg_info = self.style.BOLD(
"{dist.project_name} {dist.version}".format(dist=dist)
)
else:
msg = "not installed"
pkg_info = name
print("{pkg_info:40} {msg}".format(pkg_info=pkg_info, msg=msg))
del self.reqs[name]
|
https://github.com/django-extensions/django-extensions/issues/1265
|
Package Version
----------------------------- ----------
django-extensions 2.1.3
$ cat r.txt
git+https://github.com/jmrivas86/django-json-widget
$ venv/bin/python -B manage.py pipchecker -r r.txt
Traceback (most recent call last):
File "manage.py", line 22, in <module>
execute_from_command_line(sys.argv)
File ".../venv/lib/python3.6/site-packages/django/core/management/__init__.py", line 381, in execute_from_command_line
utility.execute()
File ".../venv/lib/python3.6/site-packages/django/core/management/__init__.py", line 375, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File ".../venv/lib/python3.6/site-packages/django/core/management/base.py", line 316, in run_from_argv
self.execute(*args, **cmd_options)
File ".../venv/lib/python3.6/site-packages/django/core/management/base.py", line 353, in execute
output = self.handle(*args, **options)
File ".../venv/lib/python3.6/site-packages/django_extensions/management/utils.py", line 59, in inner
ret = func(self, *args, **kwargs)
File ".../venv/lib/python3.6/site-packages/django_extensions/management/commands/pipchecker.py", line 109, in handle
self.check_github()
File ".../venv/lib/python3.6/site-packages/django_extensions/management/commands/pipchecker.py", line 294, in check_github
print("{pkg_info:40} {msg}".format(pkg_info=pkg_info, msg=msg))
TypeError: unsupported format string passed to NoneType.__format__
|
TypeError
|
def check_github(self):
"""
If the requirement is frozen to a github url, check for new commits.
API Tokens
----------
For more than 50 github api calls per hour, pipchecker requires
authentication with the github api by settings the environemnt
variable ``GITHUB_API_TOKEN`` or setting the command flag
--github-api-token='mytoken'``.
To create a github api token for use at the command line::
curl -u 'rizumu' -d '{"scopes":["repo"], "note":"pipchecker"}' https://api.github.com/authorizations
For more info on github api tokens:
https://help.github.com/articles/creating-an-oauth-token-for-command-line-use
http://developer.github.com/v3/oauth/#oauth-authorizations-api
Requirement Format
------------------
Pipchecker gets the sha of frozen repo and checks if it is
found at the head of any branches. If it is not found then
the requirement is considered to be out of date.
Therefore, freezing at the commit hash will provide the expected
results, but if freezing at a branch or tag name, pipchecker will
not be able to determine with certainty if the repo is out of date.
Freeze at the commit hash (sha)::
git+git://github.com/django/django.git@393c268e725f5b229ecb554f3fac02cfc250d2df#egg=Django
https://github.com/django/django/archive/393c268e725f5b229ecb554f3fac02cfc250d2df.tar.gz#egg=Django
https://github.com/django/django/archive/393c268e725f5b229ecb554f3fac02cfc250d2df.zip#egg=Django
Freeze with a branch name::
git+git://github.com/django/django.git@master#egg=Django
https://github.com/django/django/archive/master.tar.gz#egg=Django
https://github.com/django/django/archive/master.zip#egg=Django
Freeze with a tag::
git+git://github.com/django/django.git@1.5b2#egg=Django
https://github.com/django/django/archive/1.5b2.tar.gz#egg=Django
https://github.com/django/django/archive/1.5b2.zip#egg=Django
Do not freeze::
git+git://github.com/django/django.git#egg=Django
"""
for name, req in list(self.reqs.items()):
req_url = req["url"]
if not req_url:
continue
req_url = str(req_url)
if req_url.startswith("git") and "github.com/" not in req_url:
continue
if req_url.endswith((".tar.gz", ".tar.bz2", ".zip")):
continue
headers = {
"content-type": "application/json",
}
if self.github_api_token:
headers["Authorization"] = "token {0}".format(self.github_api_token)
try:
path_parts = (
urlparse(req_url)
.path.split("#", 1)[0]
.strip("/")
.rstrip("/")
.split("/")
)
if len(path_parts) == 2:
user, repo = path_parts
elif "archive" in path_parts:
# Supports URL of format:
# https://github.com/django/django/archive/master.tar.gz#egg=Django
# https://github.com/django/django/archive/master.zip#egg=Django
user, repo = path_parts[:2]
repo += "@" + path_parts[-1].replace(".tar.gz", "").replace(".zip", "")
else:
self.style.ERROR("\nFailed to parse %r\n" % (req_url,))
continue
except (ValueError, IndexError) as e:
self.stdout.write(
self.style.ERROR("\nFailed to parse %r: %s\n" % (req_url, e))
)
continue
try:
test_auth = requests.get(
"https://api.github.com/django/", headers=headers
).json()
except HTTPError as e:
self.stdout.write("\n%s\n" % str(e))
return
if "message" in test_auth and test_auth["message"] == "Bad credentials":
self.stdout.write(
self.style.ERROR("\nGithub API: Bad credentials. Aborting!\n")
)
return
elif "message" in test_auth and test_auth["message"].startswith(
"API Rate Limit Exceeded"
):
self.stdout.write(
self.style.ERROR("\nGithub API: Rate Limit Exceeded. Aborting!\n")
)
return
frozen_commit_sha = None
if ".git" in repo:
repo_name, frozen_commit_full = repo.split(".git")
if frozen_commit_full.startswith("@"):
frozen_commit_sha = frozen_commit_full[1:]
elif "@" in repo:
repo_name, frozen_commit_sha = repo.split("@")
if frozen_commit_sha is None:
msg = self.style.ERROR("repo is not frozen")
if frozen_commit_sha:
branch_url = "https://api.github.com/repos/{0}/{1}/branches".format(
user, repo_name
)
branch_data = requests.get(branch_url, headers=headers).json()
frozen_commit_url = (
"https://api.github.com/repos/{0}/{1}/commits/{2}".format(
user, repo_name, frozen_commit_sha
)
)
frozen_commit_data = requests.get(frozen_commit_url, headers=headers).json()
if (
"message" in frozen_commit_data
and frozen_commit_data["message"] == "Not Found"
):
msg = self.style.ERROR(
"{0} not found in {1}. Repo may be private.".format(
frozen_commit_sha[:10], name
)
)
elif frozen_commit_data["sha"] in [
branch["commit"]["sha"] for branch in branch_data
]:
msg = self.style.BOLD("up to date")
else:
msg = self.style.INFO(
"{0} is not the head of any branch".format(
frozen_commit_data["sha"][:10]
)
)
if "dist" in req:
pkg_info = "{dist.project_name} {dist.version}".format(dist=req["dist"])
elif frozen_commit_sha is None:
pkg_info = name
else:
pkg_info = "{0} {1}".format(name, frozen_commit_sha[:10])
self.stdout.write("{pkg_info:40} {msg}".format(pkg_info=pkg_info, msg=msg))
del self.reqs[name]
|
def check_github(self):
"""
If the requirement is frozen to a github url, check for new commits.
API Tokens
----------
For more than 50 github api calls per hour, pipchecker requires
authentication with the github api by settings the environemnt
variable ``GITHUB_API_TOKEN`` or setting the command flag
--github-api-token='mytoken'``.
To create a github api token for use at the command line::
curl -u 'rizumu' -d '{"scopes":["repo"], "note":"pipchecker"}' https://api.github.com/authorizations
For more info on github api tokens:
https://help.github.com/articles/creating-an-oauth-token-for-command-line-use
http://developer.github.com/v3/oauth/#oauth-authorizations-api
Requirement Format
------------------
Pipchecker gets the sha of frozen repo and checks if it is
found at the head of any branches. If it is not found then
the requirement is considered to be out of date.
Therefore, freezing at the commit hash will provide the expected
results, but if freezing at a branch or tag name, pipchecker will
not be able to determine with certainty if the repo is out of date.
Freeze at the commit hash (sha)::
git+git://github.com/django/django.git@393c268e725f5b229ecb554f3fac02cfc250d2df#egg=Django
https://github.com/django/django/archive/393c268e725f5b229ecb554f3fac02cfc250d2df.tar.gz#egg=Django
https://github.com/django/django/archive/393c268e725f5b229ecb554f3fac02cfc250d2df.zip#egg=Django
Freeze with a branch name::
git+git://github.com/django/django.git@master#egg=Django
https://github.com/django/django/archive/master.tar.gz#egg=Django
https://github.com/django/django/archive/master.zip#egg=Django
Freeze with a tag::
git+git://github.com/django/django.git@1.5b2#egg=Django
https://github.com/django/django/archive/1.5b2.tar.gz#egg=Django
https://github.com/django/django/archive/1.5b2.zip#egg=Django
Do not freeze::
git+git://github.com/django/django.git#egg=Django
"""
for name, req in list(self.reqs.items()):
req_url = req["url"]
if not req_url:
continue
req_url = str(req_url)
if req_url.startswith("git") and "github.com/" not in req_url:
continue
if req_url.endswith((".tar.gz", ".tar.bz2", ".zip")):
continue
headers = {
"content-type": "application/json",
}
if self.github_api_token:
headers["Authorization"] = "token {0}".format(self.github_api_token)
try:
path_parts = (
urlparse(req_url)
.path.split("#", 1)[0]
.strip("/")
.rstrip("/")
.split("/")
)
if len(path_parts) == 2:
user, repo = path_parts
elif "archive" in path_parts:
# Supports URL of format:
# https://github.com/django/django/archive/master.tar.gz#egg=Django
# https://github.com/django/django/archive/master.zip#egg=Django
user, repo = path_parts[:2]
repo += "@" + path_parts[-1].replace(".tar.gz", "").replace(".zip", "")
else:
self.style.ERROR("\nFailed to parse %r\n" % (req_url,))
continue
except (ValueError, IndexError) as e:
print(self.style.ERROR("\nFailed to parse %r: %s\n" % (req_url, e)))
continue
try:
test_auth = requests.get(
"https://api.github.com/django/", headers=headers
).json()
except HTTPError as e:
print("\n%s\n" % str(e))
return
if "message" in test_auth and test_auth["message"] == "Bad credentials":
print(self.style.ERROR("\nGithub API: Bad credentials. Aborting!\n"))
return
elif "message" in test_auth and test_auth["message"].startswith(
"API Rate Limit Exceeded"
):
print(self.style.ERROR("\nGithub API: Rate Limit Exceeded. Aborting!\n"))
return
frozen_commit_sha = None
if ".git" in repo:
repo_name, frozen_commit_full = repo.split(".git")
if frozen_commit_full.startswith("@"):
frozen_commit_sha = frozen_commit_full[1:]
elif "@" in repo:
repo_name, frozen_commit_sha = repo.split("@")
if frozen_commit_sha is None:
msg = self.style.ERROR("repo is not frozen")
if frozen_commit_sha:
branch_url = "https://api.github.com/repos/{0}/{1}/branches".format(
user, repo_name
)
branch_data = requests.get(branch_url, headers=headers).json()
frozen_commit_url = (
"https://api.github.com/repos/{0}/{1}/commits/{2}".format(
user, repo_name, frozen_commit_sha
)
)
frozen_commit_data = requests.get(frozen_commit_url, headers=headers).json()
if (
"message" in frozen_commit_data
and frozen_commit_data["message"] == "Not Found"
):
msg = self.style.ERROR(
"{0} not found in {1}. Repo may be private.".format(
frozen_commit_sha[:10], name
)
)
elif frozen_commit_data["sha"] in [
branch["commit"]["sha"] for branch in branch_data
]:
msg = self.style.BOLD("up to date")
else:
msg = self.style.INFO(
"{0} is not the head of any branch".format(
frozen_commit_data["sha"][:10]
)
)
if "dist" in req:
pkg_info = "{dist.project_name} {dist.version}".format(dist=req["dist"])
elif frozen_commit_sha is None:
pkg_info = name
else:
pkg_info = "{0} {1}".format(name, frozen_commit_sha[:10])
print("{pkg_info:40} {msg}".format(pkg_info=pkg_info, msg=msg))
del self.reqs[name]
|
https://github.com/django-extensions/django-extensions/issues/1265
|
Package Version
----------------------------- ----------
django-extensions 2.1.3
$ cat r.txt
git+https://github.com/jmrivas86/django-json-widget
$ venv/bin/python -B manage.py pipchecker -r r.txt
Traceback (most recent call last):
File "manage.py", line 22, in <module>
execute_from_command_line(sys.argv)
File ".../venv/lib/python3.6/site-packages/django/core/management/__init__.py", line 381, in execute_from_command_line
utility.execute()
File ".../venv/lib/python3.6/site-packages/django/core/management/__init__.py", line 375, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File ".../venv/lib/python3.6/site-packages/django/core/management/base.py", line 316, in run_from_argv
self.execute(*args, **cmd_options)
File ".../venv/lib/python3.6/site-packages/django/core/management/base.py", line 353, in execute
output = self.handle(*args, **options)
File ".../venv/lib/python3.6/site-packages/django_extensions/management/utils.py", line 59, in inner
ret = func(self, *args, **kwargs)
File ".../venv/lib/python3.6/site-packages/django_extensions/management/commands/pipchecker.py", line 109, in handle
self.check_github()
File ".../venv/lib/python3.6/site-packages/django_extensions/management/commands/pipchecker.py", line 294, in check_github
print("{pkg_info:40} {msg}".format(pkg_info=pkg_info, msg=msg))
TypeError: unsupported format string passed to NoneType.__format__
|
TypeError
|
def check_other(self):
"""
If the requirement is frozen somewhere other than pypi or github, skip.
If you have a private pypi or use --extra-index-url, consider contributing
support here.
"""
if self.reqs:
self.stdout.write(
self.style.ERROR("\nOnly pypi and github based requirements are supported:")
)
for name, req in self.reqs.items():
if "dist" in req:
pkg_info = "{dist.project_name} {dist.version}".format(dist=req["dist"])
elif "url" in req:
pkg_info = "{url}".format(url=req["url"])
else:
pkg_info = "unknown package"
self.stdout.write(
self.style.BOLD(
"{pkg_info:40} is not a pypi or github requirement".format(
pkg_info=pkg_info
)
)
)
|
def check_other(self):
"""
If the requirement is frozen somewhere other than pypi or github, skip.
If you have a private pypi or use --extra-index-url, consider contributing
support here.
"""
if self.reqs:
print(
self.style.ERROR("\nOnly pypi and github based requirements are supported:")
)
for name, req in self.reqs.items():
if "dist" in req:
pkg_info = "{dist.project_name} {dist.version}".format(dist=req["dist"])
elif "url" in req:
pkg_info = "{url}".format(url=req["url"])
else:
pkg_info = "unknown package"
print(
self.style.BOLD(
"{pkg_info:40} is not a pypi or github requirement".format(
pkg_info=pkg_info
)
)
)
|
https://github.com/django-extensions/django-extensions/issues/1265
|
Package Version
----------------------------- ----------
django-extensions 2.1.3
$ cat r.txt
git+https://github.com/jmrivas86/django-json-widget
$ venv/bin/python -B manage.py pipchecker -r r.txt
Traceback (most recent call last):
File "manage.py", line 22, in <module>
execute_from_command_line(sys.argv)
File ".../venv/lib/python3.6/site-packages/django/core/management/__init__.py", line 381, in execute_from_command_line
utility.execute()
File ".../venv/lib/python3.6/site-packages/django/core/management/__init__.py", line 375, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File ".../venv/lib/python3.6/site-packages/django/core/management/base.py", line 316, in run_from_argv
self.execute(*args, **cmd_options)
File ".../venv/lib/python3.6/site-packages/django/core/management/base.py", line 353, in execute
output = self.handle(*args, **options)
File ".../venv/lib/python3.6/site-packages/django_extensions/management/utils.py", line 59, in inner
ret = func(self, *args, **kwargs)
File ".../venv/lib/python3.6/site-packages/django_extensions/management/commands/pipchecker.py", line 109, in handle
self.check_github()
File ".../venv/lib/python3.6/site-packages/django_extensions/management/commands/pipchecker.py", line 294, in check_github
print("{pkg_info:40} {msg}".format(pkg_info=pkg_info, msg=msg))
TypeError: unsupported format string passed to NoneType.__format__
|
TypeError
|
def create_app(config):
mode = config.MODE
if mode & App.GuiMode:
from PyQt5.QtGui import QIcon, QPixmap
from PyQt5.QtWidgets import QApplication, QWidget
from feeluown.compat import QEventLoop
q_app = QApplication(sys.argv)
q_app.setQuitOnLastWindowClosed(True)
q_app.setApplicationName("FeelUOwn")
app_event_loop = QEventLoop(q_app)
asyncio.set_event_loop(app_event_loop)
class GuiApp(QWidget):
mode = App.GuiMode
def __init__(self):
super().__init__()
self.setObjectName("app")
QApplication.setWindowIcon(QIcon(QPixmap(APP_ICON)))
def closeEvent(self, e):
self.ui.mpv_widget.close()
event_loop = asyncio.get_event_loop()
event_loop.stop()
class FApp(App, GuiApp):
def __init__(self, config):
App.__init__(self, config)
GuiApp.__init__(self)
else:
FApp = App
Signal.setup_aio_support()
Resolver.setup_aio_support()
app = FApp(config)
attach_attrs(app)
Resolver.library = app.library
return app
|
def create_app(config):
mode = config.MODE
if mode & App.GuiMode:
from quamash import QEventLoop
from PyQt5.QtGui import QIcon, QPixmap
from PyQt5.QtWidgets import QApplication, QWidget
q_app = QApplication(sys.argv)
q_app.setQuitOnLastWindowClosed(True)
q_app.setApplicationName("FeelUOwn")
app_event_loop = QEventLoop(q_app)
asyncio.set_event_loop(app_event_loop)
class GuiApp(QWidget):
mode = App.GuiMode
def __init__(self):
super().__init__()
self.setObjectName("app")
QApplication.setWindowIcon(QIcon(QPixmap(APP_ICON)))
def closeEvent(self, e):
self.ui.mpv_widget.close()
event_loop = asyncio.get_event_loop()
event_loop.stop()
class FApp(App, GuiApp):
def __init__(self, config):
App.__init__(self, config)
GuiApp.__init__(self)
else:
FApp = App
Signal.setup_aio_support()
Resolver.setup_aio_support()
app = FApp(config)
attach_attrs(app)
Resolver.library = app.library
return app
|
https://github.com/feeluown/FeelUOwn/issues/346
|
[2020-02-15 23:44:20,386 ERROR __init__] : Task exception was never retrieved
future: <Task finished name='Task-22' coro=<fetch_album_cover_wrapper.<locals>.fetch_album_cover() done, defined at /usr/lib/python3.8/site-packages/feeluown/containers/table.py:28> exception=RuntimeError('no running event loop')>
Traceback (most recent call last):
File "/usr/lib/python3.8/site-packages/feeluown/containers/table.py", line 34, in fetch_album_cover
await asyncio.sleep(random.randrange(100) / 100)
File "/usr/lib/python3.8/asyncio/tasks.py", line 637, in sleep
loop = events.get_running_loop()
RuntimeError: no running event loop
[2020-02-15 23:44:20,387 ERROR __init__] : Task exception was never retrieved
future: <Task finished name='Task-23' coro=<fetch_album_cover_wrapper.<locals>.fetch_album_cover() done, defined at /usr/lib/python3.8/site-packages/feeluown/containers/table.py:28> exception=RuntimeError('no running event loop')>
Traceback (most recent call last):
File "/usr/lib/python3.8/site-packages/feeluown/containers/table.py", line 34, in fetch_album_cover
await asyncio.sleep(random.randrange(100) / 100)
File "/usr/lib/python3.8/asyncio/tasks.py", line 637, in sleep
loop = events.get_running_loop()
RuntimeError: no running event loop
[2020-02-15 23:44:20,388 ERROR __init__] : Task exception was never retrieved
future: <Task finished name='Task-24' coro=<fetch_album_cover_wrapper.<locals>.fetch_album_cover() done, defined at /usr/lib/python3.8/site-packages/feeluown/containers/table.py:28> exception=RuntimeError('no running event loop')>
Traceback (most recent call last):
File "/usr/lib/python3.8/site-packages/feeluown/containers/table.py", line 34, in fetch_album_cover
await asyncio.sleep(random.randrange(100) / 100)
File "/usr/lib/python3.8/asyncio/tasks.py", line 637, in sleep
loop = events.get_running_loop()
RuntimeError: no running event loop
[2020-02-15 23:44:20,388 ERROR __init__] : Task exception was never retrieved
future: <Task finished name='Task-25' coro=<fetch_album_cover_wrapper.<locals>.fetch_album_cover() done, defined at /usr/lib/python3.8/site-packages/feeluown/containers/table.py:28> exception=RuntimeError('no running event loop')>
Traceback (most recent call last):
File "/usr/lib/python3.8/site-packages/feeluown/containers/table.py", line 34, in fetch_album_cover
await asyncio.sleep(random.randrange(100) / 100)
File "/usr/lib/python3.8/asyncio/tasks.py", line 637, in sleep
loop = events.get_running_loop()
RuntimeError: no running event loop
[2020-02-15 23:44:20,389 ERROR __init__] : Task exception was never retrieved
future: <Task finished name='Task-26' coro=<fetch_album_cover_wrapper.<locals>.fetch_album_cover() done, defined at /usr/lib/python3.8/site-packages/feeluown/containers/table.py:28> exception=RuntimeError('no running event loop')>
Traceback (most recent call last):
File "/usr/lib/python3.8/site-packages/feeluown/containers/table.py", line 34, in fetch_album_cover
await asyncio.sleep(random.randrange(100) / 100)
File "/usr/lib/python3.8/asyncio/tasks.py", line 637, in sleep
loop = events.get_running_loop()
RuntimeError: no running event loop
[2020-02-15 23:44:20,389 ERROR __init__] : Task exception was never retrieved
future: <Task finished name='Task-27' coro=<fetch_album_cover_wrapper.<locals>.fetch_album_cover() done, defined at /usr/lib/python3.8/site-packages/feeluown/containers/table.py:28> exception=RuntimeError('no running event loop')>
Traceback (most recent call last):
File "/usr/lib/python3.8/site-packages/feeluown/containers/table.py", line 34, in fetch_album_cover
await asyncio.sleep(random.randrange(100) / 100)
File "/usr/lib/python3.8/asyncio/tasks.py", line 637, in sleep
loop = events.get_running_loop()
RuntimeError: no running event loop
[2020-02-15 23:44:20,389 ERROR __init__] : Task exception was never retrieved
future: <Task finished name='Task-28' coro=<fetch_album_cover_wrapper.<locals>.fetch_album_cover() done, defined at /usr/lib/python3.8/site-packages/feeluown/containers/table.py:28> exception=RuntimeError('no running event loop')>
Traceback (most recent call last):
File "/usr/lib/python3.8/site-packages/feeluown/containers/table.py", line 34, in fetch_album_cover
await asyncio.sleep(random.randrange(100) / 100)
File "/usr/lib/python3.8/asyncio/tasks.py", line 637, in sleep
loop = events.get_running_loop()
RuntimeError: no running event loop
|
RuntimeError
|
def play_mv_by_mvid(cls, mvid):
mv_model = ControllerApi.api.get_mv_detail(mvid)
if not ControllerApi.api.is_response_ok(mv_model):
return
url_high = mv_model["url_high"]
clipboard = QApplication.clipboard()
clipboard.setText(url_high)
cls.view.ui.STATUS_BAR.showMessage("程序已经将视频的播放地址复制到剪切板", 5000)
if platform.system() == "Linux":
ControllerApi.player.pause()
ControllerApi.notify_widget.show_message(
"通知", "正在尝试调用VLC视频播放器播放MV"
)
try:
subprocess.Popen(["vlc", url_high, "--play-and-exit", "-f"])
except Exception as e:
LOG.error("call vlc player failed")
elif platform.system().lower() == "Darwin".lower():
ControllerApi.player.pause()
ControllerApi.notify_widget.show_message(
"通知", "准备调用 QuickTime Player 播放mv"
)
try:
subprocess.Popen(["open", "-a", "QuickTime Player", url_high])
except Exception as e:
LOG.error("call quicktime player failed")
|
def play_mv_by_mvid(cls, mvid):
mv_model = ControllerApi.api.get_mv_detail(mvid)
if not ControllerApi.api.is_response_ok(mv_model):
return
url_high = mv_model["url_high"]
clipboard = QApplication.clipboard()
clipboard.setText(url_high)
if platform.system() == "Linux":
ControllerApi.player.pause()
ControllerApi.notify_widget.show_message(
"通知", "正在尝试调用VLC视频播放器播放MV"
)
subprocess.Popen(["vlc", url_high, "--play-and-exit", "-f"])
elif platform.system().lower() == "Darwin".lower():
ControllerApi.player.pause()
cls.view.ui.STATUS_BAR.showMessage("准备调用 QuickTime Player 播放mv", 4000)
subprocess.Popen(["open", "-a", "QuickTime Player", url_high])
else:
cls.view.ui.STATUS_BAR.showMessage("程序已经将视频的播放地址复制到剪切板", 5000)
|
https://github.com/feeluown/FeelUOwn/issues/80
|
Traceback (most recent call last):
File "../feeluown/controller_api.py", line 46, in play_mv_by_mvid
subprocess.Popen(['vlc', url_high, '--play-and-exit', '-f'])
File "/usr/lib/python3.5/subprocess.py", line 950, in __init__
restore_signals, start_new_session)
File "/usr/lib/python3.5/subprocess.py", line 1544, in _execute_child
raise child_exception_type(errno_num, err_msg)
FileNotFoundError: [Errno 2] 没有那个文件或目录: 'vlc'
/usr/bin/FeelUOwn: 行 6: 10302 已放弃 (核心已转储)python /usr/share/feeluown-git/feeluown/main.py
|
FileNotFoundError
|
def check_pids(curmir_incs):
"""Check PIDs in curmir markers to make sure rdiff-backup not running"""
pid_re = re.compile(r"^PID\s*([0-9]+)", re.I | re.M)
def extract_pid(curmir_rp):
"""Return process ID from a current mirror marker, if any"""
match = pid_re.search(curmir_rp.get_string())
if not match:
return None
else:
return int(match.group(1))
def pid_running(pid):
"""Return True if we know if process with pid is currently running,
False if it isn't running, and None if we don't know for sure."""
if os.name == "nt":
import win32api
import win32con
import pywintypes
process = None
try:
process = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, 0, pid)
except pywintypes.error as error:
if error.winerror == 87:
# parameter incorrect, PID does not exist
return False
elif error.winerror == 5:
# access denied, means nevertheless PID still exists
return True
else:
msg = "Warning: unable to check if PID %d still running"
log.Log(msg % pid, 2)
return None # we don't know if the process is running
else:
if process:
win32api.CloseHandle(process)
return True
else:
return False
else:
try:
os.kill(pid, 0)
except ProcessLookupError: # errno.ESRCH - pid doesn't exist
return False
except OSError: # any other OS error
log.Log("Warning: unable to check if PID %d still running" % (pid,), 2)
return None # we don't know if the process is still running
else: # the process still exists
return True
for curmir_rp in curmir_incs:
assert curmir_rp.conn is Globals.local_connection, (
"Function must be called locally not over '{conn}'.".format(
conn=curmir_rp.conn
)
)
pid = extract_pid(curmir_rp)
# FIXME differentiate between don't know and know and handle err.errno == errno.EPERM:
# EPERM clearly means there's a process to deny access to with OSError
if pid is not None and pid_running(pid):
log.Log.FatalError(
"""It appears that a previous rdiff-backup session with process
id %d is still running. If two different rdiff-backup processes write
the same repository simultaneously, data corruption will probably
result. To proceed with regress anyway, rerun rdiff-backup with the
--force option."""
% (pid,)
)
|
def check_pids(curmir_incs):
"""Check PIDs in curmir markers to make sure rdiff-backup not running"""
pid_re = re.compile(r"^PID\s*([0-9]+)", re.I | re.M)
def extract_pid(curmir_rp):
"""Return process ID from a current mirror marker, if any"""
match = pid_re.search(curmir_rp.get_string())
if not match:
return None
else:
return int(match.group(1))
def pid_running(pid):
"""Return True if we know if process with pid is currently running,
False if it isn't running, and None if we don't know for sure."""
if os.name == "nt":
import win32api
import win32con
import pywintypes
process = None
try:
process = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, 0, pid)
except pywintypes.error as error:
if error[0] == 87:
return False
else:
msg = "Warning: unable to check if PID %d still running"
log.Log(msg % pid, 2)
return None # we don't know if the process is running
else:
if process:
win32api.CloseHandle(process)
return True
else:
return False
else:
try:
os.kill(pid, 0)
except ProcessLookupError: # errno.ESRCH - pid doesn't exist
return False
except OSError: # any other OS error
log.Log("Warning: unable to check if PID %d still running" % (pid,), 2)
return None # we don't know if the process is still running
else: # the process still exists
return True
for curmir_rp in curmir_incs:
assert curmir_rp.conn is Globals.local_connection, (
"Function must be called locally not over '{conn}'.".format(
conn=curmir_rp.conn
)
)
pid = extract_pid(curmir_rp)
# FIXME differentiate between don't know and know and handle err.errno == errno.EPERM:
# EPERM clearly means there's a process to deny access to with OSError
if pid is not None and pid_running(pid):
log.Log.FatalError(
"""It appears that a previous rdiff-backup session with process
id %d is still running. If two different rdiff-backup processes write
the same repository simultaneously, data corruption will probably
result. To proceed with regress anyway, rerun rdiff-backup with the
--force option."""
% (pid,)
)
|
https://github.com/rdiff-backup/rdiff-backup/issues/453
|
vagrant@WIN-4SPEID5E7R8 C:\Users\vagrant\Develop\rdiff-backup>rmdir /s /q \temp\bla
vagrant@WIN-4SPEID5E7R8 C:\Users\vagrant\Develop\rdiff-backup>rdiff-backup ..\rdiff-backup_testfiles\stattest1 \temp\bla
vagrant@WIN-4SPEID5E7R8 C:\Users\vagrant\Develop\rdiff-backup>rdiff-backup ..\rdiff-backup_testfiles\stattest2 \temp\bla
Exception '' raised of class '<class 'MemoryError'>':
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\robust.py", line 35, in check_common_error
return function(*args)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\Rdiff.py", line 98, in patch_local
return outrp.write_from_fileobj(patchfile)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\rpath.py", line 1462, in write_from_fileobj
copyfileobj(fp, outfp)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\rpath.py", line 79, in copyfileobj
inbuf = inputfp.read(blocksize)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\librsync.py", line 69, in read
self._add_to_outbuf_once()
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\librsync.py", line 98, in _add_to_outbuf_once
self.eof, len_inbuf_read, cycle_out = self.maker.cycle(self.inbuf)
Exception '' raised of class '<class 'MemoryError'>':
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\Main.py", line 399, in error_check_Main
_Main(arglist)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\Main.py", line 421, in _Main
return_val = _take_action(rps)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\Main.py", line 358, in _take_action
action_result = _action_backup(rps[0], rps[1])
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\Main.py", line 445, in _action_backup
backup.Mirror_and_increment(rpin, rpout, _incdir)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\backup.py", line 55, in Mirror_and_increment
DestS.patch_and_increment(dest_rpath, source_diffiter, inc_rpath)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\backup.py", line 281, in patch_and_increment
ITR(diff.index, diff)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\rorpiter.py", line 313, in __call__
last_branch.fast_process(*args)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\backup.py", line 781, in fast_process
if self._patch_to_temp(mirror_rp, diff_rorp, tf):
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\backup.py", line 617, in _patch_to_temp
elif not self._patch_diff_to_temp(basis_rp, diff_rorp, new):
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\backup.py", line 669, in _patch_diff_to_temp
self.error_handler, Rdiff.patch_local, (basis_rp, diff_rorp, new))
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\robust.py", line 35, in check_common_error
return function(*args)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\Rdiff.py", line 98, in patch_local
return outrp.write_from_fileobj(patchfile)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\rpath.py", line 1462, in write_from_fileobj
copyfileobj(fp, outfp)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\rpath.py", line 79, in copyfileobj
inbuf = inputfp.read(blocksize)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\librsync.py", line 69, in read
self._add_to_outbuf_once()
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\librsync.py", line 98, in _add_to_outbuf_once
self.eof, len_inbuf_read, cycle_out = self.maker.cycle(self.inbuf)
Traceback (most recent call last):
File "C:\Users\vagrant\Develop\rdiff-backup\build\scripts-3.7\rdiff-backup", line 32, in <module>
rdiff_backup.Main.error_check_Main(sys.argv[1:])
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\Main.py", line 399, in error_check_Main
_Main(arglist)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\Main.py", line 421, in _Main
return_val = _take_action(rps)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\Main.py", line 358, in _take_action
action_result = _action_backup(rps[0], rps[1])
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\Main.py", line 445, in _action_backup
backup.Mirror_and_increment(rpin, rpout, _incdir)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\backup.py", line 55, in Mirror_and_increment
DestS.patch_and_increment(dest_rpath, source_diffiter, inc_rpath)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\backup.py", line 281, in patch_and_increment
ITR(diff.index, diff)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\rorpiter.py", line 313, in __call__
last_branch.fast_process(*args)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\backup.py", line 781, in fast_process
if self._patch_to_temp(mirror_rp, diff_rorp, tf):
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\backup.py", line 617, in _patch_to_temp
elif not self._patch_diff_to_temp(basis_rp, diff_rorp, new):
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\backup.py", line 669, in _patch_diff_to_temp
self.error_handler, Rdiff.patch_local, (basis_rp, diff_rorp, new))
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\robust.py", line 35, in check_common_error
return function(*args)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\Rdiff.py", line 98, in patch_local
return outrp.write_from_fileobj(patchfile)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\rpath.py", line 1462, in write_from_fileobj
copyfileobj(fp, outfp)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\rpath.py", line 79, in copyfileobj
inbuf = inputfp.read(blocksize)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\librsync.py", line 69, in read
self._add_to_outbuf_once()
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\librsync.py", line 98, in _add_to_outbuf_once
self.eof, len_inbuf_read, cycle_out = self.maker.cycle(self.inbuf)
MemoryError
|
MemoryError
|
def pid_running(pid):
"""Return True if we know if process with pid is currently running,
False if it isn't running, and None if we don't know for sure."""
if os.name == "nt":
import win32api
import win32con
import pywintypes
process = None
try:
process = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, 0, pid)
except pywintypes.error as error:
if error.winerror == 87:
# parameter incorrect, PID does not exist
return False
elif error.winerror == 5:
# access denied, means nevertheless PID still exists
return True
else:
msg = "Warning: unable to check if PID %d still running"
log.Log(msg % pid, 2)
return None # we don't know if the process is running
else:
if process:
win32api.CloseHandle(process)
return True
else:
return False
else:
try:
os.kill(pid, 0)
except ProcessLookupError: # errno.ESRCH - pid doesn't exist
return False
except OSError: # any other OS error
log.Log("Warning: unable to check if PID %d still running" % (pid,), 2)
return None # we don't know if the process is still running
else: # the process still exists
return True
|
def pid_running(pid):
"""Return True if we know if process with pid is currently running,
False if it isn't running, and None if we don't know for sure."""
if os.name == "nt":
import win32api
import win32con
import pywintypes
process = None
try:
process = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, 0, pid)
except pywintypes.error as error:
if error[0] == 87:
return False
else:
msg = "Warning: unable to check if PID %d still running"
log.Log(msg % pid, 2)
return None # we don't know if the process is running
else:
if process:
win32api.CloseHandle(process)
return True
else:
return False
else:
try:
os.kill(pid, 0)
except ProcessLookupError: # errno.ESRCH - pid doesn't exist
return False
except OSError: # any other OS error
log.Log("Warning: unable to check if PID %d still running" % (pid,), 2)
return None # we don't know if the process is still running
else: # the process still exists
return True
|
https://github.com/rdiff-backup/rdiff-backup/issues/453
|
vagrant@WIN-4SPEID5E7R8 C:\Users\vagrant\Develop\rdiff-backup>rmdir /s /q \temp\bla
vagrant@WIN-4SPEID5E7R8 C:\Users\vagrant\Develop\rdiff-backup>rdiff-backup ..\rdiff-backup_testfiles\stattest1 \temp\bla
vagrant@WIN-4SPEID5E7R8 C:\Users\vagrant\Develop\rdiff-backup>rdiff-backup ..\rdiff-backup_testfiles\stattest2 \temp\bla
Exception '' raised of class '<class 'MemoryError'>':
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\robust.py", line 35, in check_common_error
return function(*args)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\Rdiff.py", line 98, in patch_local
return outrp.write_from_fileobj(patchfile)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\rpath.py", line 1462, in write_from_fileobj
copyfileobj(fp, outfp)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\rpath.py", line 79, in copyfileobj
inbuf = inputfp.read(blocksize)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\librsync.py", line 69, in read
self._add_to_outbuf_once()
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\librsync.py", line 98, in _add_to_outbuf_once
self.eof, len_inbuf_read, cycle_out = self.maker.cycle(self.inbuf)
Exception '' raised of class '<class 'MemoryError'>':
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\Main.py", line 399, in error_check_Main
_Main(arglist)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\Main.py", line 421, in _Main
return_val = _take_action(rps)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\Main.py", line 358, in _take_action
action_result = _action_backup(rps[0], rps[1])
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\Main.py", line 445, in _action_backup
backup.Mirror_and_increment(rpin, rpout, _incdir)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\backup.py", line 55, in Mirror_and_increment
DestS.patch_and_increment(dest_rpath, source_diffiter, inc_rpath)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\backup.py", line 281, in patch_and_increment
ITR(diff.index, diff)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\rorpiter.py", line 313, in __call__
last_branch.fast_process(*args)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\backup.py", line 781, in fast_process
if self._patch_to_temp(mirror_rp, diff_rorp, tf):
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\backup.py", line 617, in _patch_to_temp
elif not self._patch_diff_to_temp(basis_rp, diff_rorp, new):
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\backup.py", line 669, in _patch_diff_to_temp
self.error_handler, Rdiff.patch_local, (basis_rp, diff_rorp, new))
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\robust.py", line 35, in check_common_error
return function(*args)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\Rdiff.py", line 98, in patch_local
return outrp.write_from_fileobj(patchfile)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\rpath.py", line 1462, in write_from_fileobj
copyfileobj(fp, outfp)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\rpath.py", line 79, in copyfileobj
inbuf = inputfp.read(blocksize)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\librsync.py", line 69, in read
self._add_to_outbuf_once()
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\librsync.py", line 98, in _add_to_outbuf_once
self.eof, len_inbuf_read, cycle_out = self.maker.cycle(self.inbuf)
Traceback (most recent call last):
File "C:\Users\vagrant\Develop\rdiff-backup\build\scripts-3.7\rdiff-backup", line 32, in <module>
rdiff_backup.Main.error_check_Main(sys.argv[1:])
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\Main.py", line 399, in error_check_Main
_Main(arglist)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\Main.py", line 421, in _Main
return_val = _take_action(rps)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\Main.py", line 358, in _take_action
action_result = _action_backup(rps[0], rps[1])
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\Main.py", line 445, in _action_backup
backup.Mirror_and_increment(rpin, rpout, _incdir)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\backup.py", line 55, in Mirror_and_increment
DestS.patch_and_increment(dest_rpath, source_diffiter, inc_rpath)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\backup.py", line 281, in patch_and_increment
ITR(diff.index, diff)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\rorpiter.py", line 313, in __call__
last_branch.fast_process(*args)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\backup.py", line 781, in fast_process
if self._patch_to_temp(mirror_rp, diff_rorp, tf):
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\backup.py", line 617, in _patch_to_temp
elif not self._patch_diff_to_temp(basis_rp, diff_rorp, new):
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\backup.py", line 669, in _patch_diff_to_temp
self.error_handler, Rdiff.patch_local, (basis_rp, diff_rorp, new))
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\robust.py", line 35, in check_common_error
return function(*args)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\Rdiff.py", line 98, in patch_local
return outrp.write_from_fileobj(patchfile)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\rpath.py", line 1462, in write_from_fileobj
copyfileobj(fp, outfp)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\rpath.py", line 79, in copyfileobj
inbuf = inputfp.read(blocksize)
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\librsync.py", line 69, in read
self._add_to_outbuf_once()
File "C:\Users\vagrant\Develop\rdiff-backup\build\lib.win-amd64-3.7\rdiff_backup\librsync.py", line 98, in _add_to_outbuf_once
self.eof, len_inbuf_read, cycle_out = self.maker.cycle(self.inbuf)
MemoryError
|
MemoryError
|
def set_case_sensitive_readwrite(self, subdir):
"""Determine if directory at rp is case sensitive by writing"""
assert not self.read_only
upper_a = subdir.append("A")
upper_a.touch()
lower_a = subdir.append("a")
if lower_a.lstat():
lower_a.delete()
upper_a.setdata()
if upper_a.lstat():
# we know that (fuse-)exFAT 1.3.0 takes 1sec to register the
# deletion (July 2020)
log.Log.FatalError(
"We're sorry but the target file system at '%s' isn't "
"deemed reliable enough for a backup. It takes too long "
"or doesn't register case insensitive deletion of files."
% subdir.get_safepath()
)
self.case_sensitive = 0
else:
upper_a.delete()
self.case_sensitive = 1
|
def set_case_sensitive_readwrite(self, subdir):
"""Determine if directory at rp is case sensitive by writing"""
assert not self.read_only
upper_a = subdir.append("A")
upper_a.touch()
lower_a = subdir.append("a")
if lower_a.lstat():
lower_a.delete()
upper_a.setdata()
assert not upper_a.lstat()
self.case_sensitive = 0
else:
upper_a.delete()
self.case_sensitive = 1
|
https://github.com/rdiff-backup/rdiff-backup/issues/38
|
Message: Found interrupted initial backup. Removing...
Exception '' raised of class '<type 'exceptions.AssertionError'>':
File "/usr/local/lib/python2.7/dist-packages/rdiff_backup/Main.py", line 306, in error_check_Main
try: Main(arglist)
File "/usr/local/lib/python2.7/dist-packages/rdiff_backup/Main.py", line 326, in Main
take_action(rps)
File "/usr/local/lib/python2.7/dist-packages/rdiff_backup/Main.py", line 282, in take_action
elif action == "backup": Backup(rps[0], rps[1])
File "/usr/local/lib/python2.7/dist-packages/rdiff_backup/Main.py", line 336, in Backup
rpout.conn.fs_abilities.backup_set_globals(rpin, force)
File "/usr/local/lib/python2.7/dist-packages/rdiff_backup/fs_abilities.py", line 959, in backup_set_globals
dest_fsa = FSAbilities('destination').init_readwrite(Globals.rbdir)
File "/usr/local/lib/python2.7/dist-packages/rdiff_backup/fs_abilities.py", line 156, in init_readwrite
self.set_case_sensitive_readwrite(subdir)
File "/usr/local/lib/python2.7/dist-packages/rdiff_backup/fs_abilities.py", line 304, in set_case_sensitive_readwrite
assert not upper_a.lstat()
Traceback (most recent call last):
File "/usr/local/bin/rdiff-backup", line 30, in <module>
rdiff_backup.Main.error_check_Main(sys.argv[1:])
File "/usr/local/lib/python2.7/dist-packages/rdiff_backup/Main.py", line 306, in error_check_Main
try: Main(arglist)
File "/usr/local/lib/python2.7/dist-packages/rdiff_backup/Main.py", line 326, in Main
take_action(rps)
File "/usr/local/lib/python2.7/dist-packages/rdiff_backup/Main.py", line 282, in take_action
elif action == "backup": Backup(rps[0], rps[1])
File "/usr/local/lib/python2.7/dist-packages/rdiff_backup/Main.py", line 336, in Backup
rpout.conn.fs_abilities.backup_set_globals(rpin, force)
File "/usr/local/lib/python2.7/dist-packages/rdiff_backup/fs_abilities.py", line 959, in backup_set_globals
dest_fsa = FSAbilities('destination').init_readwrite(Globals.rbdir)
File "/usr/local/lib/python2.7/dist-packages/rdiff_backup/fs_abilities.py", line 156, in init_readwrite
self.set_case_sensitive_readwrite(subdir)
File "/usr/local/lib/python2.7/dist-packages/rdiff_backup/fs_abilities.py", line 304, in set_case_sensitive_readwrite
assert not upper_a.lstat()
AssertionError
|
exceptions.AssertionError
|
def log_to_file(self, message):
"""Write the message to the log file, if possible"""
if self.log_file_open:
if self.log_file_local:
tmpstr = self.format(message, self.verbosity)
self.logfp.write(_to_bytes(tmpstr))
self.logfp.flush()
else:
self.log_file_conn.log.Log.log_to_file(message)
|
def log_to_file(self, message):
"""Write the message to the log file, if possible"""
if self.log_file_open:
if self.log_file_local:
tmpstr = self.format(message, self.verbosity)
if type(tmpstr) == str: # transform string in bytes
tmpstr = tmpstr.encode("utf-8", "backslashreplace")
self.logfp.write(tmpstr)
self.logfp.flush()
else:
self.log_file_conn.log.Log.log_to_file(message)
|
https://github.com/rdiff-backup/rdiff-backup/issues/380
|
UpdateError: 'data/some/sub/dir/changed-file/rdiff-backup.tmp.266' does not match source
Exception 'a bytes-like object is required, not 'str'' raised of class '<class 'TypeError'>':
File "/usr/lib64/python3.6/site-packages/rdiff_backup/Main.py", line 390, in error_check_Main
Main(arglist)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/Main.py", line 412, in Main
take_action(rps)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/Main.py", line 348, in take_action
Backup(rps[0], rps[1])
File "/usr/lib64/python3.6/site-packages/rdiff_backup/Main.py", line 434, in Backup
backup.Mirror_and_increment(rpin, rpout, incdir)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/backup.py", line 55, in Mirror_and_increment
DestS.patch_and_increment(dest_rpath, source_diffiter, inc_rpath)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/backup.py", line 281, in patch_and_increment
ITR(diff.index, diff)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/rorpiter.py", line 313, in __call__
last_branch.fast_process(*args)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/backup.py", line 781, in fast_process
if self.patch_to_temp(mirror_rp, diff_rorp, tf):
File "/usr/lib64/python3.6/site-packages/rdiff_backup/backup.py", line 637, in patch_to_temp
return self.matches_cached_rorp(diff_rorp, new)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/backup.py", line 691, in matches_cached_rorp
"temp file '%s' does not match source" % new_rp.get_safepath())
File "/usr/lib64/python3.6/site-packages/rdiff_backup/log.py", line 313, in write_if_open
cls.write(error_type, rp, exc)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/log.py", line 293, in write
cls._log_fileobj.write(logstr)
Traceback (most recent call last):
File "/usr/bin/rdiff-backup", line 32, in <module>
rdiff_backup.Main.error_check_Main(sys.argv[1:])
File "/usr/lib64/python3.6/site-packages/rdiff_backup/Main.py", line 390, in error_check_Main
Main(arglist)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/Main.py", line 412, in Main
take_action(rps)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/Main.py", line 348, in take_action
Backup(rps[0], rps[1])
File "/usr/lib64/python3.6/site-packages/rdiff_backup/Main.py", line 434, in Backup
backup.Mirror_and_increment(rpin, rpout, incdir)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/backup.py", line 55, in Mirror_and_increment
DestS.patch_and_increment(dest_rpath, source_diffiter, inc_rpath)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/backup.py", line 281, in patch_and_increment
ITR(diff.index, diff)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/rorpiter.py", line 313, in __call__
last_branch.fast_process(*args)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/backup.py", line 781, in fast_process
if self.patch_to_temp(mirror_rp, diff_rorp, tf):
File "/usr/lib64/python3.6/site-packages/rdiff_backup/backup.py", line 637, in patch_to_temp
return self.matches_cached_rorp(diff_rorp, new)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/backup.py", line 691, in matches_cached_rorp
"temp file '%s' does not match source" % new_rp.get_safepath())
File "/usr/lib64/python3.6/site-packages/rdiff_backup/log.py", line 313, in write_if_open
cls.write(error_type, rp, exc)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/log.py", line 293, in write
cls._log_fileobj.write(logstr)
TypeError: a bytes-like object is required, not 'str'
|
TypeError
|
def log_to_term(self, message, verbosity):
"""Write message to stdout/stderr"""
if verbosity <= 2 or Globals.server:
termfp = sys.stderr.buffer
else:
termfp = sys.stdout.buffer
tmpstr = self.format(message, self.term_verbosity)
termfp.write(_to_bytes(tmpstr, encoding=sys.stdout.encoding))
|
def log_to_term(self, message, verbosity):
"""Write message to stdout/stderr"""
if verbosity <= 2 or Globals.server:
termfp = sys.stderr.buffer
else:
termfp = sys.stdout.buffer
tmpstr = self.format(message, self.term_verbosity)
if type(tmpstr) == str: # transform string in bytes
tmpstr = tmpstr.encode(sys.stdout.encoding, "backslashreplace")
termfp.write(tmpstr)
|
https://github.com/rdiff-backup/rdiff-backup/issues/380
|
UpdateError: 'data/some/sub/dir/changed-file/rdiff-backup.tmp.266' does not match source
Exception 'a bytes-like object is required, not 'str'' raised of class '<class 'TypeError'>':
File "/usr/lib64/python3.6/site-packages/rdiff_backup/Main.py", line 390, in error_check_Main
Main(arglist)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/Main.py", line 412, in Main
take_action(rps)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/Main.py", line 348, in take_action
Backup(rps[0], rps[1])
File "/usr/lib64/python3.6/site-packages/rdiff_backup/Main.py", line 434, in Backup
backup.Mirror_and_increment(rpin, rpout, incdir)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/backup.py", line 55, in Mirror_and_increment
DestS.patch_and_increment(dest_rpath, source_diffiter, inc_rpath)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/backup.py", line 281, in patch_and_increment
ITR(diff.index, diff)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/rorpiter.py", line 313, in __call__
last_branch.fast_process(*args)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/backup.py", line 781, in fast_process
if self.patch_to_temp(mirror_rp, diff_rorp, tf):
File "/usr/lib64/python3.6/site-packages/rdiff_backup/backup.py", line 637, in patch_to_temp
return self.matches_cached_rorp(diff_rorp, new)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/backup.py", line 691, in matches_cached_rorp
"temp file '%s' does not match source" % new_rp.get_safepath())
File "/usr/lib64/python3.6/site-packages/rdiff_backup/log.py", line 313, in write_if_open
cls.write(error_type, rp, exc)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/log.py", line 293, in write
cls._log_fileobj.write(logstr)
Traceback (most recent call last):
File "/usr/bin/rdiff-backup", line 32, in <module>
rdiff_backup.Main.error_check_Main(sys.argv[1:])
File "/usr/lib64/python3.6/site-packages/rdiff_backup/Main.py", line 390, in error_check_Main
Main(arglist)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/Main.py", line 412, in Main
take_action(rps)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/Main.py", line 348, in take_action
Backup(rps[0], rps[1])
File "/usr/lib64/python3.6/site-packages/rdiff_backup/Main.py", line 434, in Backup
backup.Mirror_and_increment(rpin, rpout, incdir)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/backup.py", line 55, in Mirror_and_increment
DestS.patch_and_increment(dest_rpath, source_diffiter, inc_rpath)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/backup.py", line 281, in patch_and_increment
ITR(diff.index, diff)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/rorpiter.py", line 313, in __call__
last_branch.fast_process(*args)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/backup.py", line 781, in fast_process
if self.patch_to_temp(mirror_rp, diff_rorp, tf):
File "/usr/lib64/python3.6/site-packages/rdiff_backup/backup.py", line 637, in patch_to_temp
return self.matches_cached_rorp(diff_rorp, new)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/backup.py", line 691, in matches_cached_rorp
"temp file '%s' does not match source" % new_rp.get_safepath())
File "/usr/lib64/python3.6/site-packages/rdiff_backup/log.py", line 313, in write_if_open
cls.write(error_type, rp, exc)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/log.py", line 293, in write
cls._log_fileobj.write(logstr)
TypeError: a bytes-like object is required, not 'str'
|
TypeError
|
def open(cls, time_string, compress=1):
"""Open the error log, prepare for writing"""
if not Globals.isbackup_writer:
return Globals.backup_writer.log.ErrorLog.open(time_string, compress)
assert not cls._log_fileobj, "log already open"
assert Globals.isbackup_writer
base_rp = Globals.rbdir.append("error_log.%s.data" % time_string)
if compress:
cls._log_fileobj = rpath.MaybeGzip(base_rp)
else:
cls._log_fileobj = base_rp.open("wb", compress=0)
|
def open(cls, time_string, compress=1):
"""Open the error log, prepare for writing"""
if not Globals.isbackup_writer:
return Globals.backup_writer.log.ErrorLog.open(time_string, compress)
assert not cls._log_fileobj, "log already open"
assert Globals.isbackup_writer
base_rp = Globals.rbdir.append("error_log.%s.data" % (time_string,))
if compress:
cls._log_fileobj = rpath.MaybeGzip(base_rp)
else:
cls._log_fileobj = base_rp.open("wb", compress=0)
|
https://github.com/rdiff-backup/rdiff-backup/issues/380
|
UpdateError: 'data/some/sub/dir/changed-file/rdiff-backup.tmp.266' does not match source
Exception 'a bytes-like object is required, not 'str'' raised of class '<class 'TypeError'>':
File "/usr/lib64/python3.6/site-packages/rdiff_backup/Main.py", line 390, in error_check_Main
Main(arglist)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/Main.py", line 412, in Main
take_action(rps)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/Main.py", line 348, in take_action
Backup(rps[0], rps[1])
File "/usr/lib64/python3.6/site-packages/rdiff_backup/Main.py", line 434, in Backup
backup.Mirror_and_increment(rpin, rpout, incdir)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/backup.py", line 55, in Mirror_and_increment
DestS.patch_and_increment(dest_rpath, source_diffiter, inc_rpath)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/backup.py", line 281, in patch_and_increment
ITR(diff.index, diff)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/rorpiter.py", line 313, in __call__
last_branch.fast_process(*args)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/backup.py", line 781, in fast_process
if self.patch_to_temp(mirror_rp, diff_rorp, tf):
File "/usr/lib64/python3.6/site-packages/rdiff_backup/backup.py", line 637, in patch_to_temp
return self.matches_cached_rorp(diff_rorp, new)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/backup.py", line 691, in matches_cached_rorp
"temp file '%s' does not match source" % new_rp.get_safepath())
File "/usr/lib64/python3.6/site-packages/rdiff_backup/log.py", line 313, in write_if_open
cls.write(error_type, rp, exc)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/log.py", line 293, in write
cls._log_fileobj.write(logstr)
Traceback (most recent call last):
File "/usr/bin/rdiff-backup", line 32, in <module>
rdiff_backup.Main.error_check_Main(sys.argv[1:])
File "/usr/lib64/python3.6/site-packages/rdiff_backup/Main.py", line 390, in error_check_Main
Main(arglist)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/Main.py", line 412, in Main
take_action(rps)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/Main.py", line 348, in take_action
Backup(rps[0], rps[1])
File "/usr/lib64/python3.6/site-packages/rdiff_backup/Main.py", line 434, in Backup
backup.Mirror_and_increment(rpin, rpout, incdir)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/backup.py", line 55, in Mirror_and_increment
DestS.patch_and_increment(dest_rpath, source_diffiter, inc_rpath)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/backup.py", line 281, in patch_and_increment
ITR(diff.index, diff)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/rorpiter.py", line 313, in __call__
last_branch.fast_process(*args)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/backup.py", line 781, in fast_process
if self.patch_to_temp(mirror_rp, diff_rorp, tf):
File "/usr/lib64/python3.6/site-packages/rdiff_backup/backup.py", line 637, in patch_to_temp
return self.matches_cached_rorp(diff_rorp, new)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/backup.py", line 691, in matches_cached_rorp
"temp file '%s' does not match source" % new_rp.get_safepath())
File "/usr/lib64/python3.6/site-packages/rdiff_backup/log.py", line 313, in write_if_open
cls.write(error_type, rp, exc)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/log.py", line 293, in write
cls._log_fileobj.write(logstr)
TypeError: a bytes-like object is required, not 'str'
|
TypeError
|
def write(cls, error_type, rp, exc):
"""Add line to log file indicating error exc with file rp"""
if not Globals.isbackup_writer:
return Globals.backup_writer.log.ErrorLog.write(error_type, rp, exc)
logstr = cls.get_log_string(error_type, rp, exc)
Log(logstr, 2)
if Globals.null_separator:
logstr += "\0"
else:
logstr = re.sub("\n", " ", logstr)
logstr += "\n"
cls._log_fileobj.write(_to_bytes(logstr))
|
def write(cls, error_type, rp, exc):
"""Add line to log file indicating error exc with file rp"""
if not Globals.isbackup_writer:
return Globals.backup_writer.log.ErrorLog.write(error_type, rp, exc)
logstr = cls.get_log_string(error_type, rp, exc)
Log(logstr, 2)
if isinstance(logstr, bytes):
logstr = logstr.decode("utf-8")
if Globals.null_separator:
logstr += "\0"
else:
logstr = re.sub("\n", " ", logstr)
logstr += "\n"
cls._log_fileobj.write(logstr)
|
https://github.com/rdiff-backup/rdiff-backup/issues/380
|
UpdateError: 'data/some/sub/dir/changed-file/rdiff-backup.tmp.266' does not match source
Exception 'a bytes-like object is required, not 'str'' raised of class '<class 'TypeError'>':
File "/usr/lib64/python3.6/site-packages/rdiff_backup/Main.py", line 390, in error_check_Main
Main(arglist)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/Main.py", line 412, in Main
take_action(rps)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/Main.py", line 348, in take_action
Backup(rps[0], rps[1])
File "/usr/lib64/python3.6/site-packages/rdiff_backup/Main.py", line 434, in Backup
backup.Mirror_and_increment(rpin, rpout, incdir)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/backup.py", line 55, in Mirror_and_increment
DestS.patch_and_increment(dest_rpath, source_diffiter, inc_rpath)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/backup.py", line 281, in patch_and_increment
ITR(diff.index, diff)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/rorpiter.py", line 313, in __call__
last_branch.fast_process(*args)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/backup.py", line 781, in fast_process
if self.patch_to_temp(mirror_rp, diff_rorp, tf):
File "/usr/lib64/python3.6/site-packages/rdiff_backup/backup.py", line 637, in patch_to_temp
return self.matches_cached_rorp(diff_rorp, new)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/backup.py", line 691, in matches_cached_rorp
"temp file '%s' does not match source" % new_rp.get_safepath())
File "/usr/lib64/python3.6/site-packages/rdiff_backup/log.py", line 313, in write_if_open
cls.write(error_type, rp, exc)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/log.py", line 293, in write
cls._log_fileobj.write(logstr)
Traceback (most recent call last):
File "/usr/bin/rdiff-backup", line 32, in <module>
rdiff_backup.Main.error_check_Main(sys.argv[1:])
File "/usr/lib64/python3.6/site-packages/rdiff_backup/Main.py", line 390, in error_check_Main
Main(arglist)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/Main.py", line 412, in Main
take_action(rps)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/Main.py", line 348, in take_action
Backup(rps[0], rps[1])
File "/usr/lib64/python3.6/site-packages/rdiff_backup/Main.py", line 434, in Backup
backup.Mirror_and_increment(rpin, rpout, incdir)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/backup.py", line 55, in Mirror_and_increment
DestS.patch_and_increment(dest_rpath, source_diffiter, inc_rpath)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/backup.py", line 281, in patch_and_increment
ITR(diff.index, diff)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/rorpiter.py", line 313, in __call__
last_branch.fast_process(*args)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/backup.py", line 781, in fast_process
if self.patch_to_temp(mirror_rp, diff_rorp, tf):
File "/usr/lib64/python3.6/site-packages/rdiff_backup/backup.py", line 637, in patch_to_temp
return self.matches_cached_rorp(diff_rorp, new)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/backup.py", line 691, in matches_cached_rorp
"temp file '%s' does not match source" % new_rp.get_safepath())
File "/usr/lib64/python3.6/site-packages/rdiff_backup/log.py", line 313, in write_if_open
cls.write(error_type, rp, exc)
File "/usr/lib64/python3.6/site-packages/rdiff_backup/log.py", line 293, in write
cls._log_fileobj.write(logstr)
TypeError: a bytes-like object is required, not 'str'
|
TypeError
|
def parse_file_desc(file_desc):
"""Parse file description returning pair (host_info, filename)
In other words, bescoto@folly.stanford.edu::/usr/bin/ls =>
("bescoto@folly.stanford.edu", "/usr/bin/ls"). The
complication is to allow for quoting of : by a \\. If the
string is not separated by :, then the host_info is None.
"""
# paths and similar objects must always be bytes
file_desc = os.fsencode(file_desc)
# match double colon not preceded by an odd number of backslashes
file_match = re.fullmatch(rb"^(?P<host>.*[^\\](?:\\\\)*)::(?P<path>.*)$", file_desc)
if file_match:
file_host = file_match.group("host")
# According to description, the backslashes must be unquoted, i.e.
# double backslashes replaced by single ones, and single ones removed.
# Hence we split along double ones, remove single ones in each element,
# and join back with a single backslash.
file_host = b"\\".join(
[x.replace(b"\\", b"") for x in re.split(rb"\\\\", file_host) if x]
)
file_path = file_match.group("path")
else:
if re.match(rb"^::", file_desc):
raise SetConnectionsException("No file host in '%s'" % file_desc)
file_host = None
file_path = file_desc
# make sure paths under Windows use / instead of \
if os.path.altsep: # only Windows has an alternative separator for paths
file_path = file_path.replace(os.fsencode(os.path.sep), b"/")
if not file_path:
raise SetConnectionsException("No file path in '%s'" % file_desc)
return (file_host, file_path)
|
def parse_file_desc(file_desc):
"""Parse file description returning pair (host_info, filename)
In other words, bescoto@folly.stanford.edu::/usr/bin/ls =>
("bescoto@folly.stanford.edu", "/usr/bin/ls"). The
complication is to allow for quoting of : by a \\. If the
string is not separated by :, then the host_info is None.
"""
def check_len(i):
if i >= len(file_desc):
raise SetConnectionsException(
"Unexpected end to file description %s" % file_desc
)
host_info_list, i, last_was_quoted = [], 0, None
file_desc = os.fsencode(file_desc) # paths and similar must always be bytes
while 1:
if i == len(file_desc):
# make sure paths under Windows use / instead of \
if os.path.altsep: # only Windows has an alternative separator for paths
file_desc = file_desc.replace(os.fsencode(os.path.sep), b"/")
return (None, file_desc)
if file_desc[i] == ord("\\"): # byte[n] is the numerical value hence ord
i = i + 1
check_len(i)
last_was_quoted = 1
elif (
file_desc[i] == ord(":")
and i > 0
and file_desc[i - 1] == ord(":")
and not last_was_quoted
):
host_info_list.pop() # Remove last colon from name
break
else:
last_was_quoted = None
host_info_list.append(file_desc[i : i + 1])
i = i + 1
check_len(i + 1)
filename = file_desc[i + 1 :]
# make sure paths under Windows use / instead of \
if os.path.altsep: # only Windows has an alternative separator for paths
filename = filename.replace(os.fsencode(os.path.sep), b"/")
return (b"".join(host_info_list), filename)
|
https://github.com/rdiff-backup/rdiff-backup/issues/395
|
rdiff-backup.exe a\ b\
Exception 'Unexpected end to file description b'a\\'' raised of class '<class 'rdiff_backup.SetConnections.SetConnectionsException'>':
File "rdiff_backup\Main.py", line 393, in error_check_Main
File "rdiff_backup\Main.py", line 410, in Main
File "rdiff_backup\SetConnections.py", line 66, in get_cmd_pairs
File "rdiff_backup\SetConnections.py", line 128, in parse_file_desc
File "rdiff_backup\SetConnections.py", line 113, in check_len
Traceback (most recent call last):
File "rdiff-backup", line 32, in <module>
File "rdiff_backup\Main.py", line 393, in error_check_Main
File "rdiff_backup\Main.py", line 410, in Main
File "rdiff_backup\SetConnections.py", line 66, in get_cmd_pairs
File "rdiff_backup\SetConnections.py", line 128, in parse_file_desc
File "rdiff_backup\SetConnections.py", line 113, in check_len
rdiff_backup.SetConnections.SetConnectionsException: Unexpected end to file description b'a\\'
[14104] Failed to execute script rdiff-backup
|
rdiff_backup.SetConnections.SetConnectionsException
|
def RORP2Record(rorpath):
"""From RORPath, return text record of file's metadata"""
str_list = [b"File %s\n" % quote_path(rorpath.get_indexpath())]
# Store file type, e.g. "dev", "reg", or "sym", and type-specific data
type = rorpath.gettype()
if type is None:
type = "None"
str_list.append(b" Type %b\n" % type.encode("ascii"))
if type == "reg":
str_list.append(b" Size %i\n" % rorpath.getsize())
# If there is a resource fork, save it.
if rorpath.has_resource_fork():
if not rorpath.get_resource_fork():
rf = b"None"
else:
rf = binascii.hexlify(rorpath.get_resource_fork())
str_list.append(b" ResourceFork %b\n" % (rf,))
# If there is Carbon data, save it.
if rorpath.has_carbonfile():
cfile = carbonfile2string(rorpath.get_carbonfile())
str_list.append(b" CarbonFile %b\n" % (cfile,))
# If file is hardlinked, add that information
if Globals.preserve_hardlinks != 0:
numlinks = rorpath.getnumlinks()
if numlinks > 1:
str_list.append(b" NumHardLinks %i\n" % numlinks)
str_list.append(b" Inode %i\n" % rorpath.getinode())
str_list.append(b" DeviceLoc %i\n" % rorpath.getdevloc())
# Save any hashes, if available
if rorpath.has_sha1():
str_list.append(b" SHA1Digest %b\n" % rorpath.get_sha1().encode("ascii"))
elif type == "None":
return b"".join(str_list)
elif type == "dir" or type == "sock" or type == "fifo":
pass
elif type == "sym":
str_list.append(b" SymData %b\n" % quote_path(rorpath.readlink()))
elif type == "dev":
devchar, major, minor = rorpath.getdevnums()
str_list.append(
b" DeviceNum %b %i %i\n" % (devchar.encode("ascii"), major, minor)
)
# Store time information
if type != "sym" and type != "dev":
str_list.append(b" ModTime %i\n" % rorpath.getmtime())
# Add user, group, and permission information
uid, gid = rorpath.getuidgid()
str_list.append(b" Uid %i\n" % uid)
str_list.append(b" Uname %b\n" % (rorpath.getuname() or ":").encode())
str_list.append(b" Gid %i\n" % gid)
str_list.append(b" Gname %b\n" % (rorpath.getgname() or ":").encode())
str_list.append(b" Permissions %d\n" % rorpath.getperms())
# Add long filename information
if rorpath.has_alt_mirror_name():
str_list.append(
b" AlternateMirrorName %b\n" % (rorpath.get_alt_mirror_name(),)
)
elif rorpath.has_alt_inc_name():
str_list.append(
b" AlternateIncrementName %b\n" % (rorpath.get_alt_inc_name(),)
)
return b"".join(str_list)
|
def RORP2Record(rorpath):
"""From RORPath, return text record of file's metadata"""
str_list = [b"File %s\n" % quote_path(rorpath.get_indexpath())]
# Store file type, e.g. "dev", "reg", or "sym", and type-specific data
type = rorpath.gettype()
if type is None:
type = "None"
str_list.append(b" Type %b\n" % type.encode("ascii"))
if type == "reg":
str_list.append(b" Size %i\n" % rorpath.getsize())
# If there is a resource fork, save it.
if rorpath.has_resource_fork():
if not rorpath.get_resource_fork():
rf = b"None"
else:
rf = binascii.hexlify(rorpath.get_resource_fork())
str_list.append(b" ResourceFork %b\n" % (rf,))
# If there is Carbon data, save it.
if rorpath.has_carbonfile():
cfile = carbonfile2string(rorpath.get_carbonfile())
str_list.append(b" CarbonFile %b\n" % (cfile,))
# If file is hardlinked, add that information
if Globals.preserve_hardlinks != 0:
numlinks = rorpath.getnumlinks()
if numlinks > 1:
str_list.append(b" NumHardLinks %i\n" % numlinks)
str_list.append(b" Inode %i\n" % rorpath.getinode())
str_list.append(b" DeviceLoc %i\n" % rorpath.getdevloc())
# Save any hashes, if available
if rorpath.has_sha1():
str_list.append(b" SHA1Digest %b\n" % rorpath.get_sha1().encode("ascii"))
elif type == "None":
return b"".join(str_list)
elif type == "dir" or type == "sock" or type == "fifo":
pass
elif type == "sym":
str_list.append(b" SymData %b\n" % quote_path(rorpath.readlink()))
elif type == "dev":
major, minor = rorpath.getdevnums()
if rorpath.isblkdev():
devchar = "b"
else:
assert rorpath.ischardev()
devchar = "c"
str_list.append(b" DeviceNum %b %i %i\n" % (devchar.encode(), major, minor))
# Store time information
if type != "sym" and type != "dev":
str_list.append(b" ModTime %i\n" % rorpath.getmtime())
# Add user, group, and permission information
uid, gid = rorpath.getuidgid()
str_list.append(b" Uid %i\n" % uid)
str_list.append(b" Uname %b\n" % (rorpath.getuname() or ":").encode())
str_list.append(b" Gid %i\n" % gid)
str_list.append(b" Gname %b\n" % (rorpath.getgname() or ":").encode())
str_list.append(b" Permissions %d\n" % rorpath.getperms())
# Add long filename information
if rorpath.has_alt_mirror_name():
str_list.append(
b" AlternateMirrorName %b\n" % (rorpath.get_alt_mirror_name(),)
)
elif rorpath.has_alt_inc_name():
str_list.append(
b" AlternateIncrementName %b\n" % (rorpath.get_alt_inc_name(),)
)
return b"".join(str_list)
|
https://github.com/rdiff-backup/rdiff-backup/issues/401
|
# rdiff-backup /dev/ /tmp/faketarget/
Exception '[Errno 2] No such file or directory: b'/tmp/faketarget/bus/usb/003/rdiff-backup.tmp.22'' raised of class '<class 'FileNotFoundError'>':
File "/usr/lib/python3/dist-packages/rdiff_backup/Main.py", line 390, in error_check_Main
Main(arglist)
File "/usr/lib/python3/dist-packages/rdiff_backup/Main.py", line 412, in Main
take_action(rps)
File "/usr/lib/python3/dist-packages/rdiff_backup/Main.py", line 348, in take_action
Backup(rps[0], rps[1])
File "/usr/lib/python3/dist-packages/rdiff_backup/Main.py", line 437, in Backup
backup.Mirror(rpin, rpout)
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 39, in Mirror
DestS.patch(dest_rpath, source_diffiter)
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 269, in patch
ITR(diff.index, diff)
File "/usr/lib/python3/dist-packages/rdiff_backup/rorpiter.py", line 313, in __call__
last_branch.fast_process(*args)
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 587, in fast_process
if self.patch_to_temp(mirror_rp, diff_rorp, tf):
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 612, in patch_to_temp
result = self.patch_snapshot_to_temp(diff_rorp, new)
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 655, in patch_snapshot_to_temp
rpath.copy_attribs(diff_rorp, new)
File "/usr/lib/python3/dist-packages/rdiff_backup/rpath.py", line 240, in copy_attribs
rpout.chown(*rpout.conn.user_group.map_rpath(rpin))
File "/usr/lib/python3/dist-packages/rdiff_backup/rpath.py", line 1192, in chown
self.conn.os.chown(self.path, uid, gid)
Traceback (most recent call last):
File "/usr/bin/rdiff-backup", line 32, in <module>
rdiff_backup.Main.error_check_Main(sys.argv[1:])
File "/usr/lib/python3/dist-packages/rdiff_backup/Main.py", line 390, in error_check_Main
Main(arglist)
File "/usr/lib/python3/dist-packages/rdiff_backup/Main.py", line 412, in Main
take_action(rps)
File "/usr/lib/python3/dist-packages/rdiff_backup/Main.py", line 348, in take_action
Backup(rps[0], rps[1])
File "/usr/lib/python3/dist-packages/rdiff_backup/Main.py", line 437, in Backup
backup.Mirror(rpin, rpout)
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 39, in Mirror
DestS.patch(dest_rpath, source_diffiter)
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 269, in patch
ITR(diff.index, diff)
File "/usr/lib/python3/dist-packages/rdiff_backup/rorpiter.py", line 313, in __call__
last_branch.fast_process(*args)
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 587, in fast_process
if self.patch_to_temp(mirror_rp, diff_rorp, tf):
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 612, in patch_to_temp
result = self.patch_snapshot_to_temp(diff_rorp, new)
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 655, in patch_snapshot_to_temp
rpath.copy_attribs(diff_rorp, new)
File "/usr/lib/python3/dist-packages/rdiff_backup/rpath.py", line 240, in copy_attribs
rpout.chown(*rpout.conn.user_group.map_rpath(rpin))
File "/usr/lib/python3/dist-packages/rdiff_backup/rpath.py", line 1192, in chown
self.conn.os.chown(self.path, uid, gid)
FileNotFoundError: [Errno 2] No such file or directory: b'/tmp/faketarget/bus/usb/003/rdiff-backup.tmp.22'
|
FileNotFoundError
|
def copy(rpin, rpout, compress=0):
"""Copy RPath rpin to rpout. Works for symlinks, dirs, etc.
Returns close value of input for regular file, which can be used
to pass hashes on.
"""
log.Log("Regular copying %s to %s" % (rpin.index, rpout.get_safepath()), 6)
if not rpin.lstat():
if rpout.lstat():
rpout.delete()
return
if rpout.lstat():
if rpin.isreg() or not cmp(rpin, rpout):
rpout.delete() # easier to write than compare
else:
return
if rpin.isreg():
return copy_reg_file(rpin, rpout, compress)
elif rpin.isdir():
rpout.mkdir()
elif rpin.issym():
# some systems support permissions for symlinks, but
# only by setting at creation via the umask
if Globals.symlink_perms:
orig_umask = os.umask(0o777 & ~rpin.getperms())
rpout.symlink(rpin.readlink())
if Globals.symlink_perms:
os.umask(orig_umask) # restore previous umask
elif rpin.isdev():
dev_type, major, minor = rpin.getdevnums()
rpout.makedev(dev_type, major, minor)
elif rpin.isfifo():
rpout.mkfifo()
elif rpin.issock():
rpout.mksock()
else:
raise RPathException("File '%s' has unknown type." % rpin.get_safepath())
|
def copy(rpin, rpout, compress=0):
"""Copy RPath rpin to rpout. Works for symlinks, dirs, etc.
Returns close value of input for regular file, which can be used
to pass hashes on.
"""
log.Log("Regular copying %s to %s" % (rpin.index, rpout.get_safepath()), 6)
if not rpin.lstat():
if rpout.lstat():
rpout.delete()
return
if rpout.lstat():
if rpin.isreg() or not cmp(rpin, rpout):
rpout.delete() # easier to write than compare
else:
return
if rpin.isreg():
return copy_reg_file(rpin, rpout, compress)
elif rpin.isdir():
rpout.mkdir()
elif rpin.issym():
# some systems support permissions for symlinks, but
# only by setting at creation via the umask
if Globals.symlink_perms:
orig_umask = os.umask(0o777 & ~rpin.getperms())
rpout.symlink(rpin.readlink())
if Globals.symlink_perms:
os.umask(orig_umask) # restore previous umask
elif rpin.ischardev():
major, minor = rpin.getdevnums()
rpout.makedev("c", major, minor)
elif rpin.isblkdev():
major, minor = rpin.getdevnums()
rpout.makedev("b", major, minor)
elif rpin.isfifo():
rpout.mkfifo()
elif rpin.issock():
rpout.mksock()
else:
raise RPathException("File '%s' has unknown type." % rpin.get_safepath())
|
https://github.com/rdiff-backup/rdiff-backup/issues/401
|
# rdiff-backup /dev/ /tmp/faketarget/
Exception '[Errno 2] No such file or directory: b'/tmp/faketarget/bus/usb/003/rdiff-backup.tmp.22'' raised of class '<class 'FileNotFoundError'>':
File "/usr/lib/python3/dist-packages/rdiff_backup/Main.py", line 390, in error_check_Main
Main(arglist)
File "/usr/lib/python3/dist-packages/rdiff_backup/Main.py", line 412, in Main
take_action(rps)
File "/usr/lib/python3/dist-packages/rdiff_backup/Main.py", line 348, in take_action
Backup(rps[0], rps[1])
File "/usr/lib/python3/dist-packages/rdiff_backup/Main.py", line 437, in Backup
backup.Mirror(rpin, rpout)
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 39, in Mirror
DestS.patch(dest_rpath, source_diffiter)
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 269, in patch
ITR(diff.index, diff)
File "/usr/lib/python3/dist-packages/rdiff_backup/rorpiter.py", line 313, in __call__
last_branch.fast_process(*args)
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 587, in fast_process
if self.patch_to_temp(mirror_rp, diff_rorp, tf):
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 612, in patch_to_temp
result = self.patch_snapshot_to_temp(diff_rorp, new)
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 655, in patch_snapshot_to_temp
rpath.copy_attribs(diff_rorp, new)
File "/usr/lib/python3/dist-packages/rdiff_backup/rpath.py", line 240, in copy_attribs
rpout.chown(*rpout.conn.user_group.map_rpath(rpin))
File "/usr/lib/python3/dist-packages/rdiff_backup/rpath.py", line 1192, in chown
self.conn.os.chown(self.path, uid, gid)
Traceback (most recent call last):
File "/usr/bin/rdiff-backup", line 32, in <module>
rdiff_backup.Main.error_check_Main(sys.argv[1:])
File "/usr/lib/python3/dist-packages/rdiff_backup/Main.py", line 390, in error_check_Main
Main(arglist)
File "/usr/lib/python3/dist-packages/rdiff_backup/Main.py", line 412, in Main
take_action(rps)
File "/usr/lib/python3/dist-packages/rdiff_backup/Main.py", line 348, in take_action
Backup(rps[0], rps[1])
File "/usr/lib/python3/dist-packages/rdiff_backup/Main.py", line 437, in Backup
backup.Mirror(rpin, rpout)
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 39, in Mirror
DestS.patch(dest_rpath, source_diffiter)
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 269, in patch
ITR(diff.index, diff)
File "/usr/lib/python3/dist-packages/rdiff_backup/rorpiter.py", line 313, in __call__
last_branch.fast_process(*args)
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 587, in fast_process
if self.patch_to_temp(mirror_rp, diff_rorp, tf):
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 612, in patch_to_temp
result = self.patch_snapshot_to_temp(diff_rorp, new)
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 655, in patch_snapshot_to_temp
rpath.copy_attribs(diff_rorp, new)
File "/usr/lib/python3/dist-packages/rdiff_backup/rpath.py", line 240, in copy_attribs
rpout.chown(*rpout.conn.user_group.map_rpath(rpin))
File "/usr/lib/python3/dist-packages/rdiff_backup/rpath.py", line 1192, in chown
self.conn.os.chown(self.path, uid, gid)
FileNotFoundError: [Errno 2] No such file or directory: b'/tmp/faketarget/bus/usb/003/rdiff-backup.tmp.22'
|
FileNotFoundError
|
def cmp(rpin, rpout):
"""True if rpin has the same data as rpout
cmp does not compare file ownership, permissions, or times, or
examine the contents of a directory.
"""
check_for_files(rpin, rpout)
if rpin.isreg():
if not rpout.isreg():
return None
fp1, fp2 = rpin.open("rb"), rpout.open("rb")
result = cmpfileobj(fp1, fp2)
if fp1.close() or fp2.close():
raise RPathException("Error closing file")
return result
elif rpin.isdir():
return rpout.isdir()
elif rpin.issym():
return rpout.issym() and (rpin.readlink() == rpout.readlink())
elif rpin.isdev():
return rpout.isdev() and (rpin.getdevnums() == rpout.getdevnums())
elif rpin.isfifo():
return rpout.isfifo()
elif rpin.issock():
return rpout.issock()
else:
raise RPathException("File %s has unknown type" % rpin.get_safepath())
|
def cmp(rpin, rpout):
"""True if rpin has the same data as rpout
cmp does not compare file ownership, permissions, or times, or
examine the contents of a directory.
"""
check_for_files(rpin, rpout)
if rpin.isreg():
if not rpout.isreg():
return None
fp1, fp2 = rpin.open("rb"), rpout.open("rb")
result = cmpfileobj(fp1, fp2)
if fp1.close() or fp2.close():
raise RPathException("Error closing file")
return result
elif rpin.isdir():
return rpout.isdir()
elif rpin.issym():
return rpout.issym() and (rpin.readlink() == rpout.readlink())
elif rpin.ischardev():
return rpout.ischardev() and (rpin.getdevnums() == rpout.getdevnums())
elif rpin.isblkdev():
return rpout.isblkdev() and (rpin.getdevnums() == rpout.getdevnums())
elif rpin.isfifo():
return rpout.isfifo()
elif rpin.issock():
return rpout.issock()
else:
raise RPathException("File %s has unknown type" % rpin.get_safepath())
|
https://github.com/rdiff-backup/rdiff-backup/issues/401
|
# rdiff-backup /dev/ /tmp/faketarget/
Exception '[Errno 2] No such file or directory: b'/tmp/faketarget/bus/usb/003/rdiff-backup.tmp.22'' raised of class '<class 'FileNotFoundError'>':
File "/usr/lib/python3/dist-packages/rdiff_backup/Main.py", line 390, in error_check_Main
Main(arglist)
File "/usr/lib/python3/dist-packages/rdiff_backup/Main.py", line 412, in Main
take_action(rps)
File "/usr/lib/python3/dist-packages/rdiff_backup/Main.py", line 348, in take_action
Backup(rps[0], rps[1])
File "/usr/lib/python3/dist-packages/rdiff_backup/Main.py", line 437, in Backup
backup.Mirror(rpin, rpout)
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 39, in Mirror
DestS.patch(dest_rpath, source_diffiter)
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 269, in patch
ITR(diff.index, diff)
File "/usr/lib/python3/dist-packages/rdiff_backup/rorpiter.py", line 313, in __call__
last_branch.fast_process(*args)
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 587, in fast_process
if self.patch_to_temp(mirror_rp, diff_rorp, tf):
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 612, in patch_to_temp
result = self.patch_snapshot_to_temp(diff_rorp, new)
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 655, in patch_snapshot_to_temp
rpath.copy_attribs(diff_rorp, new)
File "/usr/lib/python3/dist-packages/rdiff_backup/rpath.py", line 240, in copy_attribs
rpout.chown(*rpout.conn.user_group.map_rpath(rpin))
File "/usr/lib/python3/dist-packages/rdiff_backup/rpath.py", line 1192, in chown
self.conn.os.chown(self.path, uid, gid)
Traceback (most recent call last):
File "/usr/bin/rdiff-backup", line 32, in <module>
rdiff_backup.Main.error_check_Main(sys.argv[1:])
File "/usr/lib/python3/dist-packages/rdiff_backup/Main.py", line 390, in error_check_Main
Main(arglist)
File "/usr/lib/python3/dist-packages/rdiff_backup/Main.py", line 412, in Main
take_action(rps)
File "/usr/lib/python3/dist-packages/rdiff_backup/Main.py", line 348, in take_action
Backup(rps[0], rps[1])
File "/usr/lib/python3/dist-packages/rdiff_backup/Main.py", line 437, in Backup
backup.Mirror(rpin, rpout)
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 39, in Mirror
DestS.patch(dest_rpath, source_diffiter)
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 269, in patch
ITR(diff.index, diff)
File "/usr/lib/python3/dist-packages/rdiff_backup/rorpiter.py", line 313, in __call__
last_branch.fast_process(*args)
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 587, in fast_process
if self.patch_to_temp(mirror_rp, diff_rorp, tf):
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 612, in patch_to_temp
result = self.patch_snapshot_to_temp(diff_rorp, new)
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 655, in patch_snapshot_to_temp
rpath.copy_attribs(diff_rorp, new)
File "/usr/lib/python3/dist-packages/rdiff_backup/rpath.py", line 240, in copy_attribs
rpout.chown(*rpout.conn.user_group.map_rpath(rpin))
File "/usr/lib/python3/dist-packages/rdiff_backup/rpath.py", line 1192, in chown
self.conn.os.chown(self.path, uid, gid)
FileNotFoundError: [Errno 2] No such file or directory: b'/tmp/faketarget/bus/usb/003/rdiff-backup.tmp.22'
|
FileNotFoundError
|
def make_file_dict(filename):
"""Generate the data dictionary for the given RPath
This is a global function so that os.name can be called locally,
thus avoiding network lag and so that we only need to send the
filename over the network, thus avoiding the need to pickle an
(incomplete) rpath object.
"""
def _readlink(filename):
"""FIXME wrapper function to workaround a bug in os.readlink on Windows
not accepting bytes path. This function can be removed once pyinstaller
supports Python 3.8 and a new release can be made.
See https://github.com/pyinstaller/pyinstaller/issues/4311
"""
if os.name == "nt" and not isinstance(filename, str):
# we assume a bytes representation
return os.fsencode(os.readlink(os.fsdecode(filename)))
else:
return os.readlink(filename)
try:
statblock = os.lstat(filename)
except (FileNotFoundError, NotADirectoryError):
# FIXME not sure if this shouldn't trigger a warning but doing it
# generates (too) many messages during the tests
# log.Log("Warning: missing file '%s' couldn't be assessed." % filename, 2)
return {"type": None}
data = {}
mode = statblock[stat.ST_MODE]
if stat.S_ISREG(mode):
type_ = "reg"
elif stat.S_ISDIR(mode):
type_ = "dir"
elif stat.S_ISCHR(mode):
type_ = "dev"
s = statblock.st_rdev
data["devnums"] = ("c", os.major(s), os.minor(s))
elif stat.S_ISBLK(mode):
type_ = "dev"
s = statblock.st_rdev
data["devnums"] = ("b", os.major(s), os.minor(s))
elif stat.S_ISFIFO(mode):
type_ = "fifo"
elif stat.S_ISLNK(mode):
type_ = "sym"
# FIXME reverse once Python 3.8 can be used under Windows
# data['linkname'] = os.readlink(filename)
data["linkname"] = _readlink(filename)
elif stat.S_ISSOCK(mode):
type_ = "sock"
else:
raise C.UnknownFileError(filename)
data["type"] = type_
data["size"] = statblock[stat.ST_SIZE]
data["perms"] = stat.S_IMODE(mode)
data["uid"] = statblock[stat.ST_UID]
data["gid"] = statblock[stat.ST_GID]
data["inode"] = statblock[stat.ST_INO]
data["devloc"] = statblock[stat.ST_DEV]
data["nlink"] = statblock[stat.ST_NLINK]
if os.name == "nt":
try:
attribs = win32api.GetFileAttributes(os.fsdecode(filename))
except pywintypes.error as exc:
if exc.args[0] == 32: # file in use
# we could also ignore with: return {'type': None}
# but this approach seems to be better handled
attribs = 0
else:
# we replace the specific Windows exception by a generic
# one also understood by a potential Linux client/server
raise OSError(
None, exc.args[1] + " - " + exc.args[2], filename, exc.args[0]
) from None
if attribs & win32con.FILE_ATTRIBUTE_REPARSE_POINT:
data["type"] = "sym"
data["linkname"] = None
if not (type_ == "sym" or type_ == "dev"):
# mtimes on symlinks and dev files don't work consistently
data["mtime"] = int(statblock[stat.ST_MTIME])
data["atime"] = int(statblock[stat.ST_ATIME])
data["ctime"] = int(statblock[stat.ST_CTIME])
return data
|
def make_file_dict(filename):
"""Generate the data dictionary for the given RPath
This is a global function so that os.name can be called locally,
thus avoiding network lag and so that we only need to send the
filename over the network, thus avoiding the need to pickle an
(incomplete) rpath object.
"""
def _readlink(filename):
"""FIXME wrapper function to workaround a bug in os.readlink on Windows
not accepting bytes path. This function can be removed once pyinstaller
supports Python 3.8 and a new release can be made.
See https://github.com/pyinstaller/pyinstaller/issues/4311
"""
if os.name == "nt" and not isinstance(filename, str):
# we assume a bytes representation
return os.fsencode(os.readlink(os.fsdecode(filename)))
else:
return os.readlink(filename)
try:
statblock = os.lstat(filename)
except (FileNotFoundError, NotADirectoryError):
# FIXME not sure if this shouldn't trigger a warning but doing it
# generates (too) many messages during the tests
# log.Log("Warning: missing file '%s' couldn't be assessed." % filename, 2)
return {"type": None}
data = {}
mode = statblock[stat.ST_MODE]
if stat.S_ISREG(mode):
type_ = "reg"
elif stat.S_ISDIR(mode):
type_ = "dir"
elif stat.S_ISCHR(mode):
type_ = "dev"
s = statblock.st_rdev
data["devnums"] = ("c",) + (s >> 8, s & 0xFF)
elif stat.S_ISBLK(mode):
type_ = "dev"
s = statblock.st_rdev
data["devnums"] = ("b",) + (s >> 8, s & 0xFF)
elif stat.S_ISFIFO(mode):
type_ = "fifo"
elif stat.S_ISLNK(mode):
type_ = "sym"
# FIXME reverse once Python 3.8 can be used under Windows
# data['linkname'] = os.readlink(filename)
data["linkname"] = _readlink(filename)
elif stat.S_ISSOCK(mode):
type_ = "sock"
else:
raise C.UnknownFileError(filename)
data["type"] = type_
data["size"] = statblock[stat.ST_SIZE]
data["perms"] = stat.S_IMODE(mode)
data["uid"] = statblock[stat.ST_UID]
data["gid"] = statblock[stat.ST_GID]
data["inode"] = statblock[stat.ST_INO]
data["devloc"] = statblock[stat.ST_DEV]
data["nlink"] = statblock[stat.ST_NLINK]
if os.name == "nt":
try:
attribs = win32api.GetFileAttributes(os.fsdecode(filename))
except pywintypes.error as exc:
if exc.args[0] == 32: # file in use
# we could also ignore with: return {'type': None}
# but this approach seems to be better handled
attribs = 0
else:
# we replace the specific Windows exception by a generic
# one also understood by a potential Linux client/server
raise OSError(
None, exc.args[1] + " - " + exc.args[2], filename, exc.args[0]
) from None
if attribs & win32con.FILE_ATTRIBUTE_REPARSE_POINT:
data["type"] = "sym"
data["linkname"] = None
if not (type_ == "sym" or type_ == "dev"):
# mtimes on symlinks and dev files don't work consistently
data["mtime"] = int(statblock[stat.ST_MTIME])
data["atime"] = int(statblock[stat.ST_ATIME])
data["ctime"] = int(statblock[stat.ST_CTIME])
return data
|
https://github.com/rdiff-backup/rdiff-backup/issues/401
|
# rdiff-backup /dev/ /tmp/faketarget/
Exception '[Errno 2] No such file or directory: b'/tmp/faketarget/bus/usb/003/rdiff-backup.tmp.22'' raised of class '<class 'FileNotFoundError'>':
File "/usr/lib/python3/dist-packages/rdiff_backup/Main.py", line 390, in error_check_Main
Main(arglist)
File "/usr/lib/python3/dist-packages/rdiff_backup/Main.py", line 412, in Main
take_action(rps)
File "/usr/lib/python3/dist-packages/rdiff_backup/Main.py", line 348, in take_action
Backup(rps[0], rps[1])
File "/usr/lib/python3/dist-packages/rdiff_backup/Main.py", line 437, in Backup
backup.Mirror(rpin, rpout)
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 39, in Mirror
DestS.patch(dest_rpath, source_diffiter)
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 269, in patch
ITR(diff.index, diff)
File "/usr/lib/python3/dist-packages/rdiff_backup/rorpiter.py", line 313, in __call__
last_branch.fast_process(*args)
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 587, in fast_process
if self.patch_to_temp(mirror_rp, diff_rorp, tf):
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 612, in patch_to_temp
result = self.patch_snapshot_to_temp(diff_rorp, new)
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 655, in patch_snapshot_to_temp
rpath.copy_attribs(diff_rorp, new)
File "/usr/lib/python3/dist-packages/rdiff_backup/rpath.py", line 240, in copy_attribs
rpout.chown(*rpout.conn.user_group.map_rpath(rpin))
File "/usr/lib/python3/dist-packages/rdiff_backup/rpath.py", line 1192, in chown
self.conn.os.chown(self.path, uid, gid)
Traceback (most recent call last):
File "/usr/bin/rdiff-backup", line 32, in <module>
rdiff_backup.Main.error_check_Main(sys.argv[1:])
File "/usr/lib/python3/dist-packages/rdiff_backup/Main.py", line 390, in error_check_Main
Main(arglist)
File "/usr/lib/python3/dist-packages/rdiff_backup/Main.py", line 412, in Main
take_action(rps)
File "/usr/lib/python3/dist-packages/rdiff_backup/Main.py", line 348, in take_action
Backup(rps[0], rps[1])
File "/usr/lib/python3/dist-packages/rdiff_backup/Main.py", line 437, in Backup
backup.Mirror(rpin, rpout)
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 39, in Mirror
DestS.patch(dest_rpath, source_diffiter)
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 269, in patch
ITR(diff.index, diff)
File "/usr/lib/python3/dist-packages/rdiff_backup/rorpiter.py", line 313, in __call__
last_branch.fast_process(*args)
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 587, in fast_process
if self.patch_to_temp(mirror_rp, diff_rorp, tf):
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 612, in patch_to_temp
result = self.patch_snapshot_to_temp(diff_rorp, new)
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 655, in patch_snapshot_to_temp
rpath.copy_attribs(diff_rorp, new)
File "/usr/lib/python3/dist-packages/rdiff_backup/rpath.py", line 240, in copy_attribs
rpout.chown(*rpout.conn.user_group.map_rpath(rpin))
File "/usr/lib/python3/dist-packages/rdiff_backup/rpath.py", line 1192, in chown
self.conn.os.chown(self.path, uid, gid)
FileNotFoundError: [Errno 2] No such file or directory: b'/tmp/faketarget/bus/usb/003/rdiff-backup.tmp.22'
|
FileNotFoundError
|
def getdevnums(self):
"""Return a device's type and major/minor numbers from dictionary"""
return self.data["devnums"]
|
def getdevnums(self):
"""Return a devices major/minor numbers from dictionary"""
return self.data["devnums"][1:]
|
https://github.com/rdiff-backup/rdiff-backup/issues/401
|
# rdiff-backup /dev/ /tmp/faketarget/
Exception '[Errno 2] No such file or directory: b'/tmp/faketarget/bus/usb/003/rdiff-backup.tmp.22'' raised of class '<class 'FileNotFoundError'>':
File "/usr/lib/python3/dist-packages/rdiff_backup/Main.py", line 390, in error_check_Main
Main(arglist)
File "/usr/lib/python3/dist-packages/rdiff_backup/Main.py", line 412, in Main
take_action(rps)
File "/usr/lib/python3/dist-packages/rdiff_backup/Main.py", line 348, in take_action
Backup(rps[0], rps[1])
File "/usr/lib/python3/dist-packages/rdiff_backup/Main.py", line 437, in Backup
backup.Mirror(rpin, rpout)
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 39, in Mirror
DestS.patch(dest_rpath, source_diffiter)
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 269, in patch
ITR(diff.index, diff)
File "/usr/lib/python3/dist-packages/rdiff_backup/rorpiter.py", line 313, in __call__
last_branch.fast_process(*args)
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 587, in fast_process
if self.patch_to_temp(mirror_rp, diff_rorp, tf):
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 612, in patch_to_temp
result = self.patch_snapshot_to_temp(diff_rorp, new)
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 655, in patch_snapshot_to_temp
rpath.copy_attribs(diff_rorp, new)
File "/usr/lib/python3/dist-packages/rdiff_backup/rpath.py", line 240, in copy_attribs
rpout.chown(*rpout.conn.user_group.map_rpath(rpin))
File "/usr/lib/python3/dist-packages/rdiff_backup/rpath.py", line 1192, in chown
self.conn.os.chown(self.path, uid, gid)
Traceback (most recent call last):
File "/usr/bin/rdiff-backup", line 32, in <module>
rdiff_backup.Main.error_check_Main(sys.argv[1:])
File "/usr/lib/python3/dist-packages/rdiff_backup/Main.py", line 390, in error_check_Main
Main(arglist)
File "/usr/lib/python3/dist-packages/rdiff_backup/Main.py", line 412, in Main
take_action(rps)
File "/usr/lib/python3/dist-packages/rdiff_backup/Main.py", line 348, in take_action
Backup(rps[0], rps[1])
File "/usr/lib/python3/dist-packages/rdiff_backup/Main.py", line 437, in Backup
backup.Mirror(rpin, rpout)
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 39, in Mirror
DestS.patch(dest_rpath, source_diffiter)
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 269, in patch
ITR(diff.index, diff)
File "/usr/lib/python3/dist-packages/rdiff_backup/rorpiter.py", line 313, in __call__
last_branch.fast_process(*args)
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 587, in fast_process
if self.patch_to_temp(mirror_rp, diff_rorp, tf):
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 612, in patch_to_temp
result = self.patch_snapshot_to_temp(diff_rorp, new)
File "/usr/lib/python3/dist-packages/rdiff_backup/backup.py", line 655, in patch_snapshot_to_temp
rpath.copy_attribs(diff_rorp, new)
File "/usr/lib/python3/dist-packages/rdiff_backup/rpath.py", line 240, in copy_attribs
rpout.chown(*rpout.conn.user_group.map_rpath(rpin))
File "/usr/lib/python3/dist-packages/rdiff_backup/rpath.py", line 1192, in chown
self.conn.os.chown(self.path, uid, gid)
FileNotFoundError: [Errno 2] No such file or directory: b'/tmp/faketarget/bus/usb/003/rdiff-backup.tmp.22'
|
FileNotFoundError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.