doc_content stringlengths 1 386k | doc_id stringlengths 5 188 |
|---|---|
entropy() [source] | torch.distributions#torch.distributions.lowrank_multivariate_normal.LowRankMultivariateNormal.entropy |
expand(batch_shape, _instance=None) [source] | torch.distributions#torch.distributions.lowrank_multivariate_normal.LowRankMultivariateNormal.expand |
has_rsample = True | torch.distributions#torch.distributions.lowrank_multivariate_normal.LowRankMultivariateNormal.has_rsample |
log_prob(value) [source] | torch.distributions#torch.distributions.lowrank_multivariate_normal.LowRankMultivariateNormal.log_prob |
property mean | torch.distributions#torch.distributions.lowrank_multivariate_normal.LowRankMultivariateNormal.mean |
precision_matrix [source] | torch.distributions#torch.distributions.lowrank_multivariate_normal.LowRankMultivariateNormal.precision_matrix |
rsample(sample_shape=torch.Size([])) [source] | torch.distributions#torch.distributions.lowrank_multivariate_normal.LowRankMultivariateNormal.rsample |
scale_tril [source] | torch.distributions#torch.distributions.lowrank_multivariate_normal.LowRankMultivariateNormal.scale_tril |
support = IndependentConstraint(Real(), 1) | torch.distributions#torch.distributions.lowrank_multivariate_normal.LowRankMultivariateNormal.support |
variance [source] | torch.distributions#torch.distributions.lowrank_multivariate_normal.LowRankMultivariateNormal.variance |
class torch.distributions.mixture_same_family.MixtureSameFamily(mixture_distribution, component_distribution, validate_args=None) [source]
Bases: torch.distributions.distribution.Distribution The MixtureSameFamily distribution implements a (batch of) mixture distribution where all components are from different parameterizations of the same distribution type. It is parameterized by a Categorical “selecting distribution” (over k components) and a component distribution, i.e., a Distribution with a rightmost batch shape (equal to [k]) which indexes each (batch of) component. Examples: # Construct Gaussian Mixture Model in 1D consisting of 5 equally
# weighted normal distributions
>>> mix = D.Categorical(torch.ones(5,))
>>> comp = D.Normal(torch.randn(5,), torch.rand(5,))
>>> gmm = MixtureSameFamily(mix, comp)
# Construct Gaussian Mixture Model in 2D consisting of 5 equally
# weighted bivariate normal distributions
>>> mix = D.Categorical(torch.ones(5,))
>>> comp = D.Independent(D.Normal(
torch.randn(5,2), torch.rand(5,2)), 1)
>>> gmm = MixtureSameFamily(mix, comp)
# Construct a batch of 3 Gaussian Mixture Models in 2D each
# consisting of 5 random weighted bivariate normal distributions
>>> mix = D.Categorical(torch.rand(3,5))
>>> comp = D.Independent(D.Normal(
torch.randn(3,5,2), torch.rand(3,5,2)), 1)
>>> gmm = MixtureSameFamily(mix, comp)
Parameters
mixture_distribution – torch.distributions.Categorical-like instance. Manages the probability of selecting components. The number of categories must match the rightmost batch dimension of the component_distribution. Must have either scalar batch_shape or batch_shape matching component_distribution.batch_shape[:-1]
component_distribution – torch.distributions.Distribution-like instance. Right-most batch dimension indexes components.
arg_constraints: Dict[str, torch.distributions.constraints.Constraint] = {}
cdf(x) [source]
property component_distribution
expand(batch_shape, _instance=None) [source]
has_rsample = False
log_prob(x) [source]
property mean
property mixture_distribution
sample(sample_shape=torch.Size([])) [source]
property support
property variance | torch.distributions#torch.distributions.mixture_same_family.MixtureSameFamily |
arg_constraints: Dict[str, torch.distributions.constraints.Constraint] = {} | torch.distributions#torch.distributions.mixture_same_family.MixtureSameFamily.arg_constraints |
cdf(x) [source] | torch.distributions#torch.distributions.mixture_same_family.MixtureSameFamily.cdf |
property component_distribution | torch.distributions#torch.distributions.mixture_same_family.MixtureSameFamily.component_distribution |
expand(batch_shape, _instance=None) [source] | torch.distributions#torch.distributions.mixture_same_family.MixtureSameFamily.expand |
has_rsample = False | torch.distributions#torch.distributions.mixture_same_family.MixtureSameFamily.has_rsample |
log_prob(x) [source] | torch.distributions#torch.distributions.mixture_same_family.MixtureSameFamily.log_prob |
property mean | torch.distributions#torch.distributions.mixture_same_family.MixtureSameFamily.mean |
property mixture_distribution | torch.distributions#torch.distributions.mixture_same_family.MixtureSameFamily.mixture_distribution |
sample(sample_shape=torch.Size([])) [source] | torch.distributions#torch.distributions.mixture_same_family.MixtureSameFamily.sample |
property support | torch.distributions#torch.distributions.mixture_same_family.MixtureSameFamily.support |
property variance | torch.distributions#torch.distributions.mixture_same_family.MixtureSameFamily.variance |
class torch.distributions.multinomial.Multinomial(total_count=1, probs=None, logits=None, validate_args=None) [source]
Bases: torch.distributions.distribution.Distribution Creates a Multinomial distribution parameterized by total_count and either probs or logits (but not both). The innermost dimension of probs indexes over categories. All other dimensions index over batches. Note that total_count need not be specified if only log_prob() is called (see example below) Note The probs argument must be non-negative, finite and have a non-zero sum, and it will be normalized to sum to 1 along the last dimension. The probs attribute will return this normalized value. The logits argument will be interpreted as unnormalized log probabilities and can therefore be any real number. It will likewise be normalized so that the resulting probabilities sum to 1 along the last dimension. The logits attribute will return this normalized value.
sample() requires a single shared total_count for all parameters and samples.
log_prob() allows different total_count for each parameter and sample. Example: >>> m = Multinomial(100, torch.tensor([ 1., 1., 1., 1.]))
>>> x = m.sample() # equal probability of 0, 1, 2, 3
tensor([ 21., 24., 30., 25.])
>>> Multinomial(probs=torch.tensor([1., 1., 1., 1.])).log_prob(x)
tensor([-4.1338])
Parameters
total_count (int) – number of trials
probs (Tensor) – event probabilities
logits (Tensor) – event log probabilities (unnormalized)
arg_constraints = {'logits': IndependentConstraint(Real(), 1), 'probs': Simplex()}
expand(batch_shape, _instance=None) [source]
log_prob(value) [source]
property logits
property mean
property param_shape
property probs
sample(sample_shape=torch.Size([])) [source]
property support
total_count: int = None
property variance | torch.distributions#torch.distributions.multinomial.Multinomial |
arg_constraints = {'logits': IndependentConstraint(Real(), 1), 'probs': Simplex()} | torch.distributions#torch.distributions.multinomial.Multinomial.arg_constraints |
expand(batch_shape, _instance=None) [source] | torch.distributions#torch.distributions.multinomial.Multinomial.expand |
property logits | torch.distributions#torch.distributions.multinomial.Multinomial.logits |
log_prob(value) [source] | torch.distributions#torch.distributions.multinomial.Multinomial.log_prob |
property mean | torch.distributions#torch.distributions.multinomial.Multinomial.mean |
property param_shape | torch.distributions#torch.distributions.multinomial.Multinomial.param_shape |
property probs | torch.distributions#torch.distributions.multinomial.Multinomial.probs |
sample(sample_shape=torch.Size([])) [source] | torch.distributions#torch.distributions.multinomial.Multinomial.sample |
property support | torch.distributions#torch.distributions.multinomial.Multinomial.support |
total_count: int = None | torch.distributions#torch.distributions.multinomial.Multinomial.total_count |
property variance | torch.distributions#torch.distributions.multinomial.Multinomial.variance |
class torch.distributions.multivariate_normal.MultivariateNormal(loc, covariance_matrix=None, precision_matrix=None, scale_tril=None, validate_args=None) [source]
Bases: torch.distributions.distribution.Distribution Creates a multivariate normal (also called Gaussian) distribution parameterized by a mean vector and a covariance matrix. The multivariate normal distribution can be parameterized either in terms of a positive definite covariance matrix $\mathbf{\Sigma}$ or a positive definite precision matrix $\mathbf{\Sigma}^{-1}$ or a lower-triangular matrix $\mathbf{L}$ with positive-valued diagonal entries, such that $\mathbf{\Sigma} = \mathbf{L}\mathbf{L}^\top$. This triangular matrix can be obtained via e.g. Cholesky decomposition of the covariance. Example >>> m = MultivariateNormal(torch.zeros(2), torch.eye(2))
>>> m.sample() # normally distributed with mean=`[0,0]` and covariance_matrix=`I`
tensor([-0.2102, -0.5429])
Parameters
loc (Tensor) – mean of the distribution
covariance_matrix (Tensor) – positive-definite covariance matrix
precision_matrix (Tensor) – positive-definite precision matrix
scale_tril (Tensor) – lower-triangular factor of covariance, with positive-valued diagonal Note Only one of covariance_matrix or precision_matrix or scale_tril can be specified. Using scale_tril will be more efficient: all computations internally are based on scale_tril. If covariance_matrix or precision_matrix is passed instead, it is only used to compute the corresponding lower triangular matrices using a Cholesky decomposition.
arg_constraints = {'covariance_matrix': PositiveDefinite(), 'loc': IndependentConstraint(Real(), 1), 'precision_matrix': PositiveDefinite(), 'scale_tril': LowerCholesky()}
covariance_matrix [source]
entropy() [source]
expand(batch_shape, _instance=None) [source]
has_rsample = True
log_prob(value) [source]
property mean
precision_matrix [source]
rsample(sample_shape=torch.Size([])) [source]
scale_tril [source]
support = IndependentConstraint(Real(), 1)
property variance | torch.distributions#torch.distributions.multivariate_normal.MultivariateNormal |
arg_constraints = {'covariance_matrix': PositiveDefinite(), 'loc': IndependentConstraint(Real(), 1), 'precision_matrix': PositiveDefinite(), 'scale_tril': LowerCholesky()} | torch.distributions#torch.distributions.multivariate_normal.MultivariateNormal.arg_constraints |
covariance_matrix [source] | torch.distributions#torch.distributions.multivariate_normal.MultivariateNormal.covariance_matrix |
entropy() [source] | torch.distributions#torch.distributions.multivariate_normal.MultivariateNormal.entropy |
expand(batch_shape, _instance=None) [source] | torch.distributions#torch.distributions.multivariate_normal.MultivariateNormal.expand |
has_rsample = True | torch.distributions#torch.distributions.multivariate_normal.MultivariateNormal.has_rsample |
log_prob(value) [source] | torch.distributions#torch.distributions.multivariate_normal.MultivariateNormal.log_prob |
property mean | torch.distributions#torch.distributions.multivariate_normal.MultivariateNormal.mean |
precision_matrix [source] | torch.distributions#torch.distributions.multivariate_normal.MultivariateNormal.precision_matrix |
rsample(sample_shape=torch.Size([])) [source] | torch.distributions#torch.distributions.multivariate_normal.MultivariateNormal.rsample |
scale_tril [source] | torch.distributions#torch.distributions.multivariate_normal.MultivariateNormal.scale_tril |
support = IndependentConstraint(Real(), 1) | torch.distributions#torch.distributions.multivariate_normal.MultivariateNormal.support |
property variance | torch.distributions#torch.distributions.multivariate_normal.MultivariateNormal.variance |
class torch.distributions.negative_binomial.NegativeBinomial(total_count, probs=None, logits=None, validate_args=None) [source]
Bases: torch.distributions.distribution.Distribution Creates a Negative Binomial distribution, i.e. distribution of the number of successful independent and identical Bernoulli trials before total_count failures are achieved. The probability of failure of each Bernoulli trial is probs. Parameters
total_count (float or Tensor) – non-negative number of negative Bernoulli trials to stop, although the distribution is still valid for real valued count
probs (Tensor) – Event probabilities of failure in the half open interval [0, 1)
logits (Tensor) – Event log-odds for probabilities of failure
arg_constraints = {'logits': Real(), 'probs': HalfOpenInterval(lower_bound=0.0, upper_bound=1.0), 'total_count': GreaterThanEq(lower_bound=0)}
expand(batch_shape, _instance=None) [source]
log_prob(value) [source]
logits [source]
property mean
property param_shape
probs [source]
sample(sample_shape=torch.Size([])) [source]
support = IntegerGreaterThan(lower_bound=0)
property variance | torch.distributions#torch.distributions.negative_binomial.NegativeBinomial |
arg_constraints = {'logits': Real(), 'probs': HalfOpenInterval(lower_bound=0.0, upper_bound=1.0), 'total_count': GreaterThanEq(lower_bound=0)} | torch.distributions#torch.distributions.negative_binomial.NegativeBinomial.arg_constraints |
expand(batch_shape, _instance=None) [source] | torch.distributions#torch.distributions.negative_binomial.NegativeBinomial.expand |
logits [source] | torch.distributions#torch.distributions.negative_binomial.NegativeBinomial.logits |
log_prob(value) [source] | torch.distributions#torch.distributions.negative_binomial.NegativeBinomial.log_prob |
property mean | torch.distributions#torch.distributions.negative_binomial.NegativeBinomial.mean |
property param_shape | torch.distributions#torch.distributions.negative_binomial.NegativeBinomial.param_shape |
probs [source] | torch.distributions#torch.distributions.negative_binomial.NegativeBinomial.probs |
sample(sample_shape=torch.Size([])) [source] | torch.distributions#torch.distributions.negative_binomial.NegativeBinomial.sample |
support = IntegerGreaterThan(lower_bound=0) | torch.distributions#torch.distributions.negative_binomial.NegativeBinomial.support |
property variance | torch.distributions#torch.distributions.negative_binomial.NegativeBinomial.variance |
class torch.distributions.normal.Normal(loc, scale, validate_args=None) [source]
Bases: torch.distributions.exp_family.ExponentialFamily Creates a normal (also called Gaussian) distribution parameterized by loc and scale. Example: >>> m = Normal(torch.tensor([0.0]), torch.tensor([1.0]))
>>> m.sample() # normally distributed with loc=0 and scale=1
tensor([ 0.1046])
Parameters
loc (float or Tensor) – mean of the distribution (often referred to as mu)
scale (float or Tensor) – standard deviation of the distribution (often referred to as sigma)
arg_constraints = {'loc': Real(), 'scale': GreaterThan(lower_bound=0.0)}
cdf(value) [source]
entropy() [source]
expand(batch_shape, _instance=None) [source]
has_rsample = True
icdf(value) [source]
log_prob(value) [source]
property mean
rsample(sample_shape=torch.Size([])) [source]
sample(sample_shape=torch.Size([])) [source]
property stddev
support = Real()
property variance | torch.distributions#torch.distributions.normal.Normal |
arg_constraints = {'loc': Real(), 'scale': GreaterThan(lower_bound=0.0)} | torch.distributions#torch.distributions.normal.Normal.arg_constraints |
cdf(value) [source] | torch.distributions#torch.distributions.normal.Normal.cdf |
entropy() [source] | torch.distributions#torch.distributions.normal.Normal.entropy |
expand(batch_shape, _instance=None) [source] | torch.distributions#torch.distributions.normal.Normal.expand |
has_rsample = True | torch.distributions#torch.distributions.normal.Normal.has_rsample |
icdf(value) [source] | torch.distributions#torch.distributions.normal.Normal.icdf |
log_prob(value) [source] | torch.distributions#torch.distributions.normal.Normal.log_prob |
property mean | torch.distributions#torch.distributions.normal.Normal.mean |
rsample(sample_shape=torch.Size([])) [source] | torch.distributions#torch.distributions.normal.Normal.rsample |
sample(sample_shape=torch.Size([])) [source] | torch.distributions#torch.distributions.normal.Normal.sample |
property stddev | torch.distributions#torch.distributions.normal.Normal.stddev |
support = Real() | torch.distributions#torch.distributions.normal.Normal.support |
property variance | torch.distributions#torch.distributions.normal.Normal.variance |
class torch.distributions.one_hot_categorical.OneHotCategorical(probs=None, logits=None, validate_args=None) [source]
Bases: torch.distributions.distribution.Distribution Creates a one-hot categorical distribution parameterized by probs or logits. Samples are one-hot coded vectors of size probs.size(-1). Note The probs argument must be non-negative, finite and have a non-zero sum, and it will be normalized to sum to 1 along the last dimension. The probs attribute will return this normalized value. The logits argument will be interpreted as unnormalized log probabilities and can therefore be any real number. It will likewise be normalized so that the resulting probabilities sum to 1 along the last dimension. The logits attribute will return this normalized value. See also: torch.distributions.Categorical() for specifications of probs and logits. Example: >>> m = OneHotCategorical(torch.tensor([ 0.25, 0.25, 0.25, 0.25 ]))
>>> m.sample() # equal probability of 0, 1, 2, 3
tensor([ 0., 0., 0., 1.])
Parameters
probs (Tensor) – event probabilities
logits (Tensor) – event log probabilities (unnormalized)
arg_constraints = {'logits': IndependentConstraint(Real(), 1), 'probs': Simplex()}
entropy() [source]
enumerate_support(expand=True) [source]
expand(batch_shape, _instance=None) [source]
has_enumerate_support = True
log_prob(value) [source]
property logits
property mean
property param_shape
property probs
sample(sample_shape=torch.Size([])) [source]
support = OneHot()
property variance | torch.distributions#torch.distributions.one_hot_categorical.OneHotCategorical |
arg_constraints = {'logits': IndependentConstraint(Real(), 1), 'probs': Simplex()} | torch.distributions#torch.distributions.one_hot_categorical.OneHotCategorical.arg_constraints |
entropy() [source] | torch.distributions#torch.distributions.one_hot_categorical.OneHotCategorical.entropy |
enumerate_support(expand=True) [source] | torch.distributions#torch.distributions.one_hot_categorical.OneHotCategorical.enumerate_support |
expand(batch_shape, _instance=None) [source] | torch.distributions#torch.distributions.one_hot_categorical.OneHotCategorical.expand |
has_enumerate_support = True | torch.distributions#torch.distributions.one_hot_categorical.OneHotCategorical.has_enumerate_support |
property logits | torch.distributions#torch.distributions.one_hot_categorical.OneHotCategorical.logits |
log_prob(value) [source] | torch.distributions#torch.distributions.one_hot_categorical.OneHotCategorical.log_prob |
property mean | torch.distributions#torch.distributions.one_hot_categorical.OneHotCategorical.mean |
property param_shape | torch.distributions#torch.distributions.one_hot_categorical.OneHotCategorical.param_shape |
property probs | torch.distributions#torch.distributions.one_hot_categorical.OneHotCategorical.probs |
sample(sample_shape=torch.Size([])) [source] | torch.distributions#torch.distributions.one_hot_categorical.OneHotCategorical.sample |
support = OneHot() | torch.distributions#torch.distributions.one_hot_categorical.OneHotCategorical.support |
property variance | torch.distributions#torch.distributions.one_hot_categorical.OneHotCategorical.variance |
class torch.distributions.pareto.Pareto(scale, alpha, validate_args=None) [source]
Bases: torch.distributions.transformed_distribution.TransformedDistribution Samples from a Pareto Type 1 distribution. Example: >>> m = Pareto(torch.tensor([1.0]), torch.tensor([1.0]))
>>> m.sample() # sample from a Pareto distribution with scale=1 and alpha=1
tensor([ 1.5623])
Parameters
scale (float or Tensor) – Scale parameter of the distribution
alpha (float or Tensor) – Shape parameter of the distribution
arg_constraints: Dict[str, torch.distributions.constraints.Constraint] = {'alpha': GreaterThan(lower_bound=0.0), 'scale': GreaterThan(lower_bound=0.0)}
entropy() [source]
expand(batch_shape, _instance=None) [source]
property mean
property support
property variance | torch.distributions#torch.distributions.pareto.Pareto |
arg_constraints: Dict[str, torch.distributions.constraints.Constraint] = {'alpha': GreaterThan(lower_bound=0.0), 'scale': GreaterThan(lower_bound=0.0)} | torch.distributions#torch.distributions.pareto.Pareto.arg_constraints |
entropy() [source] | torch.distributions#torch.distributions.pareto.Pareto.entropy |
expand(batch_shape, _instance=None) [source] | torch.distributions#torch.distributions.pareto.Pareto.expand |
property mean | torch.distributions#torch.distributions.pareto.Pareto.mean |
property support | torch.distributions#torch.distributions.pareto.Pareto.support |
property variance | torch.distributions#torch.distributions.pareto.Pareto.variance |
class torch.distributions.poisson.Poisson(rate, validate_args=None) [source]
Bases: torch.distributions.exp_family.ExponentialFamily Creates a Poisson distribution parameterized by rate, the rate parameter. Samples are nonnegative integers, with a pmf given by $\mathrm{rate}^k \frac{e^{-\mathrm{rate}}}{k!}$
Example: >>> m = Poisson(torch.tensor([4]))
>>> m.sample()
tensor([ 3.])
Parameters
rate (Number, Tensor) – the rate parameter
arg_constraints = {'rate': GreaterThan(lower_bound=0.0)}
expand(batch_shape, _instance=None) [source]
log_prob(value) [source]
property mean
sample(sample_shape=torch.Size([])) [source]
support = IntegerGreaterThan(lower_bound=0)
property variance | torch.distributions#torch.distributions.poisson.Poisson |
arg_constraints = {'rate': GreaterThan(lower_bound=0.0)} | torch.distributions#torch.distributions.poisson.Poisson.arg_constraints |
expand(batch_shape, _instance=None) [source] | torch.distributions#torch.distributions.poisson.Poisson.expand |
log_prob(value) [source] | torch.distributions#torch.distributions.poisson.Poisson.log_prob |
property mean | torch.distributions#torch.distributions.poisson.Poisson.mean |
sample(sample_shape=torch.Size([])) [source] | torch.distributions#torch.distributions.poisson.Poisson.sample |
support = IntegerGreaterThan(lower_bound=0) | torch.distributions#torch.distributions.poisson.Poisson.support |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.