# NOTE: removed HuggingFace dataset-viewer header residue ("text / stringlengths / 93.6k / |---|") — scraping artifact, not part of the source.
def get_dist(self, params: torch.Tensor, input_params=None, t=None) -> DiscreteDistribution:
    """Map raw network output `params` to a DiscreteDistribution.

    Base-class stub: subclasses override this and return a concrete
    distribution built from `params`.
    Note: input_params and t are only required by PredDistToDataDistFactory;
    the other factories visible in this file ignore them.
    """
    pass
|
class BernoulliFactory(DiscreteDistributionFactory):
    """Factory that turns raw network logits into Bernoulli distributions."""

    def get_dist(self, params, input_params=None, t=None):
        """Build a Bernoulli from `params`, dropping the trailing singleton axis.

        `input_params` and `t` are accepted for interface compatibility only.
        """
        logits = params.squeeze(-1)
        return Bernoulli(logits=logits)
|
class CategoricalFactory(DiscreteDistributionFactory):
    """Factory that turns raw network logits into Categorical distributions."""

    def get_dist(self, params, input_params=None, t=None):
        """Build a Categorical whose class logits live on the last axis of `params`.

        `input_params` and `t` are accepted for interface compatibility only.
        """
        class_logits = params
        return Categorical(logits=class_logits)
|
class DiscretizedBernoulliFactory(DiscreteDistributionFactory):
    """Factory that turns raw network logits into DiscretizedBernoulli distributions."""

    def get_dist(self, params, input_params=None, t=None):
        """Build a DiscretizedBernoulli from `params`, dropping the trailing singleton axis.

        `input_params` and `t` are accepted for interface compatibility only.
        """
        logits = params.squeeze(-1)
        return DiscretizedBernoulli(logits=logits)
|
class DiscretizedCategoricalFactory(DiscreteDistributionFactory):
    """Factory that turns raw network logits into DiscretizedCategorical distributions."""

    def get_dist(self, params, input_params=None, t=None):
        """Build a DiscretizedCategorical whose logits live on the last axis of `params`.

        `input_params` and `t` are accepted for interface compatibility only.
        """
        bin_logits = params
        return DiscretizedCategorical(logits=bin_logits)
|
class DiscretizedGMMFactory(DiscreteDistributionFactory):
    """Factory producing DiscretizedGMM distributions.

    Construction options are captured once and forwarded verbatim on every
    `get_dist` call.
    """

    def __init__(self, num_bins, clip=True, min_std_dev=1e-3, max_std_dev=10, min_prob=1e-5, log_dev=True):
        # Each option is kept as a public attribute so callers can inspect it.
        self.num_bins = num_bins
        self.clip = clip
        self.min_std_dev = min_std_dev
        self.max_std_dev = max_std_dev
        self.min_prob = min_prob
        self.log_dev = log_dev

    def get_dist(self, params, input_params=None, t=None):
        """Wrap raw `params` in a DiscretizedGMM using the stored options.

        `input_params` and `t` are accepted for interface compatibility only.
        """
        options = dict(
            num_bins=self.num_bins,
            clip=self.clip,
            min_std_dev=self.min_std_dev,
            max_std_dev=self.max_std_dev,
            min_prob=self.min_prob,
            log_dev=self.log_dev,
        )
        return DiscretizedGMM(params, **options)
|
class DiscretizedNormalFactory(DiscreteDistributionFactory):
    """Factory producing DiscretizedNormal distributions.

    Mirrors DiscretizedGMMFactory: options are captured at construction time
    and forwarded unchanged by `get_dist`.
    """

    def __init__(self, num_bins, clip=True, min_std_dev=1e-3, max_std_dev=10, min_prob=1e-5, log_dev=True):
        # Each option is kept as a public attribute so callers can inspect it.
        self.num_bins = num_bins
        self.clip = clip
        self.min_std_dev = min_std_dev
        self.max_std_dev = max_std_dev
        self.min_prob = min_prob
        self.log_dev = log_dev

    def get_dist(self, params, input_params=None, t=None):
        """Wrap raw `params` in a DiscretizedNormal using the stored options.

        `input_params` and `t` are accepted for interface compatibility only.
        """
        option_names = ("num_bins", "clip", "min_std_dev", "max_std_dev", "min_prob", "log_dev")
        return DiscretizedNormal(params, **{name: getattr(self, name) for name in option_names})
|
def noise_pred_params_to_data_pred_params(noise_pred_params: torch.Tensor, input_mean: torch.Tensor, t: torch.Tensor, min_variance: float, min_t: float = 1e-6):
    """Convert output parameters that predict the noise added to data, to parameters that predict the data.

    The conversion uses posterior_var = min_variance ** t and gamma = 1 - posterior_var,
    then data_mean = input_mean / gamma - sqrt(posterior_var / gamma) * noise_mean.
    NOTE(review): this looks like the epsilon-to-x parameterization from the
    Bayesian Flow Networks continuous-data formulation — confirm against the paper.

    noise_pred_params: network output; trailing axis holds per-element parameters
        (1 -> mean; 2 -> mean + log-dev; multiple of 3 -> mixture logits + means + log-devs).
    input_mean: per-batch input means; flattened from dim 1 onward here.
    t: time, either a tensor (flattened like input_mean) or a scalar broadcast to it.
    min_variance: base of posterior_var = min_variance ** t (equals the variance at t = 1).
    min_t: below this time the outputs are overridden (zero mean, unit dev) instead of computed.
    """
    # Remember the leading (data) shape; the trailing parameter axis is
    # restored at the end via reshape(data_shape + [-1]).
    data_shape = list(noise_pred_params.shape)[:-1]
    # NOTE(review): `sandwich` is a project helper — presumably reshapes to
    # (batch, elements, params); confirm against its definition.
    noise_pred_params = sandwich(noise_pred_params)
    input_mean = input_mean.flatten(start_dim=1)
    if torch.is_tensor(t):
        t = t.flatten(start_dim=1)
    else:
        # Scalar t: broadcast to input_mean's shape (and dtype/device) via 0 * x + t.
        t = (input_mean * 0) + t
    # Mask of positions with t below min_t; these get fixed fallback outputs below.
    alpha_mask = (t < min_t).unsqueeze(-1)
    # Clamp t so the gamma division below stays bounded even where the mask applies.
    posterior_var = torch.pow(min_variance, t.clamp(min=min_t))
    gamma = 1 - posterior_var
    # A and B are the affine coefficients of data_mean = A - B * noise_mean.
    A = (input_mean / gamma).unsqueeze(-1)
    B = (posterior_var / gamma).sqrt().unsqueeze(-1)
    data_pred_params = []
    # Trailing-axis layout: 1 param -> mean only; 2 -> mean + log-dev;
    # otherwise a multiple of 3 -> mixture-weight logits + means + log-devs.
    if noise_pred_params.size(-1) == 1:
        noise_pred_mean = noise_pred_params
    elif noise_pred_params.size(-1) == 2:
        noise_pred_mean, noise_pred_log_dev = noise_pred_params.chunk(2, -1)
    else:
        assert noise_pred_params.size(-1) % 3 == 0
        mix_wt_logits, noise_pred_mean, noise_pred_log_dev = noise_pred_params.chunk(3, -1)
        # Mixture-weight logits pass through to the data prediction unchanged.
        data_pred_params.append(mix_wt_logits)
    data_pred_mean = A - (B * noise_pred_mean)
    # Where t < min_t, force a zero mean (0 * x keeps shape/dtype/device).
    data_pred_mean = torch.where(alpha_mask, 0 * data_pred_mean, data_pred_mean)
    data_pred_params.append(data_pred_mean)
    if noise_pred_params.size(-1) >= 2:
        # NOTE(review): `safe_exp` is a project helper — presumably a clamped/
        # overflow-safe exp; confirm against its definition.
        noise_pred_dev = safe_exp(noise_pred_log_dev)
        data_pred_dev = B * noise_pred_dev
        # Where t < min_t, force a unit std-dev (1 + 0 * x keeps shape/dtype/device).
        data_pred_dev = torch.where(alpha_mask, 1 + (0 * data_pred_dev), data_pred_dev)
        data_pred_params.append(data_pred_dev)
    data_pred_params = torch.cat(data_pred_params, -1)
    data_pred_params = data_pred_params.reshape(data_shape + [-1])
    # NOTE(review): the visible source ends here without a return statement —
    # the scrape appears truncated; confirm a `return data_pred_params` follows.
|
# NOTE: removed HuggingFace dataset-viewer footer residue ("Subsets and Splits / No community queries yet ...") — scraping artifact, not part of the source.