# NOTE(review): the four lines below are dataset-export residue (a "text" column
# header plus string-length statistics), not Python source. Kept as a comment so
# the file stays parseable; safe to delete once confirmed.
# text / stringlengths / 1 / 93.6k
@functools.cached_property
def mean(self):
    """Expected value of the distribution.

    Stub: returns None here. Presumably intended to be overridden by
    concrete subclasses (cf. DiscretizedDistribution.mean) — it is not
    marked @abstractmethod like log_prob/sample; TODO confirm intent.
    """
    pass
@functools.cached_property
def mode(self):
    """Most probable value of the distribution.

    Stub: returns None here. Presumably intended to be overridden by
    concrete subclasses (cf. DiscretizedDistribution.mode) — it is not
    marked @abstractmethod like log_prob/sample; TODO confirm intent.
    """
    pass
@abstractmethod
def log_prob(self, x):
    """Return the log-probability of `x` under the distribution.

    Abstract: concrete subclasses must implement.
    """
    pass
@abstractmethod
def sample(self):
    """Draw a sample from the distribution.

    Abstract: concrete subclasses must implement.
    """
    pass
class DiscretizedDistribution(DiscreteDistribution):
    """Distribution over `num_bins` equal-width bins partitioning [-1, 1].

    Subclasses provide `self.probs`, a tensor whose last axis (size
    num_bins) holds the per-bin probability masses.
    """

    def __init__(self, num_bins, device):
        width = 2.0 / num_bins
        self.num_bins = num_bins
        self.bin_width = width
        self.half_bin_width = width / 2.0
        self.device = device

    @functools.cached_property
    def class_centres(self):
        """Centres of the num_bins bins, evenly spaced in (-1, 1)."""
        first_centre = self.half_bin_width - 1
        return torch.arange(first_centre, 1, self.bin_width, device=self.device)

    @functools.cached_property
    def class_boundaries(self):
        """The num_bins - 1 interior boundaries between adjacent bins."""
        first_boundary = self.bin_width - 1
        return torch.arange(first_boundary, 1 - self.half_bin_width, self.bin_width, device=self.device)

    @functools.cached_property
    def mean(self):
        """Probability-weighted average of the bin centres."""
        return (self.probs * self.class_centres).sum(-1)

    @functools.cached_property
    def mode(self):
        """Centre of the highest-probability bin, with the batch shape of probs."""
        flat_idx = self.probs.argmax(-1).flatten()
        batch_shape = self.probs.shape[:-1]
        return self.class_centres[flat_idx].reshape(batch_shape)
class DiscretizedCtsDistribution(DiscretizedDistribution):
def __init__(self, cts_dist, num_bins, device, batch_dims, clip=True, min_prob=1e-5):
    """Discretize a continuous distribution on (approximately) [-1, 1].

    cts_dist: continuous distribution exposing .cdf/.log_prob/.sample.
    num_bins: number of equal-width bins covering [-1, 1].
    batch_dims: number of batch axes in cts_dist's tensors.
    clip: if True, tail mass outside [-1, 1] is folded into the edge bins;
          otherwise probabilities are renormalized by the mass in [-1, 1].
    min_prob: threshold below which probabilities get special handling.
    """
    super().__init__(num_bins, device)
    self.cts_dist = cts_dist
    self.batch_dims = batch_dims
    self.clip = clip
    self.min_prob = min_prob
    # Cached for log_prob's density-based fallback: log P ≈ log pdf + log(width).
    self.log_bin_width = log(self.bin_width)
@functools.cached_property
def probs(self):
    """Per-bin probability masses with the bin axis last.

    Evaluates the continuous CDF at the interior bin boundaries and
    differences adjacent values to obtain bin masses.
    """
    # Leading axis indexes boundaries; trailing singleton axes broadcast
    # against the distribution's batch shape.
    boundary_shape = [-1] + ([1] * self.batch_dims)
    interior = self.cts_dist.cdf(self.class_boundaries.reshape(boundary_shape))
    template = interior[:1]  # single-boundary slice, used only for shape/dtype
    if self.clip:
        # Pin the CDF to 0 / 1 at the ends so tail mass outside [-1, 1]
        # is absorbed by the first and last bins.
        stacked = torch.cat([torch.zeros_like(template), interior, torch.ones_like(template)], 0)
        return (stacked[1:] - stacked[:-1]).moveaxis(0, -1)
    # No clipping: renormalize by the total mass inside [-1, 1].
    lo = self.cts_dist.cdf(torch.zeros_like(template) - 1)
    hi = self.cts_dist.cdf(torch.ones_like(template))
    stacked = torch.cat([lo, interior, hi], 0)
    span = hi - lo
    degenerate = span < self.min_prob  # almost no mass falls inside [-1, 1]
    safe_span = torch.where(degenerate, (span * 0) + 1, span)
    masses = (stacked[1:] - stacked[:-1]) / safe_span
    # Degenerate entries fall back to a uniform distribution over bins.
    masses = torch.where(degenerate, (masses * 0) + (1 / self.num_bins), masses)
    return masses.moveaxis(0, -1)
def prob(self, x):
    """Probability mass of the bin containing each element of `x`."""
    idx = float_to_idx(x, self.num_bins)
    mid = idx_to_float(idx, self.num_bins)  # centre of the containing bin
    lower = self.cts_dist.cdf(mid - self.half_bin_width)
    upper = self.cts_dist.cdf(mid + self.half_bin_width)
    if self.clip:
        # Edge bins absorb the tails: pin the CDF to 0 below the first
        # boundary and to 1 above the last.
        lower = torch.where(idx <= 0, torch.zeros_like(mid), lower)
        upper = torch.where(idx >= (self.num_bins - 1), torch.ones_like(mid), upper)
        return upper - lower
    # Otherwise renormalize by the total mass inside [-1, 1].
    lo_cdf = self.cts_dist.cdf(torch.zeros_like(mid) - 1)
    hi_cdf = self.cts_dist.cdf(torch.ones_like(mid))
    span = hi_cdf - lo_cdf
    degenerate = span < self.min_prob
    safe_span = torch.where(degenerate, (span * 0) + 1, span)
    mass = (upper - lower) / safe_span
    # Fall back to uniform mass when almost nothing lies in [-1, 1].
    return torch.where(degenerate, (mass * 0) + (1 / self.num_bins), mass)
def log_prob(self, x):
    """Log bin mass of `x`; uses a density approximation when the mass underflows."""
    mass = self.prob(x)
    # For tiny masses, approximate log P(bin) by log pdf(bin centre) + log(bin width),
    # which is numerically stable where the CDF difference rounds to zero.
    density_fallback = self.cts_dist.log_prob(quantize(x, self.num_bins)) + self.log_bin_width
    return torch.where(mass < self.min_prob, density_fallback, safe_log(mass))
def sample(self, sample_shape=torch.Size([])):
if self.clip:
return quantize(self.cts_dist.sample(sample_shape), self.num_bins)