id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
20,291 | import numpy as np
import torch
def img2mse(x, y, mask):
    """Mean squared error between x and y, optionally restricted to a mask.

    When `mask` is given, the squared error is averaged only over the
    masked entries (weighted by the mask values).
    """
    if mask is None:
        return ((x - y) ** 2).mean()
    sq_err = (x - y) ** 2
    return (sq_err * mask).sum() / mask.sum()
20,292 | import numpy as np
import torch
def mse2psnr(x):
    """Convert an MSE tensor to PSNR in decibels: -10 * log10(mse)."""
    ln10 = np.log(10)
    return -10.0 * torch.log(x) / ln10
20,293 | import numpy as np
import torch
def cast_rays(t_vals, origins, directions, radii, ray_shape):
    """Lift ray intervals into Gaussians (means, covariances) along each ray.

    `ray_shape` selects the interval model: "cone" or "cylinder".
    """
    seg_start = t_vals[..., :-1]
    seg_end = t_vals[..., 1:]
    if ray_shape == "cone":
        to_gaussian = conical_frustum_to_gaussian
    elif ray_shape == "cylinder":
        to_gaussian = cylinder_to_gaussian
    else:
        assert False
    means, covs = to_gaussian(directions, seg_start, seg_end, radii)
    # Gaussians are computed in ray-local coordinates; shift by the origin.
    return means + origins[..., None, :], covs
def sample_along_rays(
    rays_o,
    rays_d,
    radii,
    num_samples,
    near,
    far,
    randomized,
    lindisp,
    ray_shape,
):
    """Stratified sampling of t-values along rays, then cast to Gaussians.

    Returns (t_vals, (means, covs)) with num_samples + 1 edges per ray.
    """
    batch = rays_o.shape[0]
    frac = torch.linspace(0.0, 1.0, num_samples + 1, device=rays_o.device)
    if lindisp:
        # Sample linearly in inverse depth (disparity).
        t_vals = 1.0 / (1.0 / near * (1.0 - frac) + 1.0 / far * frac)
    else:
        t_vals = near * (1.0 - frac) + far * frac
    if randomized:
        # Jitter each sample uniformly within its stratum.
        midpoints = 0.5 * (t_vals[..., 1:] + t_vals[..., :-1])
        hi = torch.cat([midpoints, t_vals[..., -1:]], -1)
        lo = torch.cat([t_vals[..., :1], midpoints], -1)
        jitter = torch.rand((batch, num_samples + 1), device=rays_o.device)
        t_vals = lo + (hi - lo) * jitter
    else:
        t_vals = torch.broadcast_to(t_vals, (batch, num_samples + 1))
    means, covs = cast_rays(t_vals, rays_o, rays_d, radii, ray_shape)
    return t_vals, (means, covs)
20,294 | import numpy as np
import torch
def sorted_piecewise_constant_pdf(
bins, weights, num_samples, randomized, float_min_eps=2**-32
):
def cast_rays(t_vals, origins, directions, radii, ray_shape):
def resample_along_rays(
    rays_o,
    rays_d,
    radii,
    t_vals,
    weights,
    randomized,
    ray_shape,
    stop_level_grad,
    resample_padding,
):
    """Resample t-values proportionally to blurred, padded coarse weights."""
    # Max-blur the weights with a [1, 1] window so the resampling PDF is a
    # little wider than the coarse weights, then pad to avoid exact zeros.
    padded = torch.cat([weights[..., :1], weights, weights[..., -1:]], dim=-1)
    maxes = torch.fmax(padded[..., :-1], padded[..., 1:])
    blurred = 0.5 * (maxes[..., :-1] + maxes[..., 1:])
    pdf_weights = blurred + resample_padding
    new_t_vals = sorted_piecewise_constant_pdf(
        t_vals, pdf_weights, t_vals.shape[-1], randomized
    )
    if stop_level_grad:
        # Do not backprop through the sampling locations.
        new_t_vals = new_t_vals.detach()
    means, covs = cast_rays(new_t_vals, rays_o, rays_d, radii, ray_shape)
    return new_t_vals, (means, covs)
20,295 | import numpy as np
import torch
def expected_sin(x, x_var):
def integrated_pos_enc(samples, min_deg, max_deg):
    """Integrated positional encoding of a Gaussian (mean, diagonal cov)."""
    mean, cov_diag = samples
    freqs = torch.tensor([2**i for i in range(min_deg, max_deg)]).type_as(mean)
    out_shape = list(mean.shape[:-1]) + [-1]
    scaled_mean = torch.reshape(mean[..., None, :] * freqs[:, None], out_shape)
    scaled_var = torch.reshape(
        cov_diag[..., None, :] * freqs[:, None] ** 2, out_shape
    )
    # expected_sin over [y, y + pi/2] yields [E[sin], E[cos]] features.
    return expected_sin(
        torch.cat([scaled_mean, scaled_mean + 0.5 * np.pi], axis=-1),
        torch.cat([scaled_var] * 2, axis=-1),
    )[0]
20,296 | import numpy as np
import torch
def volumetric_rendering(rgb, density, t_vals, dirs, white_bkgd):
t_mids = 0.5 * (t_vals[..., :-1] + t_vals[..., 1:])
t_dists = t_vals[..., 1:] - t_vals[..., :-1]
delta = t_dists * torch.norm(dirs[..., None, :], dim=-1)
# Note that we're quietly turning density from [..., 0] to [...].
density_delta = density[..., 0] * delta
alpha = 1 - torch.exp(-density_delta)
trans = torch.exp(
-torch.cat(
[
torch.zeros_like(density_delta[..., :1]),
torch.cumsum(density_delta[..., :-1], axis=-1),
],
axis=-1,
)
)
weights = alpha * trans
comp_rgb = (weights[..., None] * rgb).sum(axis=-2)
acc = weights.sum(axis=-1)
distance = (weights * t_mids).sum(axis=-1) / acc
distance = torch.clip(distance, t_vals[:, 0], t_vals[:, -1])
if white_bkgd:
comp_rgb = comp_rgb + (1.0 - acc[..., None])
return comp_rgb, distance, acc, weights | null |
20,297 | import numpy as np
import torch
def pos_enc(x, min_deg, max_deg, append_identity):
    """Classic NeRF positional encoding: sin/cos features at octave scales."""
    freqs = torch.tensor([2**i for i in range(min_deg, max_deg)]).type_as(x)
    scaled = torch.reshape(
        x[..., None, :] * freqs[:, None], list(x.shape[:-1]) + [-1]
    )
    # sin(z + pi/2) == cos(z), so one sin pass covers both feature halves.
    features = torch.sin(torch.cat([scaled, scaled + 0.5 * np.pi], dim=-1))
    if append_identity:
        return torch.cat([x] + [features], axis=-1)
    return features
20,298 | import itertools
import functorch
import numpy as np
import torch
import torch.nn.functional as F
def img2mse(x, y):
    """Mean squared error between two tensors."""
    return ((x - y) ** 2).mean()
20,299 | import itertools
import functorch
import numpy as np
import torch
import torch.nn.functional as F
def mse2psnr(x):
    """Convert an MSE tensor to PSNR in decibels: -10 * log10(mse)."""
    ln10 = np.log(10)
    return -10.0 * torch.log(x) / ln10
20,300 | import itertools
import functorch
import numpy as np
import torch
import torch.nn.functional as F
def contract(mean, cov, is_train=True):
    # Apply the scene contraction `_contract` to Gaussian means and propagate
    # the covariances through its Jacobian (first-order push-forward:
    # cov' = J cov J^T).
    # mean: (bsz, num_samples, dim); cov: (bsz, num_samples, dim, dim).
    bsz, num_samples, dim = mean.shape
    def _contract(x):
        # Identity inside the unit ball; contracts ||x|| in [1, inf) to [1, 2).
        x_mag_sq = torch.sum(x**2, dim=-1, keepdim=True).clip(min=1e-32)
        z = torch.where(
            x_mag_sq <= 1, x, ((2 * torch.sqrt(x_mag_sq) - 1) / x_mag_sq) * x
        )
        return z
    mean_reshape = mean.reshape(bsz * num_samples, dim)
    cov_reshape = cov.reshape(bsz * num_samples, dim, dim)
    if is_train:
        # NOTE(review): functorch.vjp returns (output, vjp_fn); [0] is simply
        # _contract(mean) — the vjp function itself is never used here.
        ft_mean = functorch.vjp(_contract, mean)[0]
        # Per-sample Jacobian of the contraction, batched with vmap.
        ft_jacobian = functorch.vmap(functorch.jacrev(_contract, argnums=0))(
            mean_reshape
        )
    else:
        # Inference mode forbids autograd bookkeeping, so temporarily
        # re-enable grad tracking for the Jacobian computation.
        with torch.inference_mode(False):
            with torch.enable_grad():
                ft_mean = functorch.vjp(_contract, mean)[0]
                ft_jacobian = functorch.vmap(functorch.jacrev(_contract, argnums=0))(
                    mean_reshape
                )
    # cov' = J @ cov @ J^T, computed in two einsum steps.
    ft_cov = torch.einsum("bij, bjk -> bik", ft_jacobian, cov_reshape)
    ft_cov = torch.einsum("bij, bkj -> bik", ft_cov, ft_jacobian)
    return (
        ft_mean.reshape(bsz, num_samples, dim).detach(),
        ft_cov.reshape(bsz, num_samples, dim, dim).detach(),
    )
20,301 | import itertools
import functorch
import numpy as np
import torch
import torch.nn.functional as F
def lift_and_diagonalize(means, covs, basis):
    """Project Gaussian means into `basis` and take the diagonal of the
    projected covariance (variance along each basis vector)."""
    proj_mean = means @ basis
    # diag(B^T C B) without forming the full projected covariance.
    proj_var = torch.sum(basis[None, None, ...] * (covs @ basis), dim=-2)
    return proj_mean, proj_var
20,302 | import itertools
import functorch
import numpy as np
import torch
import torch.nn.functional as F
def expected_sin(mean, var):
def integrated_pos_enc(mean, var, min_deg, max_deg):
    """IPE features of a diagonal Gaussian over degrees [min_deg, max_deg)."""
    freqs = 2 ** torch.arange(min_deg, max_deg).type_as(mean)
    flat = list(mean.shape[:-1]) + [-1]
    lifted_mean = torch.reshape(mean[..., None, :] * freqs[:, None], flat)
    lifted_var = torch.reshape(var[..., None, :] * freqs[:, None] ** 2, flat)
    # Shifting by pi/2 turns the second half of the sin features into cosines.
    return expected_sin(
        torch.cat([lifted_mean, lifted_mean + 0.5 * np.pi], dim=-1),
        torch.cat([lifted_var] * 2, dim=-1),
    )
20,303 | import itertools
import functorch
import numpy as np
import torch
import torch.nn.functional as F
def pos_enc(x, min_deg, max_deg, append_identity):
scales = 2 ** torch.arange(min_deg, max_deg).type_as(x)
xb = torch.reshape((x[..., None, :] * scales[:, None]), x.shape[:-1] + (-1,))
four_feat = torch.sin(torch.cat([xb, xb + 0.5 * np.pi], dim=-1))
if append_identity:
return torch.cat([x] + [four_feat], dim=-1)
else:
return four_feat | null |
20,304 | import itertools
import functorch
import numpy as np
import torch
import torch.nn.functional as F
eps = 1.1920929e-07  # float32 machine epsilon (FLT_EPSILON)
def inner_outer(t0, t1, y1):
    """Inner/outer bounds on resampling step values y1 (over bins t1) onto bins t0."""
    cum = torch.cat([torch.zeros_like(y1[..., :1]), torch.cumsum(y1, dim=-1)], dim=-1)
    idx_lo, idx_hi = searchsorted(t1, t0)
    cum_lo = torch.take_along_dim(cum, idx_lo, dim=-1)
    cum_hi = torch.take_along_dim(cum, idx_hi, dim=-1)
    y0_outer = cum_hi[..., 1:] - cum_lo[..., :-1]
    # The inner bound collapses to zero when no full t1-interval fits inside
    # the t0-interval.
    inner_mass = cum_lo[..., 1:] - cum_hi[..., :-1]
    y0_inner = torch.where(
        idx_hi[..., :-1] <= idx_lo[..., 1:],
        inner_mass,
        torch.zeros_like(inner_mass),
    )
    return y0_inner, y0_outer
def lossfun_outer(t, w, t_env, w_env):
    """Proposal loss: penalize weights w exceeding the envelope's outer bound."""
    w_outer = inner_outer(t, t_env, w_env)[1]
    excess = torch.clip(w - w_outer, min=0)
    return excess**2 / (w + eps)
20,305 | import itertools
import functorch
import numpy as np
import torch
import torch.nn.functional as F
def lossfun_distortion(t, w):
    """Distortion loss (mip-NeRF 360): encourages compact weight distributions."""
    mids = (t[..., 1:] + t[..., :-1]) / 2
    pairwise = torch.abs(mids[..., :, None] - mids[..., None, :])
    # Cross-term: weighted mean pairwise distance between interval midpoints.
    inter = torch.sum(w * torch.sum(w[..., None, :] * pairwise, dim=-1), dim=-1)
    # Self-term: each interval's own spread.
    intra = torch.sum(w**2 * (t[..., 1:] - t[..., :-1]), dim=-1) / 3
    return inter + intra
20,306 | import itertools
import functorch
import numpy as np
import torch
import torch.nn.functional as F
def construct_ray_warps(t_near, t_far):
    """Build forward/inverse warps between metric t and normalized s in [0, 1],
    linear in inverse distance (disparity)."""
    s_near = 1 / t_near
    s_far = 1 / t_far

    def t_to_s(t):
        return (1 / t - s_near) / (s_far - s_near)

    def s_to_t(s):
        return 1 / (s * s_far + (1 - s) * s_near)

    return t_to_s, s_to_t
20,307 | import itertools
import functorch
import numpy as np
import torch
import torch.nn.functional as F
eps = 1.1920929e-07  # float32 machine epsilon (FLT_EPSILON)
def max_dilate(t, w, dilation, domain):
    # 1-D max filter in metric space: widen every interval [t_i, t_{i+1}] by
    # `dilation` on each side, and give each new bin the max weight of all
    # dilated intervals that cover it. `domain` clamps the new edges.
    t0 = t[..., :-1] - dilation
    t1 = t[..., 1:] + dilation
    # New bin edges: the original edges plus all dilated endpoints, sorted.
    t_dilate = torch.sort(torch.cat([t, t0, t1], dim=-1), dim=-1).values
    t_dilate = torch.clip(t_dilate, domain[0], domain[1])
    # mask[..., i, j] is True when dilated interval j contains new edge i.
    mask = (t0[..., None, :] <= t_dilate[..., None]) & (
        t1[..., None, :] > t_dilate[..., None]
    )
    # Max over covering intervals; the trailing edge is dropped so weights
    # align with the new intervals rather than the new edges.
    w_dilate = (
        torch.where(mask, w[..., None, :], torch.zeros_like(w[..., None, :]))
        .max(dim=-1)
        .values[..., :-1]
    )
    return t_dilate, w_dilate
def weight_to_pdf(t, w):
    """Convert per-interval weights into a piecewise-constant PDF over bins t."""
    # NOTE(review): w is squeezed on its last dim — presumably weights arrive
    # with a trailing channel dimension; confirm against callers.
    widths = torch.clip(t[..., 1:] - t[..., :-1], min=eps)
    return w.squeeze(-1) / widths
def pdf_to_weight(t, p):
    """Integrate a piecewise-constant PDF back into per-interval weights."""
    widths = t[..., 1:] - t[..., :-1]
    return p * widths
def max_dilate_weights(t, w, dilation, domain, renormalize):
    """Dilate weights in metric space via their PDF, optionally renormalizing."""
    pdf = weight_to_pdf(t, w)
    t_new, pdf_new = max_dilate(t, pdf, dilation, domain)
    w_new = pdf_to_weight(t_new, pdf_new)
    if renormalize:
        total = torch.clip(torch.sum(w_new, dim=-1, keepdim=True), min=eps)
        w_new = w_new / total
    return t_new, w_new
20,308 | import itertools
import functorch
import numpy as np
import torch
import torch.nn.functional as F
def compute_alpha_weights(density, tdist, dirs, opaque_background=False):
t_delta = tdist[..., 1:] - tdist[..., :-1]
delta = t_delta * torch.norm(dirs[..., None, :], dim=-1)
density_delta = density * delta
if opaque_background:
# Equivalent to making the final t-interval infinitely wide.
density_delta = torch.cat(
[
density_delta[..., :-1],
torch.full_like(density_delta[..., -1:], torch.inf),
],
dim=-1,
)
alpha = 1 - torch.exp(-density_delta)
trans = torch.exp(
-torch.cat(
[
torch.zeros_like(density_delta[..., :1]),
torch.cumsum(density_delta[..., :-1], dim=-1),
],
dim=-1,
)
)
weights = alpha * trans
return weights, alpha, trans | null |
20,309 | import itertools
import functorch
import numpy as np
import torch
import torch.nn.functional as F
def volumetric_rendering(
    rgbs, weights, tdist, bg_rgbs, t_far, compute_extras, extras=None
):
    """Composite per-sample colors against a background color.

    Only the "rgb" entry of the rendering dict is populated here.
    """
    acc = weights.sum(dim=-1)
    # Whatever weight is left over after compositing goes to the background.
    bg_w = torch.clip(1 - acc[..., None], min=0)
    composited = (weights[..., None] * rgbs).sum(dim=-2) + bg_w * bg_rgbs
    rendering = {"rgb": composited}
    return rendering
20,310 | import itertools
import functorch
import numpy as np
import torch
import torch.nn.functional as F
def conical_frustum_to_gaussian(d, t0, t1, radius, diag):
    """Approximate a conical frustum spanning [t0, t1] along d as a Gaussian."""
    mu = (t0 + t1) / 2   # frustum midpoint along the ray
    hw = (t1 - t0) / 2   # frustum half-width
    denom = (3 * mu**2 + hw**2).clip(min=eps)
    t_mean = mu + (2 * mu * hw**2) / denom
    t_var = (hw**2) / 3 - (4 / 15) * hw**4 * (12 * mu**2 - hw**2) / denom**2
    r_var = ((mu**2) / 4 + (5 / 12) * hw**2 - (4 / 15) * (hw**4) / denom) * radius**2
    return lift_gaussian(d, t_mean, t_var, r_var, diag)
def cylinder_to_gaussian(d, t0, t1, radius, diag):
    """Approximate a cylinder of given radius spanning [t0, t1] as a Gaussian."""
    t_mean = (t0 + t1) / 2
    t_var = (t1 - t0) ** 2 / 12   # variance of a uniform segment
    r_var = radius**2 / 4         # variance of a uniform disk radius
    return lift_gaussian(d, t_mean, t_var, r_var, diag)
def cast_rays(t_vals, origins, directions, radii, ray_shape, diag=True):
    """Lift ray intervals into Gaussians along each ray.

    `ray_shape` selects the interval model ("cone" or "cylinder"); `diag`
    is forwarded to the Gaussian-lifting helper.
    """
    seg_start = t_vals[..., :-1]
    seg_end = t_vals[..., 1:]
    if ray_shape == "cone":
        to_gaussian = conical_frustum_to_gaussian
    elif ray_shape == "cylinder":
        to_gaussian = cylinder_to_gaussian
    else:
        assert False
    means, covs = to_gaussian(directions, seg_start, seg_end, radii, diag)
    # Gaussians are computed in ray-local coordinates; shift by the origin.
    return means + origins[..., None, :], covs
20,311 | import itertools
import functorch
import numpy as np
import torch
import torch.nn.functional as F
def sample(
    randomized,
    t,
    w_logits,
    num_samples,
    single_jitter=False,
    deterministic_center=False,
):
    """Draw samples from the histogram (t, w_logits) via inverse-CDF sampling."""
    if not randomized:
        if deterministic_center:
            # Place samples at the centers of evenly sized strata.
            half = 1 / (2 * num_samples)
            u = torch.linspace(half, 1 - half - eps, num_samples)
        else:
            u = torch.linspace(0, 1 - eps, num_samples)
        u = torch.broadcast_to(u, t.shape[:-1] + (num_samples,))
    else:
        # Stratified jitter; one shared jitter per ray when single_jitter.
        u_max = eps + (1 - eps) / num_samples
        max_jitter = (1 - u_max) / (num_samples - 1) - eps
        jitter_dims = 1 if single_jitter else num_samples
        u = (
            torch.linspace(0, 1 - u_max, num_samples)
            + torch.rand(t.shape[:-1] + (jitter_dims,)) * max_jitter
        )
    u = u.type_as(t)
    return invert_cdf(u, t, w_logits)
def sample_intervals(
    randomized,
    t,
    w_logits,
    num_samples,
    single_jitter=False,
    domain=(-torch.inf, torch.inf),
):
    """Sample interval edges whose centers follow the histogram (t, w_logits)."""
    centers = sample(
        randomized,
        t,
        w_logits,
        num_samples,
        single_jitter,
        deterministic_center=True,
    )
    # Midpoints between consecutive centers become the interior edges...
    mids = (centers[..., 1:] + centers[..., :-1]) / 2
    lo, hi = domain
    # ...and the outer edges are reflections about the first/last centers,
    # clamped to the domain.
    first = torch.clip(2 * centers[..., :1] - mids[..., :1], min=lo)
    last = torch.clip(2 * centers[..., -1:] - mids[..., -1:], max=hi)
    return torch.cat([first, mids, last], dim=-1)
20,312 | import itertools
import functorch
import numpy as np
import torch
import torch.nn.functional as F
def compute_sq_dist(mat0, mat1=None):
    """Squared Euclidean distance between all pairs of columns of mat0/mat1."""
    if mat1 is None:
        mat1 = mat0
    # ||x - y||^2 == ||x||^2 + ||y||^2 - 2 x^T y, evaluated for all pairs.
    norms0 = np.sum(mat0**2, 0)
    norms1 = np.sum(mat1**2, 0)
    sq_dist = norms0[:, None] + norms1[None, :] - 2 * mat0.T @ mat1
    # Clamp tiny negatives caused by floating-point error.
    return np.maximum(0, sq_dist)
def tesselate_geodesic(base_verts, base_faces, v, eps=1e-4):
    """Tesselate the vertices of a geodesic polyhedron.

    Args:
      base_verts: tensor of floats, the vertex coordinates of the geodesic.
      base_faces: tensor of ints, the indices of the vertices of base_verts that
        constitute each face of the polyhedra.
      v: int, the factor of the tesselation (v==1 is a no-op).
      eps: float, a small value used to determine if two vertices are the same.

    Returns:
      verts: a tensor of floats, the coordinates of the tesselated vertices.

    Raises:
      ValueError: if v is not an integer.
    """
    if not isinstance(v, int):
        # Fixed grammar of the error message ("must an integer").
        raise ValueError(f"v {v} must be an integer")
    tri_weights = compute_tesselation_weights(v)
    verts = []
    for base_face in base_faces:
        # Barycentric interpolation of the face corners, projected to the sphere.
        new_verts = np.matmul(tri_weights, base_verts[base_face, :])
        new_verts /= np.sqrt(np.sum(new_verts**2, 1, keepdims=True))
        verts.append(new_verts)
    verts = np.concatenate(verts, 0)
    # Deduplicate vertices shared by adjacent faces (within eps).
    sq_dist = compute_sq_dist(verts.T)
    assignment = np.array([np.min(np.argwhere(d <= eps)) for d in sq_dist])
    unique = np.unique(assignment)
    verts = verts[unique, :]
    return verts
The provided code snippet includes necessary dependencies for implementing the `generate_basis` function. Write a Python function `def generate_basis(base_shape, angular_tesselation, remove_symmetries=True, eps=1e-4)` to solve the following problem:
Generates a 3D basis by tesselating a geometric polyhedron. Args: base_shape: string, the name of the starting polyhedron, must be either 'icosahedron' or 'octahedron'. angular_tesselation: int, the number of times to tesselate the polyhedron, must be >= 1 (a value of 1 is a no-op to the polyhedron). remove_symmetries: bool, if True then remove the symmetric basis columns, which is usually a good idea because otherwise projections onto the basis will have redundant negative copies of each other. eps: float, a small number used to determine symmetries. Returns: basis: a matrix with shape [3, n].
Here is the function:
def generate_basis(base_shape, angular_tesselation, remove_symmetries=True, eps=1e-4):
    """Generates a 3D basis by tesselating a geometric polyhedron.
    Args:
      base_shape: string, the name of the starting polyhedron, must be either
        'icosahedron' or 'octahedron'.
      angular_tesselation: int, the number of times to tesselate the polyhedron,
        must be >= 1 (a value of 1 is a no-op to the polyhedron).
      remove_symmetries: bool, if True then remove the symmetric basis columns,
        which is usually a good idea because otherwise projections onto the basis
        will have redundant negative copies of each other.
      eps: float, a small number used to determine symmetries.
    Returns:
      basis: a matrix with shape [3, n].
    """
    if base_shape == "icosahedron":
        # Golden ratio; the 12 icosahedron vertices are the cyclic permutations
        # of (0, +-1, +-a), normalized to the unit sphere by sqrt(a + 2).
        a = (np.sqrt(5) + 1) / 2
        verts = np.array(
            [
                (-1, 0, a),
                (1, 0, a),
                (-1, 0, -a),
                (1, 0, -a),
                (0, a, 1),
                (0, a, -1),
                (0, -a, 1),
                (0, -a, -1),
                (a, 1, 0),
                (-a, 1, 0),
                (a, -1, 0),
                (-a, -1, 0),
            ]
        ) / np.sqrt(a + 2)
        # The 20 triangular faces, as vertex-index triples.
        faces = np.array(
            [
                (0, 4, 1),
                (0, 9, 4),
                (9, 5, 4),
                (4, 5, 8),
                (4, 8, 1),
                (8, 10, 1),
                (8, 3, 10),
                (5, 3, 8),
                (5, 2, 3),
                (2, 7, 3),
                (7, 10, 3),
                (7, 6, 10),
                (7, 11, 6),
                (11, 0, 6),
                (0, 1, 6),
                (6, 1, 10),
                (9, 0, 11),
                (9, 11, 2),
                (9, 2, 5),
                (7, 2, 11),
            ]
        )
        verts = tesselate_geodesic(verts, faces, angular_tesselation)
    elif base_shape == "octahedron":
        verts = np.array(
            [(0, 0, -1), (0, 0, 1), (0, -1, 0), (0, 1, 0), (-1, 0, 0), (1, 0, 0)]
        )
        # Recover the octahedron's faces: each face's 3 vertices sit at squared
        # distance 2 from a common cube corner.
        corners = np.array(list(itertools.product([-1, 1], repeat=3)))
        pairs = np.argwhere(compute_sq_dist(corners.T, verts.T) == 2)
        faces = np.sort(np.reshape(pairs[:, 1], [3, -1]).T, 1)
        verts = tesselate_geodesic(verts, faces, angular_tesselation)
    else:
        raise ValueError(f"base_shape {base_shape} not supported")
    if remove_symmetries:
        # Remove elements of `verts` that are reflections of each other.
        match = compute_sq_dist(verts.T, -verts.T) < eps
        verts = verts[np.any(np.triu(match), 1), :]
    basis = verts[:, ::-1].copy()
    return torch.from_numpy(basis.T).to(dtype=torch.float32)
20,313 | import numpy as np
import torch
import torch.nn.functional as F
def img2mse(x, y):
    """Mean squared error between two tensors."""
    return ((x - y) ** 2).mean()
20,314 | import numpy as np
import torch
import torch.nn.functional as F
def mse2psnr(x):
    """Convert an MSE tensor to PSNR in decibels: -10 * log10(mse)."""
    ln10 = np.log(10)
    return -10.0 * torch.log(x) / ln10
20,315 | import numpy as np
import torch
import torch.nn.functional as F
def cast_rays(t_vals, origins, directions):
    """Evaluate points o + t * d for every t along each ray."""
    return directions[..., None, :] * t_vals[..., None] + origins[..., None, :]
def depth2pts_outside(rays_o, rays_d, depth):
    """Compute the points along the ray that are outside of the unit sphere.
    Args:
        rays_o: [num_rays, 3]. Ray origins of the points.
        rays_d: [num_rays, 3]. Ray directions of the points.
        depth: [num_rays, num_samples along ray]. Inverse of distance to sphere origin.
    Returns:
        pts: [num_rays, 4]. Points outside of the unit sphere. (x', y', z', 1/r)
    """
    # note: d1 becomes negative if this mid point is behind camera
    rays_o = rays_o[..., None, :].expand(
        list(depth.shape) + [3]
    )  # [N_rays, num_samples, 3]
    rays_d = rays_d[..., None, :].expand(
        list(depth.shape) + [3]
    )  # [N_rays, num_samples, 3]
    # d1: ray depth of the point closest to the sphere origin.
    d1 = -torch.sum(rays_d * rays_o, dim=-1, keepdim=True) / torch.sum(
        rays_d**2, dim=-1, keepdim=True
    )
    p_mid = rays_o + d1 * rays_d
    p_mid_norm = torch.norm(p_mid, dim=-1, keepdim=True)
    rays_d_cos = 1.0 / torch.norm(rays_d, dim=-1, keepdim=True)
    # d2: half-chord from the closest point to the unit-sphere surface.
    # NOTE(review): NaN if p_mid_norm > 1 (the ray misses the sphere) —
    # presumably all rays originate inside the unit sphere; confirm.
    d2 = torch.sqrt(1.0 - p_mid_norm * p_mid_norm) * rays_d_cos
    p_sphere = rays_o + (d1 + d2) * rays_d
    # Rotate p_sphere within the (origin, ray) plane so its inverse distance
    # matches the requested `depth`.
    rot_axis = torch.cross(rays_o, p_sphere, dim=-1)
    rot_axis = rot_axis / torch.norm(rot_axis, dim=-1, keepdim=True)
    phi = torch.asin(p_mid_norm)
    theta = torch.asin(p_mid_norm * depth[..., None])  # depth is inside [0, 1]
    rot_angle = phi - theta  # [..., 1]
    # now rotate p_sphere
    # Rodrigues formula: https://en.wikipedia.org/wiki/Rodrigues%27_rotation_formula
    p_sphere_new = (
        p_sphere * torch.cos(rot_angle)
        + torch.cross(rot_axis, p_sphere, dim=-1) * torch.sin(rot_angle)
        + rot_axis
        * torch.sum(rot_axis * p_sphere, dim=-1, keepdim=True)
        * (1.0 - torch.cos(rot_angle))
    )
    p_sphere_new = p_sphere_new / (
        torch.norm(p_sphere_new, dim=-1, keepdim=True) + 1e-10
    )
    # Output is (unit direction on the sphere, inverse distance).
    pts = torch.cat((p_sphere_new, depth.unsqueeze(-1)), dim=-1)
    return pts
def sample_along_rays(
    rays_o,
    rays_d,
    num_samples,
    near,
    far,
    randomized,
    lindisp,
    in_sphere,
):
    # Stratified sampling of t values along rays (NeRF++ style).
    # in_sphere=True samples metric depth in [near, far]; in_sphere=False
    # samples inverse depth in [0, 1] for points outside the unit sphere.
    bsz = rays_o.shape[0]
    t_vals = torch.linspace(0.0, 1.0, num_samples + 1, device=rays_o.device)
    if in_sphere:
        if lindisp:
            # Linear in disparity (inverse depth).
            t_vals = 1.0 / (1.0 / near * (1.0 - t_vals) + 1.0 / far * t_vals)
        else:
            t_vals = near * (1.0 - t_vals) + far * t_vals
    else:
        t_vals = torch.broadcast_to(t_vals, (bsz, num_samples + 1))
    if randomized:
        # Jitter each sample uniformly within its stratum.
        mids = 0.5 * (t_vals[..., 1:] + t_vals[..., :-1])
        upper = torch.cat([mids, t_vals[..., -1:]], -1)
        lower = torch.cat([t_vals[..., :1], mids], -1)
        t_rand = torch.rand((bsz, num_samples + 1), device=rays_o.device)
        t_vals = lower + (upper - lower) * t_rand
    else:
        # No-op for the out-of-sphere case, which was already broadcast above.
        t_vals = torch.broadcast_to(t_vals, (bsz, num_samples + 1))
    if in_sphere:
        coords = cast_rays(t_vals, rays_o, rays_d)
    else:
        # Out-of-sphere t values are inverse depths, consumed in decreasing
        # order by depth2pts_outside.
        t_vals = torch.flip(
            t_vals,
            dims=[
                -1,
            ],
        )  # 1.0 -> 0.0
        coords = depth2pts_outside(rays_o, rays_d, t_vals)
    return t_vals, coords
20,316 | import numpy as np
import torch
import torch.nn.functional as F
def pos_enc(x, min_deg, max_deg):
    """Positional encoding with identity appended: [x, sin(..), cos(..)]."""
    freqs = torch.tensor([2**i for i in range(min_deg, max_deg)]).type_as(x)
    scaled = torch.reshape(x[..., None, :] * freqs[:, None], list(x.shape[:-1]) + [-1])
    # sin(z + pi/2) == cos(z), so one sin pass covers both halves.
    feats = torch.sin(torch.cat([scaled, scaled + 0.5 * np.pi], dim=-1))
    return torch.cat([x] + [feats], dim=-1)
20,317 | import numpy as np
import torch
import torch.nn.functional as F
def volumetric_rendering(rgb, density, t_vals, dirs, white_bkgd, in_sphere, t_far=None):
    """NeRF++ style compositing for inside- or outside-sphere samples.

    Returns (comp_rgb, acc, weights, bg_lambda); bg_lambda is the leftover
    transmittance after the in-sphere pass (None when in_sphere is False).
    """
    eps = 1e-10
    if in_sphere:
        # Metric distances between samples; the last interval extends to t_far.
        dists = torch.cat(
            [t_vals[..., 1:] - t_vals[..., :-1], t_far - t_vals[..., -1:]], dim=-1
        )
        dists = dists * torch.norm(dirs[..., None, :], dim=-1)
    else:
        # Outside samples are ordered by decreasing inverse depth.
        dists = torch.cat(
            [
                t_vals[..., :-1] - t_vals[..., 1:],
                torch.full_like(t_vals[..., :1], 1e10),
            ],
            dim=-1,
        )
    alpha = 1.0 - torch.exp(-density[..., 0] * dists)
    surv = torch.cumprod(1.0 - alpha + eps, dim=-1)
    bg_lambda = surv[..., -1:] if in_sphere else None
    trans = torch.cat([torch.ones_like(surv[..., -1:]), surv[..., :-1]], dim=-1)
    weights = alpha * trans
    comp_rgb = (weights[..., None] * rgb).sum(dim=-2)
    acc = weights.sum(dim=-1)
    return comp_rgb, acc, weights, bg_lambda
20,318 | import numpy as np
import torch
import torch.nn.functional as F
def cast_rays(t_vals, origins, directions):
    """Evaluate o + t * d for every t value along each ray."""
    return directions[..., None, :] * t_vals[..., None] + origins[..., None, :]
def sorted_piecewise_constant_pdf(
    bins, weights, num_samples, randomized, float_min_eps=2**-32
):
    # Inverse-transform sampling from a piecewise-constant PDF over `bins`.
    eps = 1e-5
    # Pad the weights so the histogram mass is at least eps (avoids a
    # degenerate all-zero distribution).
    weight_sum = weights.sum(dim=-1, keepdims=True)
    padding = torch.fmax(torch.zeros_like(weight_sum), eps - weight_sum)
    weights = weights + padding / weights.shape[-1]
    weight_sum = weight_sum + padding
    pdf = weights / weight_sum
    cdf = torch.fmin(
        torch.ones_like(pdf[..., :-1]), torch.cumsum(pdf[..., :-1], dim=-1)
    )
    # Prepend 0 and append 1 so the CDF spans [0, 1] exactly.
    cdf = torch.cat(
        [
            torch.zeros(list(cdf.shape[:-1]) + [1], device=weights.device),
            cdf,
            torch.ones(list(cdf.shape[:-1]) + [1], device=weights.device),
        ],
        dim=-1,
    )
    s = 1 / num_samples  # NOTE(review): unused — looks like a leftover.
    if randomized:
        u = torch.rand(list(cdf.shape[:-1]) + [num_samples], device=cdf.device)
    else:
        # Deterministic, evenly spaced samples in [0, 1).
        u = torch.linspace(0.0, 1.0 - float_min_eps, num_samples, device=cdf.device)
        u = torch.broadcast_to(u, list(cdf.shape[:-1]) + [num_samples])
    # For each u, locate the surrounding bin edges and CDF values via masked
    # max/min reductions (an implicit searchsorted).
    mask = u[..., None, :] >= cdf[..., :, None]
    bin0 = (mask * bins[..., None] + ~mask * bins[..., :1, None]).max(dim=-2)[0]
    bin1 = (~mask * bins[..., None] + mask * bins[..., -1:, None]).min(dim=-2)[0]
    # Debug Here
    cdf0 = (mask * cdf[..., None] + ~mask * cdf[..., :1, None]).max(dim=-2)[0]
    cdf1 = (~mask * cdf[..., None] + mask * cdf[..., -1:, None]).min(dim=-2)[0]
    # Linear interpolation within the located bin; nan_to_num guards the
    # zero-width CDF-interval case.
    t = torch.clip(torch.nan_to_num((u - cdf0) / (cdf1 - cdf0), 0), 0, 1)
    samples = bin0 + t * (bin1 - bin0)
    return samples
def depth2pts_outside(rays_o, rays_d, depth):
    """Compute the points along the ray that are outside of the unit sphere.
    Args:
        rays_o: [num_rays, 3]. Ray origins of the points.
        rays_d: [num_rays, 3]. Ray directions of the points.
        depth: [num_rays, num_samples along ray]. Inverse of distance to sphere origin.
    Returns:
        pts: [num_rays, 4]. Points outside of the unit sphere. (x', y', z', 1/r)
    """
    # note: d1 becomes negative if this mid point is behind camera
    rays_o = rays_o[..., None, :].expand(
        list(depth.shape) + [3]
    )  # [N_rays, num_samples, 3]
    rays_d = rays_d[..., None, :].expand(
        list(depth.shape) + [3]
    )  # [N_rays, num_samples, 3]
    # d1: ray depth of the point closest to the sphere origin.
    d1 = -torch.sum(rays_d * rays_o, dim=-1, keepdim=True) / torch.sum(
        rays_d**2, dim=-1, keepdim=True
    )
    p_mid = rays_o + d1 * rays_d
    p_mid_norm = torch.norm(p_mid, dim=-1, keepdim=True)
    rays_d_cos = 1.0 / torch.norm(rays_d, dim=-1, keepdim=True)
    # d2: half-chord from the closest point to the unit-sphere surface.
    # NOTE(review): NaN if p_mid_norm > 1 (the ray misses the sphere) —
    # presumably all rays originate inside the unit sphere; confirm.
    d2 = torch.sqrt(1.0 - p_mid_norm * p_mid_norm) * rays_d_cos
    p_sphere = rays_o + (d1 + d2) * rays_d
    # Rotate p_sphere within the (origin, ray) plane so its inverse distance
    # matches the requested `depth`.
    rot_axis = torch.cross(rays_o, p_sphere, dim=-1)
    rot_axis = rot_axis / torch.norm(rot_axis, dim=-1, keepdim=True)
    phi = torch.asin(p_mid_norm)
    theta = torch.asin(p_mid_norm * depth[..., None])  # depth is inside [0, 1]
    rot_angle = phi - theta  # [..., 1]
    # now rotate p_sphere
    # Rodrigues formula: https://en.wikipedia.org/wiki/Rodrigues%27_rotation_formula
    p_sphere_new = (
        p_sphere * torch.cos(rot_angle)
        + torch.cross(rot_axis, p_sphere, dim=-1) * torch.sin(rot_angle)
        + rot_axis
        * torch.sum(rot_axis * p_sphere, dim=-1, keepdim=True)
        * (1.0 - torch.cos(rot_angle))
    )
    p_sphere_new = p_sphere_new / (
        torch.norm(p_sphere_new, dim=-1, keepdim=True) + 1e-10
    )
    # Output is (unit direction on the sphere, inverse distance).
    pts = torch.cat((p_sphere_new, depth.unsqueeze(-1)), dim=-1)
    return pts
def sample_pdf(
    bins, weights, origins, directions, t_vals, num_samples, randomized, in_sphere
):
    """Draw extra samples from the coarse weight histogram and merge them
    (sorted) with the existing t values, then cast to points."""
    fine_t = sorted_piecewise_constant_pdf(
        bins, weights, num_samples, randomized
    ).detach()
    # Keep coarse and fine samples jointly sorted.
    t_vals = torch.sort(torch.cat([t_vals, fine_t], dim=-1), dim=-1).values
    if in_sphere:
        coords = cast_rays(t_vals, origins, directions)
    else:
        # Out-of-sphere t values are inverse depths, ordered 1.0 -> 0.0.
        t_vals = torch.flip(
            t_vals,
            dims=[
                -1,
            ],
        )
        coords = depth2pts_outside(origins, directions, t_vals)
    return t_vals, coords
20,319 | import numpy as np
import torch
import torch.nn.functional as F
The provided code snippet includes necessary dependencies for implementing the `intersect_sphere` function. Write a Python function `def intersect_sphere(rays_o, rays_d)` to solve the following problem:
Compute the depth of the intersection point between this ray and unit sphere. Args: rays_o: [num_rays, 3]. Ray origins. rays_d: [num_rays, 3]. Ray directions. Returns: depth: [num_rays, 1]. Depth of the intersection point.
Here is the function:
def intersect_sphere(rays_o, rays_d):
    """Compute the depth of the intersection point between this ray and unit sphere.

    Args:
        rays_o: [num_rays, 3]. Ray origins.
        rays_d: [num_rays, 3]. Ray directions.
    Returns:
        depth: [num_rays, 1]. Depth of the intersection point.
    """
    # Depth of the point on the ray closest to the origin; negative when that
    # midpoint lies behind the camera.
    d1 = -torch.sum(rays_d * rays_o, dim=-1, keepdim=True) / torch.sum(
        rays_d**2, dim=-1, keepdim=True
    )
    closest = rays_o + d1 * rays_d
    # NOTE(review): yields NaN when the ray misses the sphere (||p||^2 > 1).
    rays_d_cos = 1.0 / torch.norm(rays_d, dim=-1, keepdim=True)
    p_norm_sq = torch.sum(closest * closest, dim=-1, keepdim=True)
    half_chord = torch.sqrt(1.0 - p_norm_sq) * rays_d_cos
    return d1 + half_chord
20,320 | import math
from dataclasses import dataclass
from functools import partial
import numpy as np
import torch
import torch.nn as nn
def img2mse(x, y):
    """Mean squared error between two tensors."""
    return ((x - y) ** 2).mean()
20,321 | import math
from dataclasses import dataclass
from functools import partial
import numpy as np
import torch
import torch.nn as nn
def mse2psnr(x):
    """Convert an MSE tensor to PSNR in decibels: -10 * log10(mse)."""
    ln10 = np.log(10)
    return -10.0 * torch.log(x) / ln10
20,322 | import math
from dataclasses import dataclass
from functools import partial
import numpy as np
import torch
import torch.nn as nn
def inthroot(x: int, n: int):
    """Exact integer n-th root: return r with r**n == x, or None if no such
    integer exists (also None for x <= 0)."""
    if x <= 0:
        return None
    lo, hi = 1, x
    # Binary search over candidate roots.
    while lo <= hi:
        mid = (lo + hi) // 2
        power = mid**n
        if power == x:
            return mid
        if power > x:
            hi = mid - 1
        else:
            lo = mid + 1
    return None
20,323 | import math
from dataclasses import dataclass
from functools import partial
import numpy as np
import torch
import torch.nn as nn
def _unexpand_bits(v):
    # Inverse of Morton bit expansion: gather every 3rd bit of v (positions
    # 0, 3, 6, ...) down into the low 16 bits. Each step halves the gap
    # between kept bits, masking away the garbage introduced by the OR.
    v &= 0x49249249
    v = (v | (v >> 2)) & 0xC30C30C3
    v = (v | (v >> 4)) & 0xF00F00F
    v = (v | (v >> 8)) & 0xFF0000FF
    v = (v | (v >> 16)) & 0x0000FFFF
    return v
def inv_morton_code_3(code):
    """Decode a 3-D Morton code into its (x, y, z) grid coordinates."""
    # x occupies the highest of each 3-bit group, z the lowest.
    return (
        _unexpand_bits(code >> 2),
        _unexpand_bits(code >> 1),
        _unexpand_bits(code),
    )
20,324 | import math
from dataclasses import dataclass
from functools import partial
import numpy as np
import torch
import torch.nn as nn
def is_pow2(x: int):
    """Return True iff x is a positive power of two."""
    return x > 0 and not (x & (x - 1))
def morton_code_3(x, y, z):
    """Interleave the bits of x, y, z into a 3-D Morton code (x highest)."""
    return (_expand_bits(x) << 2) + (_expand_bits(y) << 1) + _expand_bits(z)
def gen_morton(D, device, dtype=torch.long):
    """Build a D^3 grid of Morton codes (D must be a power of two).

    Returns a [D, D, D] tensor whose (x, y, z) entry is morton_code_3(x, y, z).
    """
    assert is_pow2(D), "Morton code requires power of 2 reso"
    arr = torch.arange(D, device=device, dtype=dtype)
    # indexing="ij" keeps the historical (x, y, z) axis order and silences the
    # torch.meshgrid deprecation warning (the old implicit default was "ij").
    X, Y, Z = torch.meshgrid(arr, arr, arr, indexing="ij")
    mort = morton_code_3(X, Y, Z)
    return mort
20,325 | import math
from dataclasses import dataclass
from functools import partial
import numpy as np
import torch
import torch.nn as nn
# Hard-coded normalization constants for real spherical harmonics, degrees 0-4,
# as consumed by eval_sh_bases below.
SH_C0 = 0.28209479177387814  # degree 0: 1 / (2 * sqrt(pi))
SH_C1 = 0.4886025119029199  # degree 1: sqrt(3) / (2 * sqrt(pi))
# degree 2 coefficients (5 terms)
SH_C2 = [
    1.0925484305920792,
    -1.0925484305920792,
    0.31539156525252005,
    -1.0925484305920792,
    0.5462742152960396,
]
# degree 3 coefficients (7 terms)
SH_C3 = [
    -0.5900435899266435,
    2.890611442640554,
    -0.4570457994644658,
    0.3731763325901154,
    -0.4570457994644658,
    1.445305721320277,
    -0.5900435899266435,
]
# degree 4 coefficients (9 terms)
SH_C4 = [
    2.5033429417967046,
    -1.7701307697799304,
    0.9461746957575601,
    -0.6690465435572892,
    0.10578554691520431,
    -0.6690465435572892,
    0.47308734787878004,
    -1.7701307697799304,
    0.6258357354491761,
]
The provided code snippet includes necessary dependencies for implementing the `eval_sh_bases` function. Write a Python function `def eval_sh_bases(basis_dim: int, dirs: torch.Tensor)` to solve the following problem:
Evaluate spherical harmonics bases at unit directions, without taking linear combination. At each point, the final result may then be obtained through simple multiplication. :param basis_dim: int SH basis dim. Currently, 1-25 square numbers supported :param dirs: torch.Tensor (..., 3) unit directions :return: torch.Tensor (..., basis_dim)
Here is the function:
def eval_sh_bases(basis_dim: int, dirs: torch.Tensor):
    """
    Evaluate spherical harmonics bases at unit directions,
    without taking linear combination.
    At each point, the final result may then be
    obtained through simple multiplication.
    :param basis_dim: int SH basis dim. Currently, 1-25 square numbers supported
    :param dirs: torch.Tensor (..., 3) unit directions
    :return: torch.Tensor (..., basis_dim)
    """
    result = torch.empty(
        (*dirs.shape[:-1], basis_dim), dtype=dirs.dtype, device=dirs.device
    )
    # Degree 0 is a constant; higher degrees fill in only when requested
    # (basis_dim is assumed to be a square: 1, 4, 9, 16, or 25).
    result[..., 0] = SH_C0
    if basis_dim > 1:
        x, y, z = dirs.unbind(-1)
        result[..., 1] = -SH_C1 * y
        result[..., 2] = SH_C1 * z
        result[..., 3] = -SH_C1 * x
        if basis_dim > 4:
            xx, yy, zz = x * x, y * y, z * z
            xy, yz, xz = x * y, y * z, x * z
            result[..., 4] = SH_C2[0] * xy
            result[..., 5] = SH_C2[1] * yz
            result[..., 6] = SH_C2[2] * (2.0 * zz - xx - yy)
            result[..., 7] = SH_C2[3] * xz
            result[..., 8] = SH_C2[4] * (xx - yy)
            if basis_dim > 9:
                result[..., 9] = SH_C3[0] * y * (3 * xx - yy)
                result[..., 10] = SH_C3[1] * xy * z
                result[..., 11] = SH_C3[2] * y * (4 * zz - xx - yy)
                result[..., 12] = SH_C3[3] * z * (2 * zz - 3 * xx - 3 * yy)
                result[..., 13] = SH_C3[4] * x * (4 * zz - xx - yy)
                result[..., 14] = SH_C3[5] * z * (xx - yy)
                result[..., 15] = SH_C3[6] * x * (xx - 3 * yy)
                if basis_dim > 16:
                    result[..., 16] = SH_C4[0] * xy * (xx - yy)
                    result[..., 17] = SH_C4[1] * yz * (3 * xx - yy)
                    result[..., 18] = SH_C4[2] * xy * (7 * zz - 1)
                    result[..., 19] = SH_C4[3] * yz * (7 * zz - 3)
                    result[..., 20] = SH_C4[4] * (zz * (35 * zz - 30) + 3)
                    result[..., 21] = SH_C4[5] * xz * (7 * zz - 3)
                    result[..., 22] = SH_C4[6] * (xx - yy) * (7 * zz - 1)
                    result[..., 23] = SH_C4[7] * xz * (xx - 3 * yy)
                    result[..., 24] = SH_C4[8] * (
                        xx * (xx - 3 * yy) - yy * (3 * xx - yy)
                    )
    return result
20,326 | import math
from dataclasses import dataclass
from functools import partial
import numpy as np
import torch
import torch.nn as nn
class CubemapCoord:
ax: torch.Tensor
ori: torch.Tensor
u: torch.Tensor
v: torch.Tensor
def query_in(self, cubemap: torch.Tensor):
face = self.ax * 2 + self.ori
# print(cubemap.shape, face.min(), face.max(), ' ',
# self.u.min(), self.u.max(),
if cubemap.ndim == 4:
return cubemap[face, self.u, self.v]
else:
return cubemap[
torch.arange(face.size(0), device=face.device), face, self.u, self.v
]
def clone(self):
return CubemapCoord(
self.ax.clone(), self.ori.clone(), self.u.clone(), self.v.clone()
)
The provided code snippet includes necessary dependencies for implementing the `dir_to_cubemap_coord` function. Write a Python function `def dir_to_cubemap_coord( xyz: torch.Tensor, face_reso: int, eac: bool = True ) -> CubemapCoord` to solve the following problem:
Convert a direction on a sphere (not necessarily normalized) :param xyz: direction (not necessarily normalized) :param face_reso: int, resolution of cubemap face :param eac: bool, if true (default) then uses equi-angular cubemaps (EAC) instead of standard cubemap; see https://blog.google/products/google-ar-vr/bringing-pixels-front-and-center-vr-video/ :return: CubemapCoord
Here is the function:
def dir_to_cubemap_coord(
    xyz: torch.Tensor, face_reso: int, eac: bool = True
) -> CubemapCoord:
    """
    Map direction vectors to cubemap face/texel coordinates.
    :param xyz: (..., 3) directions (need not be normalized)
    :param face_reso: int, resolution of each cubemap face
    :param eac: bool, if true (default) use equi-angular cubemap (EAC)
        warping rather than the standard gnomonic cubemap; see
        https://blog.google/products/google-ar-vr/bringing-pixels-front-and-center-vr-video/
    :return: CubemapCoord
    """
    dirs = xyz.float()
    # The largest-magnitude component picks the face axis; rescale so that
    # component becomes +-1.
    dominant, ax = torch.max(torch.abs(dirs), dim=-1)
    dirs = dirs * (1.0 / dominant.unsqueeze(-1))
    if eac:
        # Equi-angular warp equalizes solid angle per texel.
        warped = torch.atan(dirs) * (4 / math.pi)
    else:
        warped = dirs
    batch = torch.arange(ax.size(0))
    # The two in-face texture axes are the coordinate axes other than `ax`.
    u_axis = (ax ^ 1) & 1
    v_axis = (ax ^ 2) & 2
    # Sign of the dominant component selects the +/- face for that axis.
    ori = (warped[batch, ax] >= 0).long()
    u = ((warped[batch, u_axis] + 1) * face_reso - 1.0) * 0.5
    v = ((warped[batch, v_axis] + 1) * face_reso - 1.0) * 0.5
    return CubemapCoord(ax, ori, u, v)
20,327 | import math
from dataclasses import dataclass
from functools import partial
import numpy as np
import torch
import torch.nn as nn
class CubemapCoord:
ax: torch.Tensor
ori: torch.Tensor
u: torch.Tensor
v: torch.Tensor
def query_in(self, cubemap: torch.Tensor):
face = self.ax * 2 + self.ori
# print(cubemap.shape, face.min(), face.max(), ' ',
# self.u.min(), self.u.max(),
if cubemap.ndim == 4:
return cubemap[face, self.u, self.v]
else:
return cubemap[
torch.arange(face.size(0), device=face.device), face, self.u, self.v
]
def clone(self):
return CubemapCoord(
self.ax.clone(), self.ori.clone(), self.u.clone(), self.v.clone()
)
class CubemapBilerpQuery:
i00: CubemapCoord
i01: CubemapCoord
i10: CubemapCoord
i11: CubemapCoord
du: torch.Tensor
dv: torch.Tensor
The provided code snippet includes necessary dependencies for implementing the `cubemap_build_query` function. Write a Python function `def cubemap_build_query( idx: CubemapCoord, face_reso: int, mode: str = "linear" ) -> CubemapBilerpQuery` to solve the following problem:
Compute the points on the cubemap needed for bilinear sampling at the query coordinate. :param idx: CubemapCoord :param face_reso: int, resolution of cubemap face :param mode: str, one of: nearest, linear_simple, linear; linear_simple interpolates per-face, while linear also interpolates across edges (this is the only one supported in CUDA) :return: CubemapBilerpQuery
Here is the function:
def cubemap_build_query(
    idx: CubemapCoord, face_reso: int, mode: str = "linear"
) -> CubemapBilerpQuery:
    """
    Compute the texel indices and fractional offsets needed to sample a
    cubemap bilinearly at the (continuous) coordinate ``idx``.
    :param idx: CubemapCoord with fractional u/v texel coordinates
    :param face_reso: int, resolution of each cubemap face
    :param mode: str, one of: nearest, linear_simple, linear;
        linear_simple interpolates per-face, while linear also
        interpolates across face edges (this is the only mode supported
        in the CUDA implementation)
    :return: CubemapBilerpQuery
    """
    if mode == "nearest":
        # Round to the nearest texel and clamp inside the face; all four
        # corners of the query collapse to that single texel and the
        # fractional offsets are zero.
        uf = torch.floor(idx.u + 0.5).long().clamp_(0, face_reso - 1)
        vf = torch.floor(idx.v + 0.5).long().clamp_(0, face_reso - 1)
        idx_ul = CubemapCoord(idx.ax, idx.ori, uf, vf)
        # Corner: triple average
        return CubemapBilerpQuery(
            idx_ul,
            idx_ul,
            idx_ul,
            idx_ul,
            torch.zeros_like(idx.u),
            torch.zeros_like(idx.v),
        )
    elif mode == "linear_simple":
        # Per-face bilerp: clamp the floor coordinate so the +1 neighbor
        # stays inside the same face (no cross-edge interpolation).
        u = idx.u.clamp(0, face_reso - 2)
        v = idx.v.clamp(0, face_reso - 2)
        uf = torch.floor(u).long()
        vf = torch.floor(v).long()
        uc = uf + 1
        vc = vf + 1
        du = u - uf
        dv = v - vf
        return CubemapBilerpQuery(
            CubemapCoord(idx.ax, idx.ori, uf, vf),
            CubemapCoord(idx.ax, idx.ori, uf, vc),
            CubemapCoord(idx.ax, idx.ori, uc, vf),
            CubemapCoord(idx.ax, idx.ori, uc, vc),
            du,
            dv,
        )
    elif mode == "linear":
        # Full bilerp: corner texels that fall off this face are remapped
        # onto the adjacent face so interpolation crosses cube edges.
        uf = torch.floor(idx.u).long()
        vf = torch.floor(idx.v).long()
        uc = uf + 1
        vc = vf + 1
        # Masks for corners falling off the low (m0*) / high (m1*) edges.
        m0u = uf < 0
        m0v = vf < 0
        m1u = uc > (face_reso - 1)
        m1v = vc > (face_reso - 1)
        # Coordinate axes serving as u/v for the current face axis.
        ud = (idx.ax ^ 1) & 1
        vd = (idx.ax ^ 2) & 2
        def _index_across_sides(nidx: CubemapCoord, uori, vori, mu, mv):
            # Remap one corner (mutates nidx in place; callers pass clones).
            mdiagonal = mu & mv
            # FIXME not quite correct (matches CUDA impl)
            mu = mu & (~mdiagonal)
            mv = mv & (~mdiagonal)
            # Diagonal (corner-of-cube) overflow: just clamp onto this face.
            nidx.u[mdiagonal] = nidx.u[mdiagonal].clamp(0, face_reso - 1)
            nidx.v[mdiagonal] = nidx.v[mdiagonal].clamp(0, face_reso - 1)
            def _index_across_one_side(mask, d, ori, other_coord):
                # Move the masked entries onto the neighboring face along
                # axis `d`, orientation `ori`, preserving the coordinate
                # that runs along the shared edge (`other_coord`).
                nax = d[mask]
                nud = (nax ^ 1) & 1
                # nvd = (nax ^ 2) & 2
                # On the neighbor face, the old face axis becomes either the
                # new u or the new v axis.
                ax_is_u = nud == nidx.ax[mask]
                ax_is_v = ~ax_is_u
                ax_is_u_m = torch.zeros_like(mask)
                ax_is_u_m[mask] = ax_is_u
                ax_is_v_m = torch.zeros_like(mask)
                ax_is_v_m[mask] = ax_is_v
                nidx.u[ax_is_v_m] = other_coord[ax_is_v_m]
                nidx.v[ax_is_u_m] = other_coord[ax_is_u_m]
                # The edge shared with the old face pins the other texel
                # coordinate to 0 or face_reso - 1 depending on orientation.
                nidx.u[ax_is_u_m] = nidx.ori[ax_is_u_m] * (face_reso - 1)
                nidx.v[ax_is_v_m] = nidx.ori[ax_is_v_m] * (face_reso - 1)
                nidx.ax[mask] = nax
                nidx.ori[mask] = ori
            _index_across_one_side(mu, ud, uori, nidx.v)
            _index_across_one_side(mv, vd, vori, nidx.u)
            return nidx
        i00 = _index_across_sides(
            CubemapCoord(idx.ax, idx.ori, uf, vf).clone(), 0, 0, m0u, m0v
        )
        i01 = _index_across_sides(
            CubemapCoord(idx.ax, idx.ori, uf, vc).clone(), 0, 1, m0u, m1v
        )
        i10 = _index_across_sides(
            CubemapCoord(idx.ax, idx.ori, uc, vf).clone(), 1, 0, m1u, m0v
        )
        i11 = _index_across_sides(
            CubemapCoord(idx.ax, idx.ori, uc, vc).clone(), 1, 1, m1u, m1v
        )
        du = idx.u - uf
        dv = idx.v - vf
        return CubemapBilerpQuery(i00, i01, i10, i11, du, dv)
    else:
        raise NotImplementedError()
20,328 | import math
from dataclasses import dataclass
from functools import partial
import numpy as np
import torch
import torch.nn as nn
class CubemapBilerpQuery:
i00: CubemapCoord
i01: CubemapCoord
i10: CubemapCoord
i11: CubemapCoord
du: torch.Tensor
dv: torch.Tensor
The provided code snippet includes necessary dependencies for implementing the `cubemap_sample` function. Write a Python function `def cubemap_sample(cubemap: torch.Tensor, idx4: CubemapBilerpQuery)` to solve the following problem:
Perform bilinear sampling on a cubemap given a query from cubemap_build_query :param cubemap: torch.Tensor float (6, face_reso, face_reso, C) or (B, 6, face_reso, face_reso, C) :param idx4: CubemapBilerpQuery from cubemap_build_query where each tensor has batch size B :return: (B, C)
Here is the function:
def cubemap_sample(cubemap: torch.Tensor, idx4: CubemapBilerpQuery):
    """
    Perform bilinear sampling on a cubemap given a query from cubemap_build_query
    :param cubemap: torch.Tensor float
                    (6, face_reso, face_reso, C)
                    or
                    (B, 6, face_reso, face_reso, C)
    :param idx4: CubemapBilerpQuery from cubemap_build_query where
                 each tensor has batch size B
    :return: (B, C)
    """
    # NOTE: removed unused local `face_reso = cubemap.size(2)`.
    # Gather the four corner texels selected by the query.
    v00 = idx4.i00.query_in(cubemap)
    v01 = idx4.i01.query_in(cubemap)
    v10 = idx4.i10.query_in(cubemap)
    v11 = idx4.i11.query_in(cubemap)
    # Reshape the fractional offsets so they broadcast over channel dims.
    du = idx4.du.view([-1] + (v00.dim() - 1) * [1])
    dv = idx4.dv.view([-1] + (v00.dim() - 1) * [1])
    # Bilinear blend: first along v, then along u.
    r0 = v00 * (1 - dv) + v01 * dv
    r1 = v10 * (1 - dv) + v11 * dv
    return r0 * (1 - du) + r1 * du
20,329 | import math
from dataclasses import dataclass
from functools import partial
import numpy as np
import torch
import torch.nn as nn
def memlog(device="cuda"):
    """Print a CUDA memory summary plus every live non-CPU tensor.

    Debugging helper: walks the garbage collector's object list and prints
    device, element count, dtype, shape and type for each tensor (or object
    with a tensor ``.data``) that does not live on the CPU.

    :param device: device whose allocator summary is printed
    """
    # Memory debugging
    print(torch.cuda.memory_summary(device))
    import gc
    for obj in gc.get_objects():
        try:
            if torch.is_tensor(obj) or (
                hasattr(obj, "data") and torch.is_tensor(obj.data)
            ):
                if str(obj.device) != "cpu":
                    print(
                        obj.device,
                        "{: 10}".format(obj.numel()),
                        obj.dtype,
                        obj.size(),
                        type(obj),
                    )
        except Exception:
            # Best effort: gc can hand back half-initialized objects whose
            # attribute access raises; skip them. Narrowed from a bare
            # ``except`` so KeyboardInterrupt/SystemExit still propagate.
            pass
20,330 | import math
from dataclasses import dataclass
from functools import partial
import numpy as np
import torch
import torch.nn as nn
The provided code snippet includes necessary dependencies for implementing the `spher2cart` function. Write a Python function `def spher2cart(theta: torch.Tensor, phi: torch.Tensor)` to solve the following problem:
Convert spherical coordinates into Cartesian coordinates on unit sphere.
Here is the function:
def spher2cart(theta: torch.Tensor, phi: torch.Tensor):
    """Map spherical angles (polar ``theta``, azimuth ``phi``) to xyz on the unit sphere."""
    sin_theta = torch.sin(theta)
    x = sin_theta * torch.cos(phi)
    y = sin_theta * torch.sin(phi)
    z = torch.cos(theta)
    return torch.stack([x, y, z], dim=-1)
20,331 | import math
from dataclasses import dataclass
from functools import partial
import numpy as np
import torch
import torch.nn as nn
The provided code snippet includes necessary dependencies for implementing the `eval_sg_at_dirs` function. Write a Python function `def eval_sg_at_dirs(sg_lambda: torch.Tensor, sg_mu: torch.Tensor, dirs: torch.Tensor)` to solve the following problem:
Evaluate spherical Gaussian functions at unit directions using learnable SG basis, without taking linear combination Works with torch. ... Can be 0 or more batch dimensions. N is the number of SG basis we use. :math:`Output = \sigma_{i}{exp ^ {\lambda_i * (\dot(\mu_i, \dirs) - 1)}` :param sg_lambda: The sharpness of the SG lobes. (N), positive :param sg_mu: The directions of the SG lobes. (N, 3), unit vector :param dirs: jnp.ndarray unit directions (..., 3) :return: (..., N)
Here is the function:
def eval_sg_at_dirs(sg_lambda: torch.Tensor, sg_mu: torch.Tensor, dirs: torch.Tensor):
    r"""Evaluate unweighted spherical Gaussian (SG) basis functions.

    For each lobe i the value is :math:`\exp(\lambda_i (\mu_i \cdot d - 1))`;
    no linear combination is taken. ``...`` may be 0 or more batch dims.

    :param sg_lambda: (N) positive lobe sharpness values
    :param sg_mu: (N, 3) unit lobe directions
    :param dirs: (..., 3) unit query directions
    :return: (..., N) basis values
    """
    cosines = torch.einsum("ij,...j->...i", sg_mu, dirs)  # [..., N]
    return torch.exp(torch.einsum("i,...i->...i", sg_lambda, cosines - 1))
20,332 | import math
from dataclasses import dataclass
from functools import partial
import numpy as np
import torch
import torch.nn as nn
def init_weights(m):
    """Xavier-initialize Linear layers; intended as a ``model.apply`` hook.

    Uses ``isinstance`` (instead of the original ``type(m) == nn.Linear``)
    so Linear subclasses are initialized too, and tolerates ``bias=False``.

    :param m: candidate submodule (non-Linear modules are left untouched)
    """
    if isinstance(m, nn.Linear):
        nn.init.xavier_uniform_(m.weight)
        if m.bias is not None:
            m.bias.data.fill_(0.0)
20,333 | import math
from dataclasses import dataclass
from functools import partial
import numpy as np
import torch
import torch.nn as nn
The provided code snippet includes necessary dependencies for implementing the `cross_broadcast` function. Write a Python function `def cross_broadcast(x: torch.Tensor, y: torch.Tensor)` to solve the following problem:
Cross broadcasting for 2 tensors :param x: torch.Tensor :param y: torch.Tensor, should have the same ndim as x :return: tuple of cross-broadcasted tensors x, y. Any dimension where the size of x or y is 1 is expanded to the maximum size in that dimension among the 2. Formally, say the shape of x is (a1, ... an) and of y is (b1, ... bn); then the result has shape (a'1, ... a'n), (b'1, ... b'n) where :code:`a'i = ai if (ai > 1 and bi > 1) else max(ai, bi)` :code:`b'i = bi if (ai > 1 and bi > 1) else max(ai, bi)`
Here is the function:
def cross_broadcast(x: torch.Tensor, y: torch.Tensor):
    """
    Mutually broadcast two tensors of equal ndim.

    Any dimension where x or y has size 1 is expanded to the larger of the
    two sizes; dimensions where both are > 1 are left untouched (so
    mismatched non-singleton sizes are NOT reconciled).

    :param x: torch.Tensor
    :param y: torch.Tensor, same ndim as x
    :return: tuple of cross-broadcasted views of x and y
    """
    assert x.ndim == y.ndim, "Only available if ndim is same for all tensors"
    # -1 marks dimensions left as-is (both sizes already > 1).
    targets = [
        (-1 if (a > 1 and b > 1) else max(a, b)) for a, b in zip(x.shape, y.shape)
    ]
    x = x.broadcast_to([max(a, t) for t, a in zip(targets, x.shape)])
    y = y.broadcast_to([max(b, t) for t, b in zip(targets, y.shape)])
    return x, y
20,334 | import math
from dataclasses import dataclass
from functools import partial
import numpy as np
import torch
import torch.nn as nn
def net_to_dict(out_dict: dict, prefix: str, model: nn.Module):
    """Serialize the parameters of ``model``'s direct children into a dict.

    Keys follow ``pt__<prefix>__<child>__<param>``; values are numpy copies
    of the parameter data (the format read back by ``net_from_dict``).
    Removes the original's unused ``layer_params`` local and tuple indexing.

    :param out_dict: destination mapping, updated in place
    :param prefix: namespace prefix for all keys
    :param model: module whose children's parameters are exported
    """
    for layer_name, layer in model.named_children():
        for param_name, param in layer.named_parameters():
            key = "pt__" + prefix + "__" + layer_name + "__" + param_name
            out_dict[key] = param.data.cpu().numpy()
20,335 | import math
from dataclasses import dataclass
from functools import partial
import numpy as np
import torch
import torch.nn as nn
def net_from_dict(in_dict, prefix: str, model: nn.Module):
    """Load parameters saved by ``net_to_dict`` back into ``model``.

    Replaces the original's confusing chained assignment
    (``param_value = param[1].data[:] = ...``) with an explicit in-place
    ``copy_`` and drops the unused ``layer_params`` local.

    :param in_dict: mapping with ``pt__<prefix>__<child>__<param>`` numpy arrays
    :param prefix: prefix used when the dict was written
    :param model: module whose direct children's parameters are overwritten
    """
    for layer_name, layer in model.named_children():
        for param_name, param in layer.named_parameters():
            value = in_dict["pt__" + prefix + "__" + layer_name + "__" + param_name]
            # Copy in place so existing references to the tensor stay valid.
            param.data.copy_(torch.from_numpy(value).to(device=param.data.device))
20,336 | import math
from dataclasses import dataclass
from functools import partial
import numpy as np
import torch
import torch.nn as nn
The provided code snippet includes necessary dependencies for implementing the `xyz2equirect` function. Write a Python function `def xyz2equirect(bearings, reso)` to solve the following problem:
Convert ray direction vectors into equirectangular pixel coordinates. Inverse of equirect2xyz. Taken from Vickie Ye
Here is the function:
def xyz2equirect(bearings, reso):
    """
    Convert ray direction vectors into equirectangular pixel coordinates.
    Inverse of equirect2xyz.
    Taken from Vickie Ye
    """
    latitude = torch.asin(bearings[..., 1])
    longitude = torch.atan2(bearings[..., 0], bearings[..., 2])
    # Image is (2 * reso) wide and reso tall; latitude grows downward.
    col = reso * 2 * (0.5 + longitude / 2 / np.pi)
    row = reso * (0.5 - latitude / np.pi)
    return torch.stack([col, row], dim=-1)
20,337 |
def _get_c_extension():
from warnings import warn
try:
import lib.plenoxel as _C
if not hasattr(_C, "sample_grid"):
_C = None
except:
_C = None
return _C | null |
20,340 | import numpy as np
import torch
import torch.nn.functional as F
def cast_rays(t_vals, origins, directions):
    """Convert per-ray sample distances into 3D points: origin + t * direction."""
    o = origins[..., None, :]
    d = directions[..., None, :]
    return o + t_vals[..., None] * d
def sample_along_rays(
    rays_o,
    rays_d,
    num_samples,
    near,
    far,
    randomized,
    lindisp,
):
    """Stratified sampling of points along rays (vanilla-NeRF style).

    :param rays_o: (B, 3) ray origins
    :param rays_d: (B, 3) ray directions
    :param num_samples: number of intervals (num_samples + 1 edges returned)
    :param near: near plane distance
    :param far: far plane distance
    :param randomized: jitter each sample uniformly within its stratum
    :param lindisp: sample linearly in inverse depth instead of depth
    :return: (t_vals (B, num_samples + 1), coords (B, num_samples + 1, 3))
    """
    n_rays = rays_o.shape[0]
    frac = torch.linspace(0.0, 1.0, num_samples + 1, device=rays_o.device)
    if lindisp:
        # Linear in inverse depth (disparity).
        t_vals = 1.0 / (1.0 / near * (1.0 - frac) + 1.0 / far * frac)
    else:
        t_vals = near * (1.0 - frac) + far * frac
    if randomized:
        mids = 0.5 * (t_vals[..., 1:] + t_vals[..., :-1])
        upper = torch.cat([mids, t_vals[..., -1:]], -1)
        lower = torch.cat([t_vals[..., :1], mids], -1)
        jitter = torch.rand((n_rays, num_samples + 1), device=rays_o.device)
        t_vals = lower + (upper - lower) * jitter
    else:
        t_vals = torch.broadcast_to(t_vals, (n_rays, num_samples + 1))
    return t_vals, cast_rays(t_vals, rays_o, rays_d)
20,342 | import numpy as np
import torch
import torch.nn.functional as F
def volumetric_rendering(rgb, density, t_vals, dirs, white_bkgd):
    """Differentiable volumetric rendering (NeRF quadrature).

    Removes the original's unused ``inv_eps`` local.

    :param rgb: (..., S, 3) per-sample colors
    :param density: (..., S, 1) per-sample densities
    :param t_vals: (..., S + 1) sample distances along each ray
    :param dirs: (..., 3) ray directions (not necessarily unit length)
    :param white_bkgd: bool, composite the residual onto a white background
    :return: (comp_rgb, acc, weights)
    """
    eps = 1e-10
    # Distance between consecutive samples; the last interval is "infinite".
    dists = torch.cat(
        [
            t_vals[..., 1:] - t_vals[..., :-1],
            torch.ones(t_vals[..., :1].shape, device=t_vals.device) * 1e10,
        ],
        dim=-1,
    )
    # Scale by |dirs| so distances are metric even for unnormalized dirs.
    dists = dists * torch.norm(dirs[..., None, :], dim=-1)
    alpha = 1.0 - torch.exp(-density[..., 0] * dists)
    # Transmittance: probability the ray survives to each sample.
    accum_prod = torch.cat(
        [
            torch.ones_like(alpha[..., :1]),
            torch.cumprod(1.0 - alpha[..., :-1] + eps, dim=-1),
        ],
        dim=-1,
    )
    weights = alpha * accum_prod
    comp_rgb = (weights[..., None] * rgb).sum(dim=-2)
    depth = (weights * t_vals).sum(dim=-1)
    acc = weights.sum(dim=-1)
    if white_bkgd:
        comp_rgb = comp_rgb + (1.0 - acc[..., None])
    return comp_rgb, acc, weights
20,343 | import numpy as np
import torch
import torch.nn.functional as F
def cast_rays(t_vals, origins, directions):
def sorted_piecewise_constant_pdf(
bins, weights, num_samples, randomized, float_min_eps=2**-32
):
def sample_pdf(bins, weights, origins, directions, t_vals, num_samples, randomized):
    """Hierarchical sampling: draw extra t values from the coarse weights,
    merge them (sorted) with the existing t values, and re-cast the rays."""
    extra = sorted_piecewise_constant_pdf(
        bins, weights, num_samples, randomized
    ).detach()
    merged = torch.sort(torch.cat([t_vals, extra], dim=-1), dim=-1).values
    return merged, cast_rays(merged, origins, directions)
20,344 | import numpy as np
import torch
The provided code snippet includes necessary dependencies for implementing the `reflect` function. Write a Python function `def reflect(viewdirs, normals)` to solve the following problem:
Reflect view directions about normals. The reflection of a vector v about a unit vector n is a vector u such that dot(v, n) = dot(u, n), and dot(u, u) = dot(v, v). The solution to these two equations is u = 2 dot(n, v) n - v. Args: viewdirs: [..., 3] array of view directions. normals: [..., 3] array of normal directions (assumed to be unit vectors). Returns: [..., 3] array of reflection directions.
Here is the function:
def reflect(viewdirs, normals):
    """Reflect view directions about normals.

    The reflection of a vector v about a unit vector n is u = 2 dot(n, v) n - v,
    which satisfies dot(v, n) = dot(u, n) and dot(u, u) = dot(v, v).
    Args:
      viewdirs: [..., 3] array of view directions.
      normals: [..., 3] array of normal directions (assumed unit vectors).
    Returns:
      [..., 3] array of reflection directions.
    """
    n_dot_v = torch.sum(normals * viewdirs, dim=-1, keepdim=True)
    return 2.0 * n_dot_v * normals - viewdirs
20,345 | import numpy as np
import torch
The provided code snippet includes necessary dependencies for implementing the `l2_normalize` function. Write a Python function `def l2_normalize(x, eps=torch.finfo(torch.float32).eps)` to solve the following problem:
Normalize x to unit length along last axis.
Here is the function:
def l2_normalize(x, eps=torch.finfo(torch.float32).eps):
    """Normalize x to unit length along the last axis (eps-floored for stability)."""
    sq_norm = torch.sum(x**2, dim=-1, keepdims=True)
    floor = torch.full_like(x, eps)
    return x / torch.sqrt(torch.fmax(sq_norm, floor))
20,346 | import numpy as np
import torch
The provided code snippet includes necessary dependencies for implementing the `compute_weighted_mae` function. Write a Python function `def compute_weighted_mae(weights, normals, normals_gt)` to solve the following problem:
Compute weighted mean angular error, assuming normals are unit length.
Here is the function:
def compute_weighted_mae(weights, normals, normals_gt):
    """Weighted mean angular error (degrees), assuming unit-length normals."""
    one_eps = 1 - torch.finfo(torch.float32).eps
    # Clamp keeps arccos away from the +-1 endpoints where its gradient blows up.
    cos_angle = torch.clamp(torch.sum(normals * normals_gt, -1), -one_eps, one_eps)
    angles = torch.arccos(cos_angle)
    mean_rad = (weights * angles).sum() / torch.sum(weights)
    return mean_rad * 180.0 / np.pi
20,347 | import numpy as np
import torch
def generate_ide_fn(deg_view):
    """Generate integrated directional encoding (IDE) function.
    This function returns a function that computes the integrated directional
    encoding from Equations 6-8 of arxiv.org/abs/2112.03907.
    Args:
      deg_view: number of spherical harmonics degrees to use.
    Returns:
      A function for evaluating integrated directional encoding.
    Raises:
      ValueError: if deg_view is larger than 5.
    """
    if deg_view > 5:
        raise ValueError("Only deg_view of at most 5 is numerically stable.")
    # ml_array stacks the (m, l) indices of the SH coefficients used;
    # presumably shape (2, num_coeffs) — defined elsewhere in this module.
    ml_array = get_ml_array(deg_view)
    l_max = 2 ** (deg_view - 1)
    # Create a matrix corresponding to ml_array holding all coefficients, which,
    # when multiplied (from the right) by the z coordinate Vandermonde matrix,
    # results in the z component of the encoding.
    mat = np.zeros((l_max + 1, ml_array.shape[1]))
    for i, (m, l) in enumerate(ml_array.T):
        for k in range(l - m + 1):
            mat[k, i] = sph_harm_coeff(l, m, k)
    mat = torch.Tensor(mat)
    def integrated_dir_enc_fn(xyz, kappa_inv):
        """Function returning integrated directional encoding (IDE).
        Args:
          xyz: [..., 3] array of Cartesian coordinates of directions to evaluate at.
          kappa_inv: [..., 1] reciprocal of the concentration parameter of the von
            Mises-Fisher distribution.
        Returns:
          An array with the resulting IDE.
        """
        x = xyz[..., 0:1]
        y = xyz[..., 1:2]
        z = xyz[..., 2:3]
        # Vandermonde matrix of z powers [1, z, z^2, ...].
        vmz = torch.cat([z**i for i in range(mat.shape[0])], dim=-1)
        # (x + iy)^m carries the azimuthal phase of the complex harmonics.
        vmxy = torch.cat([(x + 1j * y) ** m for m in ml_array[0, :]], dim=-1)
        sph_harms = vmxy * torch.matmul(vmz, mat.to(xyz.device))
        # Attenuation per degree l: sigma_l = l (l + 1) / 2 (paper Eq. 8).
        sigma = torch.Tensor(0.5 * ml_array[1, :] * (ml_array[1, :] + 1)).to(
            kappa_inv.device
        )
        ide = sph_harms * torch.exp(-sigma * kappa_inv)
        # Return real and imaginary parts as separate real-valued features.
        return torch.cat([torch.real(ide), torch.imag(ide)], dim=-1)
    return integrated_dir_enc_fn
The provided code snippet includes necessary dependencies for implementing the `generate_dir_enc_fn` function. Write a Python function `def generate_dir_enc_fn(deg_view)` to solve the following problem:
Generate directional encoding (DE) function. Args: deg_view: number of spherical harmonics degrees to use. Returns: A function for evaluating directional encoding.
Here is the function:
def generate_dir_enc_fn(deg_view):
    """Generate directional encoding (DE) function.

    This is the integrated directional encoding evaluated at zero
    concentration reciprocal (kappa_inv = 0), i.e. with no blurring.
    Args:
      deg_view: number of spherical harmonics degrees to use.
    Returns:
      A function for evaluating directional encoding.
    """
    ide_fn = generate_ide_fn(deg_view)
    def dir_enc_fn(xyz):
        """Function returning directional encoding (DE)."""
        kappa_inv = torch.zeros_like(xyz[..., :1])
        return ide_fn(xyz, kappa_inv)
    return dir_enc_fn
20,348 | import itertools
import numpy as np
import torch
def img2mse(x, y):
    """Mean squared error between prediction x and target y."""
    sq_err = (x - y) ** 2
    return sq_err.mean()
20,349 | import itertools
import numpy as np
import torch
def mse2psnr(x):
    """Convert MSE to PSNR in dB (assumes signal values in [0, 1])."""
    ln10 = np.log(10)
    return -10.0 * torch.log(x) / ln10
20,350 | import itertools
import numpy as np
import torch
def linear_to_srgb(linear, eps=1e-10):
    """Convert linear RGB intensities to sRGB (piecewise IEC 61966-2-1 curve).

    :param linear: tensor of linear intensities
    :param eps: floor applied inside the power term so non-positive inputs do
        not produce NaNs in the (discarded) nonlinear branch. The original
        immediately shadowed this parameter with ``torch.finfo`` eps, making
        it dead; the output is unchanged because the floored branch is only
        selected where ``linear > 0.0031308``.
    :return: tensor of sRGB values, same shape as ``linear``
    """
    srgb0 = 323 / 25 * linear
    srgb1 = (
        211 * torch.fmax(torch.full_like(linear, eps), linear) ** (5 / 12) - 11
    ) / 200
    # Linear segment below the 0.0031308 knee, gamma segment above.
    return torch.where(linear <= 0.0031308, srgb0, srgb1)
20,351 | import itertools
import numpy as np
import torch
def cast_rays(t_vals, origins, directions, radii, ray_shape):
def sample_along_rays(
    rays_o,
    rays_d,
    radii,
    num_samples,
    near,
    far,
    randomized,
    lindisp,
    ray_shape,
):
    """Stratified sampling of Gaussian-lifted intervals along rays (mip-NeRF).

    :param rays_o: (B, 3) ray origins
    :param rays_d: (B, 3) ray directions
    :param radii: per-ray base radii for the cone/cylinder footprint
    :param num_samples: number of intervals (num_samples + 1 edges returned)
    :param near: near plane distance
    :param far: far plane distance
    :param randomized: jitter interval edges within their strata
    :param lindisp: sample linearly in inverse depth instead of depth
    :param ray_shape: "cone" or "cylinder", forwarded to cast_rays
    :return: (t_vals (B, num_samples + 1), (means, covs) per interval)
    """
    n_rays = rays_o.shape[0]
    frac = torch.linspace(0.0, 1.0, num_samples + 1, device=rays_o.device)
    if lindisp:
        # Linear in inverse depth (disparity).
        t_vals = 1.0 / (1.0 / near * (1.0 - frac) + 1.0 / far * frac)
    else:
        t_vals = near * (1.0 - frac) + far * frac
    if randomized:
        mids = 0.5 * (t_vals[..., 1:] + t_vals[..., :-1])
        upper = torch.cat([mids, t_vals[..., -1:]], -1)
        lower = torch.cat([t_vals[..., :1], mids], -1)
        jitter = torch.rand((n_rays, num_samples + 1), device=rays_o.device)
        t_vals = lower + (upper - lower) * jitter
    else:
        t_vals = torch.broadcast_to(t_vals, (n_rays, num_samples + 1))
    means, covs = cast_rays(t_vals, rays_o, rays_d, radii, ray_shape)
    return t_vals, (means, covs)
20,352 | import itertools
import numpy as np
import torch
def sorted_piecewise_constant_pdf(
    bins, weights, num_samples, randomized, float_min_eps=2**-32
):
    """Draw sorted samples from the piecewise-constant pdf over ``bins``.

    Fixes two defects in the original: the input ``weights`` tensor was
    mutated in place (``weights +=``), and the ``randomized`` flag was
    ignored (jitter was always applied). Deterministic mode now uses the
    stratum midpoints.

    :param bins: (..., B + 1) sorted bin edges
    :param weights: (..., B) non-negative bin weights (left unmodified)
    :param num_samples: number of samples per ray
    :param randomized: if True, stratified jittered samples; otherwise
        deterministic stratum midpoints
    :param float_min_eps: guard keeping samples strictly below cdf = 1
    :return: (..., num_samples) sampled positions
    """
    # Pad the weights so the distribution is valid even when they sum to ~0.
    eps = 1e-5
    weight_sum = weights.sum(dim=-1, keepdim=True)
    padding = torch.fmax(torch.zeros_like(weight_sum), eps - weight_sum)
    weights = weights + padding / weights.shape[-1]
    weight_sum = weight_sum + padding
    pdf = weights / weight_sum
    cdf = torch.fmin(
        torch.ones_like(pdf[..., :-1]), torch.cumsum(pdf[..., :-1], dim=-1)
    )
    cdf = torch.cat(
        [
            torch.zeros(list(cdf.shape[:-1]) + [1], device=weights.device),
            cdf,
            torch.ones(list(cdf.shape[:-1]) + [1], device=weights.device),
        ],
        dim=-1,
    )
    # Stratified positions in [0, 1); one stratum of width s per sample.
    s = 1 / num_samples
    u = torch.arange(num_samples, device=weights.device) * s
    if randomized:
        u = u + torch.rand_like(u) * (s - float_min_eps)
    else:
        u = u + 0.5 * (s - float_min_eps)
    u = torch.fmin(u, torch.ones_like(u) * (1.0 - float_min_eps))
    # Invert the cdf: locate the surrounding bin for each u and interpolate.
    mask = u[..., None, :] >= cdf[..., :, None]
    bin0 = (mask * bins[..., None] + ~mask * bins[..., :1, None]).max(dim=-2)[0]
    bin1 = (~mask * bins[..., None] + mask * bins[..., -1:, None]).min(dim=-2)[0]
    cdf0 = (mask * cdf[..., None] + ~mask * cdf[..., :1, None]).max(dim=-2)[0]
    cdf1 = (~mask * cdf[..., None] + mask * cdf[..., -1:, None]).min(dim=-2)[0]
    t = torch.clip(torch.nan_to_num((u - cdf0) / (cdf1 - cdf0), 0), 0, 1)
    samples = bin0 + t * (bin1 - bin0)
    return samples
def cast_rays(t_vals, origins, directions, radii, ray_shape):
    """Lift ray intervals to per-interval Gaussians (mip-NeRF).

    :param t_vals: (..., S + 1) interval edges along each ray
    :param origins: (..., 3) ray origins
    :param directions: (..., 3) ray directions
    :param radii: base radii of the ray footprint
    :param ray_shape: "cone" or "cylinder"
    :return: (means, covs) of the interval Gaussians; means are shifted by
        the ray origins
    :raises ValueError: for an unknown ``ray_shape`` (was ``assert False``,
        which silently disappears under ``python -O``)
    """
    t0 = t_vals[..., :-1]
    t1 = t_vals[..., 1:]
    if ray_shape == "cone":
        gaussian_fn = conical_frustum_to_gaussian
    elif ray_shape == "cylinder":
        gaussian_fn = cylinder_to_gaussian
    else:
        raise ValueError(f"Unknown ray_shape: {ray_shape!r}")
    means, covs = gaussian_fn(directions, t0, t1, radii)
    means = means + origins[..., None, :]
    return means, covs
def resample_along_rays(
    rays_o,
    rays_d,
    radii,
    t_vals,
    weights,
    randomized,
    ray_shape,
    stop_level_grad,
    resample_padding,
):
    """Hierarchical resampling of ray intervals (mip-NeRF fine pass).

    Blurs the coarse-pass weights with a 2-tap max filter, pads them, and
    draws new interval edges from the induced piecewise-constant pdf.
    """
    # Max-blur so the fine pass cannot collapse onto isolated weight spikes.
    padded = torch.cat([weights[..., :1], weights, weights[..., -1:]], dim=-1)
    maxes = torch.fmax(padded[..., :-1], padded[..., 1:])
    blurred = 0.5 * (maxes[..., :-1] + maxes[..., 1:])
    resample_weights = blurred + resample_padding
    new_t_vals = sorted_piecewise_constant_pdf(
        t_vals, resample_weights, t_vals.shape[-1], randomized
    )
    if stop_level_grad:
        # Keep the sampling locations out of the gradient graph.
        new_t_vals = new_t_vals.detach()
    means, covs = cast_rays(new_t_vals, rays_o, rays_d, radii, ray_shape)
    return new_t_vals, (means, covs)
20,353 | import itertools
import numpy as np
import torch
def expected_sin(x, x_var):
def integrated_pos_enc(means, covs, min_deg, max_deg):
    """Integrated positional encoding of a diagonal Gaussian (mip-NeRF).

    Encodes the expected sin/cos features of the Gaussian with the given
    per-coordinate means and variances, at frequencies 2^min_deg..2^(max_deg-1).
    """
    scales = torch.tensor([2**i for i in range(min_deg, max_deg)]).type_as(means)
    flat = list(means.shape[:-1]) + [-1]
    mu = torch.reshape(means[..., None, :] * scales[:, None], flat)
    var = torch.reshape(covs[..., None, :] * scales[:, None] ** 2, flat)
    # cos(x) = sin(x + pi/2), so one expected_sin call covers both halves.
    return expected_sin(
        torch.cat([mu, mu + 0.5 * np.pi], dim=-1),
        torch.cat([var] * 2, dim=-1),
    )[0]
20,354 | import itertools
import numpy as np
import torch
def volumetric_rendering(rgb, density, t_vals, dirs, white_bkgd):
    """Composite per-sample colors along rays (mip-NeRF quadrature).

    Returns (comp_rgb, distance, acc, weights); distance is the
    weight-averaged interval midpoint clipped to the ray's t range.
    """
    midpoints = 0.5 * (t_vals[..., :-1] + t_vals[..., 1:])
    intervals = t_vals[..., 1:] - t_vals[..., :-1]
    delta = intervals * torch.norm(dirs[..., None, :], dim=-1)
    # Note that we're quietly turning density from [..., 0] to [...].
    density_delta = density[..., 0] * delta
    alpha = 1 - torch.exp(-density_delta)
    transmittance = torch.exp(
        -torch.cat(
            [
                torch.zeros_like(density_delta[..., :1]),
                torch.cumsum(density_delta[..., :-1], dim=-1),
            ],
            dim=-1,
        )
    )
    weights = alpha * transmittance
    comp_rgb = (weights[..., None] * rgb).sum(dim=-2)
    acc = weights.sum(dim=-1)
    distance = (weights * midpoints).sum(dim=-1) / acc
    distance = torch.clip(distance, t_vals[:, 0], t_vals[:, -1])
    if white_bkgd:
        comp_rgb = comp_rgb + (1.0 - acc[..., None])
    return comp_rgb, distance, acc, weights
20,355 | import itertools
import numpy as np
import torch
def pos_enc(x, min_deg, max_deg, append_identity):
    """Classic NeRF positional encoding.

    Encodes each coordinate with sin features at frequencies
    2^min_deg..2^(max_deg-1); cos is produced as sin shifted by pi/2.
    Optionally prepends the raw input.
    """
    scales = torch.tensor([2**i for i in range(min_deg, max_deg)]).type_as(x)
    scaled = torch.reshape(
        (x[..., None, :] * scales[:, None]), list(x.shape[:-1]) + [-1]
    )
    encoded = torch.sin(torch.cat([scaled, scaled + 0.5 * np.pi], dim=-1))
    if append_identity:
        return torch.cat([x] + [encoded], dim=-1)
    return encoded
20,356 | import itertools
import numpy as np
import torch
The provided code snippet includes necessary dependencies for implementing the `lift_and_diagonalize` function. Write a Python function `def lift_and_diagonalize(samples, basis)` to solve the following problem:
Project `mean` and `cov` onto basis and diagonalize the projected cov.
Here is the function:
def lift_and_diagonalize(samples, basis):
    """Project a (mean, cov) pair onto `basis` and keep only the diagonal
    of the projected covariance."""
    mean, cov = samples
    lifted_mean = torch.matmul(mean, basis.to(mean.device))
    # diag(B^T C B) computed without materializing the full product.
    lifted_var = torch.sum(basis * torch.matmul(cov, basis.to(cov.device)), dim=-2)
    return (lifted_mean, lifted_var)
20,357 | import itertools
import numpy as np
import torch
def compute_sq_dist(mat0, mat1=None):
    """Compute the squared Euclidean distance between all pairs of columns.

    Uses ||x - y||^2 = ||x||^2 + ||y||^2 - 2 x^T y; small negative values
    caused by floating-point rounding are clamped to zero.
    """
    if mat1 is None:
        mat1 = mat0
    norms0 = np.sum(mat0**2, 0)
    norms1 = np.sum(mat1**2, 0)
    gram = mat0.T @ mat1
    sq_dist = norms0[:, None] + norms1[None, :] - 2 * gram
    return np.maximum(0, sq_dist)
def tesselate_geodesic(base_verts, base_faces, v, eps=1e-4):
    """Tesselate the vertices of a geodesic polyhedron.
    Args:
      base_verts: tensor of floats, the vertex coordinates of the geodesic.
      base_faces: tensor of ints, the indices of the vertices of base_verts that
        constitute each face of the polyhedra.
      v: int, the factor of the tesselation (v==1 is a no-op).
      eps: float, a small value used to determine if two vertices are the same.
    Returns:
      verts: a tensor of floats, the coordinates of the tesselated vertices.
    Raises:
      ValueError: if `v` is not an int.
    """
    if not isinstance(v, int):
        # Fixed grammar of the error message ("must an integer").
        raise ValueError(f"v {v} must be an integer")
    tri_weights = compute_tesselation_weights(v)
    verts = []
    for base_face in base_faces:
        # Barycentric interpolation within each face, then reprojection of the
        # new vertices onto the unit sphere.
        new_verts = np.matmul(tri_weights, base_verts[base_face, :])
        new_verts /= np.sqrt(np.sum(new_verts**2, 1, keepdims=True))
        verts.append(new_verts)
    verts = np.concatenate(verts, 0)
    # Deduplicate vertices shared by adjacent faces: assign each vertex to the
    # first vertex within eps of it, then keep one representative per group.
    sq_dist = compute_sq_dist(verts.T)
    assignment = np.array([np.min(np.argwhere(d <= eps)) for d in sq_dist])
    unique = np.unique(assignment)
    verts = verts[unique, :]
    return verts
The provided code snippet includes necessary dependencies for implementing the `generate_basis` function. Write a Python function `def generate_basis(base_shape, angular_tesselation, remove_symmetries=True, eps=1e-4)` to solve the following problem:
Generates a 3D basis by tesselating a geometric polyhedron. Args: base_shape: string, the name of the starting polyhedron, must be either 'icosahedron' or 'octahedron'. angular_tesselation: int, the number of times to tesselate the polyhedron, must be >= 1 (a value of 1 is a no-op to the polyhedron). remove_symmetries: bool, if True then remove the symmetric basis columns, which is usually a good idea because otherwise projections onto the basis will have redundant negative copies of each other. eps: float, a small number used to determine symmetries. Returns: basis: a matrix with shape [3, n].
Here is the function:
def generate_basis(base_shape, angular_tesselation, remove_symmetries=True, eps=1e-4):
    """Generates a 3D basis by tesselating a geometric polyhedron.
    Args:
      base_shape: string, the name of the starting polyhedron, must be either
        'icosahedron' or 'octahedron'.
      angular_tesselation: int, the number of times to tesselate the polyhedron,
        must be >= 1 (a value of 1 is a no-op to the polyhedron).
      remove_symmetries: bool, if True then remove the symmetric basis columns,
        which is usually a good idea because otherwise projections onto the basis
        will have redundant negative copies of each other.
      eps: float, a small number used to determine symmetries.
    Returns:
      basis: a matrix with shape [3, n].
    """
    if base_shape == "icosahedron":
        # Golden ratio; the 12 icosahedron vertices are the cyclic permutations
        # of (0, +-1, +-a), normalized to unit length by sqrt(a + 2) below.
        a = (np.sqrt(5) + 1) / 2
        verts = np.array(
            [
                (-1, 0, a),
                (1, 0, a),
                (-1, 0, -a),
                (1, 0, -a),
                (0, a, 1),
                (0, a, -1),
                (0, -a, 1),
                (0, -a, -1),
                (a, 1, 0),
                (-a, 1, 0),
                (a, -1, 0),
                (-a, -1, 0),
            ]
        ) / np.sqrt(a + 2)
        # The 20 triangular faces, as index triples into `verts`.
        faces = np.array(
            [
                (0, 4, 1),
                (0, 9, 4),
                (9, 5, 4),
                (4, 5, 8),
                (4, 8, 1),
                (8, 10, 1),
                (8, 3, 10),
                (5, 3, 8),
                (5, 2, 3),
                (2, 7, 3),
                (7, 10, 3),
                (7, 6, 10),
                (7, 11, 6),
                (11, 0, 6),
                (0, 1, 6),
                (6, 1, 10),
                (9, 0, 11),
                (9, 11, 2),
                (9, 2, 5),
                (7, 2, 11),
            ]
        )
        verts = tesselate_geodesic(verts, faces, angular_tesselation)
    elif base_shape == "octahedron":
        verts = np.array(
            [(0, 0, -1), (0, 0, 1), (0, -1, 0), (0, 1, 0), (-1, 0, 0), (1, 0, 0)]
        )
        # Derive the 8 triangular faces from the cube corners: the 3 vertices
        # at squared distance 2 from a corner form one face.
        corners = np.array(list(itertools.product([-1, 1], repeat=3)))
        pairs = np.argwhere(compute_sq_dist(corners.T, verts.T) == 2)
        faces = np.sort(np.reshape(pairs[:, 1], [3, -1]).T, 1)
        verts = tesselate_geodesic(verts, faces, angular_tesselation)
    else:
        raise ValueError(f"base_shape {base_shape} not supported")
    if remove_symmetries:
        # Remove elements of `verts` that are reflections of each other.
        match = compute_sq_dist(verts.T, -verts.T) < eps
        verts = verts[np.any(np.triu(match), 1), :]
    # Reverse the coordinate order; result has one basis vector per row of
    # `verts`, exposed column-major as a [3, n]-style matrix per the docstring.
    basis = verts[:, ::-1]
    return basis
20,358 | import argparse
import logging
import os
import shutil
from typing import *
import gin
import torch
from pytorch_lightning import Trainer
from pytorch_lightning import loggers as pl_loggers
from pytorch_lightning import seed_everything
from pytorch_lightning.callbacks import (
LearningRateMonitor,
ModelCheckpoint,
TQDMProgressBar,
)
from pytorch_lightning.plugins import DDPPlugin
from utils.select_option import select_callback, select_dataset, select_model
def str2bool(v):
    """Interpret a command-line flag value as a boolean (for argparse `type=`)."""
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ("yes", "true", "t", "y", "1"):
        return True
    if lowered in ("no", "false", "f", "n", "0"):
        return False
    raise Exception("Boolean value expected.")
20,359 | import argparse
import logging
import os
import shutil
from typing import *
import gin
import torch
from pytorch_lightning import Trainer
from pytorch_lightning import loggers as pl_loggers
from pytorch_lightning import seed_everything
from pytorch_lightning.callbacks import (
LearningRateMonitor,
ModelCheckpoint,
TQDMProgressBar,
)
from pytorch_lightning.plugins import DDPPlugin
from utils.select_option import select_callback, select_dataset, select_model
def select_model(
    model_name: str,
):
    """Instantiate the Lightning module matching `model_name`.

    Raises:
        ValueError: if `model_name` is not a known model. (The original code
        did `raise f"..."`, which raises a string and therefore crashes with
        a TypeError instead of the intended error.)
    """
    if model_name == "nerf":
        return LitNeRF()
    elif model_name == "mipnerf":
        return LitMipNeRF()
    elif model_name == "plenoxel":
        return LitPlenoxel()
    elif model_name == "nerfpp":
        return LitNeRFPP()
    elif model_name == "dvgo":
        return LitDVGO()
    elif model_name == "refnerf":
        return LitRefNeRF()
    elif model_name == "mipnerf360":
        return LitMipNeRF360()
    else:
        raise ValueError(f"Unknown model named {model_name}")
def select_dataset(
    dataset_name: str,
    datadir: str,
    scene_name: str,
):
    """Instantiate the LitData module for `dataset_name`.

    Raises:
        ValueError: if `dataset_name` is not a known dataset. (The original
        code had no else branch and crashed with an UnboundLocalError on
        `data_fun` for unknown names.)
    """
    if dataset_name == "blender":
        data_fun = LitDataBlender
    elif dataset_name == "blender_multiscale":
        data_fun = LitDataBlenderMultiScale
    elif dataset_name == "llff":
        data_fun = LitDataLLFF
    elif dataset_name == "tanks_and_temples":
        data_fun = LitDataTnT
    elif dataset_name == "lf":
        data_fun = LitDataLF
    elif dataset_name == "nerf_360_v2":
        data_fun = LitDataNeRF360V2
    elif dataset_name == "shiny_blender":
        data_fun = LitDataShinyBlender
    elif dataset_name == "refnerf_real":
        data_fun = LitDataRefNeRFReal
    else:
        raise ValueError(f"Unknown dataset named {dataset_name}")
    return data_fun(
        datadir=datadir,
        scene_name=scene_name,
    )
def select_callback(model_name):
    """Return model-specific Lightning callbacks; empty list for most models."""
    callbacks = []
    if model_name == "plenoxel":
        # Imported lazily so other models don't pay for the plenoxel deps.
        import src.model.plenoxel.model as model

        callbacks.append(model.ResampleCallBack())
    if model_name == "dvgo":
        import src.model.dvgo.model as model

        callbacks.extend(
            [
                model.Coarse2Fine(),
                model.ProgressiveScaling(),
                model.UpdateOccupancyMask(),
            ]
        )
    return callbacks
def run(
    ginc: str,
    ginb: str,
    resume_training: bool,
    ckpt_path: Optional[str],
    scene_name: Optional[str],
    datadir: Optional[str] = None,
    logbase: Optional[str] = None,
    model_name: Optional[str] = None,
    dataset_name: Optional[str] = None,
    postfix: Optional[str] = None,
    entity: Optional[str] = None,
    # Optimization
    max_steps: int = -1,
    max_epochs: int = -1,
    precision: int = 32,
    # Logging
    log_every_n_steps: int = 1000,
    progressbar_refresh_rate: int = 5,
    # Run Mode
    run_train: bool = True,
    run_eval: bool = True,
    run_render: bool = False,
    num_devices: Optional[int] = None,
    num_sanity_val_steps: int = 0,
    seed: int = 777,
    debug: bool = False,
    save_last: bool = True,
    grad_max_norm=0.0,
    grad_clip_algorithm="norm",
):
    """Train / evaluate / render one model with PyTorch Lightning.

    Builds the logger, callbacks, Trainer, data module and model from the
    given names, then runs fit/test/predict depending on the run_* flags.
    NOTE(review): `entity` is accepted but never used in this function --
    presumably a W&B entity left over from a different logger; confirm.
    """
    logging.getLogger("lightning").setLevel(logging.ERROR)
    datadir = datadir.rstrip("/")
    # Experiment name encodes model, dataset, scene and zero-padded seed.
    exp_name = (
        model_name + "_" + dataset_name + "_" + scene_name + "_" + str(seed).zfill(3)
    )
    if postfix is not None:
        exp_name += "_" + postfix
    if debug:
        exp_name += "_debug"
    if num_devices is None:
        num_devices = torch.cuda.device_count()
    # Plenoxel is forced onto a single device (multi-GPU presumably
    # unsupported for it -- confirm).
    if model_name in ["plenoxel"]:
        num_devices = 1
    if logbase is None:
        logbase = "logs"
    os.makedirs(logbase, exist_ok=True)
    logdir = os.path.join(logbase, exp_name)
    os.makedirs(logdir, exist_ok=True)
    os.makedirs(os.path.join(logdir, exp_name), exist_ok=True)
    logger = pl_loggers.TensorBoardLogger(
        save_dir=logdir,
        name=exp_name,
    )
    # Logging all parameters: dump the gin config files and bound options
    # into <logdir>/config.gin for reproducibility.
    if run_train:
        txt_path = os.path.join(logdir, "config.gin")
        with open(txt_path, "w") as fp_txt:
            for config_path in ginc:
                fp_txt.write(f"Config from {config_path}\n\n")
                with open(config_path, "r") as fp_config:
                    readlines = fp_config.readlines()
                for line in readlines:
                    fp_txt.write(line)
                fp_txt.write("\n")
            fp_txt.write("\n### Binded options\n")
            for line in ginb:
                fp_txt.write(line + "\n")
    seed_everything(seed, workers=True)
    lr_monitor = LearningRateMonitor(logging_interval="step")
    # Keep only the checkpoint with the best validation PSNR (plus optionally
    # the last one).
    model_checkpoint = ModelCheckpoint(
        monitor="val/psnr",
        dirpath=logdir,
        filename="best",
        save_top_k=1,
        mode="max",
        save_last=save_last,
    )
    tqdm_progrss = TQDMProgressBar(refresh_rate=progressbar_refresh_rate)
    callbacks = []
    if not model_name in ["plenoxel"]:
        callbacks.append(lr_monitor)
    callbacks += [model_checkpoint, tqdm_progrss]
    callbacks += select_callback(model_name)
    ddp_plugin = DDPPlugin(find_unused_parameters=False) if num_devices > 1 else None
    trainer = Trainer(
        logger=logger if run_train else None,
        log_every_n_steps=log_every_n_steps,
        devices=num_devices,
        max_epochs=max_epochs,
        max_steps=max_steps,
        accelerator="gpu",
        replace_sampler_ddp=False,
        strategy=ddp_plugin,
        check_val_every_n_epoch=1,
        precision=precision,
        num_sanity_val_steps=num_sanity_val_steps,
        callbacks=callbacks,
        gradient_clip_algorithm=grad_clip_algorithm,
        gradient_clip_val=grad_max_norm,
    )
    if resume_training:
        if ckpt_path is None:
            ckpt_path = f"{logdir}/last.ckpt"
    data_module = select_dataset(
        dataset_name=dataset_name,
        scene_name=scene_name,
        datadir=datadir,
    )
    model = select_model(model_name=model_name)
    model.logdir = logdir
    if run_train:
        # Remove artifacts of a previous run so "best" is unambiguous.
        best_ckpt = os.path.join(logdir, "best.ckpt")
        if os.path.exists(best_ckpt):
            os.remove(best_ckpt)
        version0 = os.path.join(logdir, exp_name, "version_0")
        if os.path.exists(version0):
            shutil.rmtree(version0, True)
        trainer.fit(model, data_module, ckpt_path=ckpt_path)
    if run_eval:
        # mipnerf360 evaluates from the last checkpoint instead of the best.
        ckpt_path = (
            f"{logdir}/best.ckpt"
            if model_name != "mipnerf360"
            else f"{logdir}/last.ckpt"
        )
        trainer.test(model, data_module, ckpt_path=ckpt_path)
    if run_render:
        ckpt_path = (
            f"{logdir}/best.ckpt"
            if model_name != "mipnerf360"
            else f"{logdir}/last.ckpt"
        )
        trainer.predict(model, data_module, ckpt_path=ckpt_path)
20,360 | import os
import imageio
import numpy as np
from PIL import Image
def to8b(x):
    """Convert a [0, 1] float array to uint8 in [0, 255]."""
    # NOTE(review): the original `def to8b(x):` stub had no body (a syntax
    # error); restored from the sibling definition of `to8b` in this file.
    return (255 * np.clip(x, 0, 1)).astype(np.uint8)


def norm8b(x):
    """Min-max normalize `x` to [0, 1], then quantize to uint8."""
    x = (x - x.min()) / (x.max() - x.min())
    return to8b(x)
20,361 | import os
import imageio
import numpy as np
from PIL import Image
def to8b(x):
    """Convert a [0, 1] float array to uint8 in [0, 255]."""
    # NOTE(review): the original `def to8b(x):` stub had no body (a syntax
    # error); restored from the sibling definition of `to8b` in this file.
    return (255 * np.clip(x, 0, 1)).astype(np.uint8)


def store_image(dirpath, rgbs):
    """Save each tensor in `rgbs` as dirpath/image###.png via PIL."""
    for (i, rgb) in enumerate(rgbs):
        imgname = f"image{str(i).zfill(3)}.png"
        rgbimg = Image.fromarray(to8b(rgb.detach().cpu().numpy()))
        imgpath = os.path.join(dirpath, imgname)
        rgbimg.save(imgpath)
20,362 | import os
import imageio
import numpy as np
from PIL import Image
def to8b(x):
    """Quantize a float array in [0, 1] to uint8 in [0, 255]."""
    clipped = np.clip(x, 0, 1)
    return (255 * clipped).astype(np.uint8)
def store_video(dirpath, rgbs, depths):
    """Encode `rgbs` ([0, 1] float tensors) into dirpath/videos/images.mp4.

    NOTE(review): `depths` is accepted but never used here -- confirm whether
    a depth video was intended.
    """
    rgbimgs = [to8b(rgb.cpu().detach().numpy()) for rgb in rgbs]
    video_dir = os.path.join(dirpath, "videos")
    os.makedirs(video_dir, exist_ok=True)
    imageio.mimwrite(os.path.join(video_dir, "images.mp4"), rgbimgs, fps=20, quality=8)
20,363 | import os
import torch
import warnings
import numpy as np
import random
from time import sleep
from random import randint
import src.utils.logging as logging
from src.configs.config import get_cfg
from src.data import loader as data_loader
from src.engine.evaluator import Evaluator
from src.engine.trainer import Trainer
from src.models.build_model import build_model
from src.utils.file_io import PathManager
from launch import default_argument_parser, logging_train_setup
def get_loaders(cfg, logger):
    """Construct (train, val, test) dataloaders; test may be None."""
    logger.info("Loading training data (final training data for vtab)...")
    # VTAB datasets train on train+val combined.
    if cfg.DATA.NAME.startswith("vtab-"):
        train_loader = data_loader.construct_trainval_loader(cfg)
    else:
        train_loader = data_loader.construct_train_loader(cfg)

    logger.info("Loading validation data...")
    # not really needed for vtab
    val_loader = data_loader.construct_val_loader(cfg)

    logger.info("Loading test data...")
    test_loader = None
    if cfg.DATA.NO_TEST:
        logger.info("...no test data is constructed")
    else:
        test_loader = data_loader.construct_test_loader(cfg)
    return train_loader, val_loader, test_loader
class Evaluator():
    """
    An evaluator with below logics:
    1. find which eval module to use.
    2. store the eval results, pretty print it in log file as well.
    """
    def __init__(
        self,
    ) -> None:
        self.results = defaultdict(dict)  # maps "epoch_<i>" / "final" -> {metric: value}
        self.iteration = -1  # -1 means "final" (no epoch context set yet)
        self.threshold_end = 0.5  # upper bound of the F1 threshold sweep
    def update_iteration(self, iteration: int) -> None:
        """update iteration info"""
        self.iteration = iteration
    def update_result(self, metric: str, value: Union[float, dict]) -> None:
        # Store under the current epoch key; merge dict-valued metrics so
        # results from multiple eval splits accumulate.
        if self.iteration > -1:
            key_name = "epoch_" + str(self.iteration)
        else:
            key_name = "final"
        if isinstance(value, float):
            self.results[key_name].update({metric: value})
        else:
            if metric in self.results[key_name]:
                self.results[key_name][metric].update(value)
            else:
                self.results[key_name].update({metric: value})
    def classify(self, probs, targets, test_data, multilabel=False):
        """
        Evaluate classification result.
        Args:
            probs: np.ndarray for num_data x num_class, predicted probabilities
            targets: np.ndarray for multilabel, list of integers for single label
            test_labels:  map test image ids to a list of class labels
        """
        if not targets:
            raise ValueError(
                "When evaluating classification, need at least give targets")
        if multilabel:
            self._eval_multilabel(probs, targets, test_data)
        else:
            self._eval_singlelabel(probs, targets, test_data)
    def _eval_singlelabel(
        self,
        scores: np.ndarray,
        targets: List[int],
        eval_type: str
    ) -> None:
        """
        if number of labels > 2:
            top1 and topk (5 by default) accuracy
        if number of labels == 2:
            top1 and rocauc
        """
        acc_dict = singlelabel.compute_acc_auc(scores, targets)
        # Log percentages rounded to 2 decimals; save the raw fractions.
        log_results = {
            k: np.around(v * 100, decimals=2) for k, v in acc_dict.items()
        }
        save_results = acc_dict
        self.log_and_update(log_results, save_results, eval_type)
    def _eval_multilabel(
        self,
        scores: np.ndarray,
        targets: np.ndarray,
        eval_type: str
    ) -> None:
        # Convert target label lists into a multi-hot matrix before scoring.
        num_labels = scores.shape[-1]
        targets = multilabel.multihot(targets, num_labels)
        log_results = {}
        ap, ar, mAP, mAR = multilabel.compute_map(scores, targets)
        f1_dict = multilabel.get_best_f1_scores(
            targets, scores, self.threshold_end)
        log_results["mAP"] = np.around(mAP * 100, decimals=2)
        log_results["mAR"] = np.around(mAR * 100, decimals=2)
        log_results.update({
            k: np.around(v * 100, decimals=2) for k, v in f1_dict.items()})
        save_results = {
            "ap": ap, "ar": ar, "mAP": mAP, "mAR": mAR, "f1": f1_dict
        }
        self.log_and_update(log_results, save_results, eval_type)
    def log_and_update(self, log_results, save_results, eval_type):
        # Pretty-print the metrics on one tab-separated line, then persist.
        log_str = ""
        for k, result in log_results.items():
            if not isinstance(result, np.ndarray):
                log_str += f"{k}: {result:.2f}\t"
            else:
                log_str += f"{k}: {list(result)}\t"
        logger.info(f"Classification results with {eval_type}: {log_str}")
        # save everything
        self.update_result("classification", {eval_type: save_results})
class Trainer():
    """
    a trainer with below logics:
    1. Build optimizer, scheduler
    2. Load checkpoints if provided
    3. Train and eval at each epoch
    """
    def __init__(
        self,
        cfg: CfgNode,
        model: nn.Module,
        evaluator: Evaluator,
        device: torch.device,
    ) -> None:
        self.cfg = cfg
        self.model = model
        self.device = device
        # solver related
        logger.info("\tSetting up the optimizer...")
        self.optimizer = make_optimizer([self.model], cfg.SOLVER)
        self.scheduler = make_scheduler(self.optimizer, cfg.SOLVER)
        self.cls_criterion = build_loss(self.cfg)
        self.checkpointer = Checkpointer(
            self.model,
            save_dir=cfg.OUTPUT_DIR,
            save_to_disk=True
        )
        if len(cfg.MODEL.WEIGHT_PATH) > 0:
            # only use this for vtab in-domain experiments
            # Skip the classification head weights so the new head stays
            # randomly initialized.
            checkpointables = [key for key in self.checkpointer.checkpointables if key not in ["head.last_layer.bias",  "head.last_layer.weight"]]
            self.checkpointer.load(cfg.MODEL.WEIGHT_PATH, checkpointables)
            logger.info(f"Model weight loaded from {cfg.MODEL.WEIGHT_PATH}")
        self.evaluator = evaluator
        self.cpu_device = torch.device("cpu")
    def forward_one_batch(self, inputs, targets, is_train):
        """Train a single (full) epoch on the model using the given
        data loader.
        Args:
            X: input dict
            targets
            is_train: bool
        Returns:
            loss
            outputs: output logits
        Note: returns the sentinel (-1, -1) when the loss is inf/nan so the
        caller can skip the batch.
        """
        # move data to device
        inputs = inputs.to(self.device, non_blocking=True)    # (batchsize, 2048)
        targets = targets.to(self.device, non_blocking=True)  # (batchsize, )
        if self.cfg.DBG:
            logger.info(f"shape of inputs: {inputs.shape}")
            logger.info(f"shape of targets: {targets.shape}")
        # forward
        with torch.set_grad_enabled(is_train):
            outputs = self.model(inputs)  # (batchsize, num_cls)
            if self.cfg.DBG:
                logger.info(
                    "shape of model output: {}, targets: {}".format(
                        outputs.shape, targets.shape))
            if self.cls_criterion.is_local() and is_train:
                self.model.eval()
                loss = self.cls_criterion(
                    outputs, targets, self.cls_weights,
                    self.model, inputs
                )
            elif self.cls_criterion.is_local():
                # Local criteria are not evaluated at test time; return a
                # dummy loss with the logits.
                return torch.tensor(1), outputs
            else:
                loss = self.cls_criterion(
                    outputs, targets, self.cls_weights)
            if loss == float('inf'):
                logger.info(
                    "encountered infinite loss, skip gradient updating for this batch!"
                )
                return -1, -1
            elif torch.isnan(loss).any():
                logger.info(
                    "encountered nan loss, skip gradient updating for this batch!"
                )
                return -1, -1
        # =======backward and optim step only if in training phase... =========
        if is_train:
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
        return loss, outputs
    def get_input(self, data):
        # Convert numpy batches (if any) to tensors before extracting fields.
        if not isinstance(data["image"], torch.Tensor):
            for k, v in data.items():
                data[k] = torch.from_numpy(v)
        inputs = data["image"].float()
        labels = data["label"]
        return inputs, labels
    def train_classifier(self, train_loader, val_loader, test_loader):
        """
        Train a classifier using epoch
        """
        # save the model prompt if required before training
        self.model.eval()
        self.save_prompt(0)
        # setup training epoch params
        total_epoch = self.cfg.SOLVER.TOTAL_EPOCH
        total_data = len(train_loader)
        best_epoch = -1
        best_metric = 0
        log_interval = self.cfg.SOLVER.LOG_EVERY_N
        losses = AverageMeter('Loss', ':.4e')
        batch_time = AverageMeter('Time', ':6.3f')
        data_time = AverageMeter('Data', ':6.3f')
        self.cls_weights = train_loader.dataset.get_class_weights(
            self.cfg.DATA.CLASS_WEIGHTS_TYPE)
        # logger.info(f"class weights: {self.cls_weights}")
        patience = 0  # if > self.cfg.SOLVER.PATIENCE, stop training
        for epoch in range(total_epoch):
            # reset averagemeters to measure per-epoch results
            losses.reset()
            batch_time.reset()
            data_time.reset()
            lr = self.scheduler.get_lr()[0]
            logger.info(
                "Training {} / {} epoch, with learning rate {}".format(
                    epoch + 1, total_epoch, lr
                )
            )
            # Enable training mode
            self.model.train()
            end = time.time()
            for idx, input_data in enumerate(train_loader):
                if self.cfg.DBG and idx == 20:
                    # if debugging, only need to see the first few iterations
                    break
                X, targets = self.get_input(input_data)
                # logger.info(X.shape)
                # logger.info(targets.shape)
                # measure data loading time
                data_time.update(time.time() - end)
                train_loss, _ = self.forward_one_batch(X, targets, True)
                if train_loss == -1:
                    # continue
                    # NOTE: aborts the whole training run on the first bad
                    # batch (inf/nan loss), not just this batch.
                    return None
                losses.update(train_loss.item(), X.shape[0])
                # measure elapsed time
                batch_time.update(time.time() - end)
                end = time.time()
                # log during one batch
                if (idx + 1) % log_interval == 0:
                    seconds_per_batch = batch_time.val
                    eta = datetime.timedelta(seconds=int(
                        seconds_per_batch * (total_data - idx - 1) + seconds_per_batch*total_data*(total_epoch-epoch-1)))
                    logger.info(
                        "\tTraining {}/{}. train loss: {:.4f},".format(
                            idx + 1,
                            total_data,
                            train_loss
                        )
                        + "\t{:.4f} s / batch. (data: {:.2e}). ETA={}, ".format(
                            seconds_per_batch,
                            data_time.val,
                            str(eta),
                        )
                        + "max mem: {:.1f} GB ".format(gpu_mem_usage())
                    )
            logger.info(
                "Epoch {} / {}: ".format(epoch + 1, total_epoch)
                + "avg data time: {:.2e}, avg batch time: {:.4f}, ".format(
                    data_time.avg, batch_time.avg)
                + "average train loss: {:.4f}".format(losses.avg))
            # update lr, scheduler.step() must be called after optimizer.step() according to the docs: https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate # noqa
            self.scheduler.step()
            # Enable eval mode
            self.model.eval()
            self.save_prompt(epoch + 1)
            # eval at each epoch for single gpu training
            self.evaluator.update_iteration(epoch)
            self.eval_classifier(val_loader, "val", epoch == total_epoch - 1)
            if test_loader is not None:
                self.eval_classifier(
                    test_loader, "test", epoch == total_epoch - 1)
            # check the patience: early-stop when val top-1 has not improved
            # for cfg.SOLVER.PATIENCE consecutive epochs.
            t_name = "val_" + val_loader.dataset.name
            try:
                curr_acc = self.evaluator.results[f"epoch_{epoch}"]["classification"][t_name]["top1"]
            except KeyError:
                return
            if curr_acc > best_metric:
                best_metric = curr_acc
                best_epoch = epoch + 1
                logger.info(
                    f'Best epoch {best_epoch}: best metric: {best_metric:.3f}')
                patience = 0
            else:
                patience += 1
            if patience >= self.cfg.SOLVER.PATIENCE:
                logger.info("No improvement. Breaking out of loop.")
                break
        # save the last checkpoints
        # if self.cfg.MODEL.SAVE_CKPT:
        #     Checkpointer(
        #         self.model,
        #         save_dir=self.cfg.OUTPUT_DIR,
        #         save_to_disk=True
        #     ).save("last_model")
    def save_prompt(self, epoch):
        # only save the prompt embed if below conditions are satisfied
        if self.cfg.MODEL.PROMPT.SAVE_FOR_EACH_EPOCH:
            if self.cfg.MODEL.TYPE == "vit" and "prompt" in self.cfg.MODEL.TRANSFER_TYPE:
                prompt_embds = self.model.enc.transformer.prompt_embeddings.cpu().numpy()
                out = {"shallow_prompt": prompt_embds}
                if self.cfg.MODEL.PROMPT.DEEP:
                    deep_embds = self.model.enc.transformer.deep_prompt_embeddings.cpu().numpy()
                    out["deep_prompt"] = deep_embds
                torch.save(out, os.path.join(
                    self.cfg.OUTPUT_DIR, f"prompt_ep{epoch}.pth"))
    def eval_classifier(self, data_loader, prefix, save=False):
        """evaluate classifier"""
        batch_time = AverageMeter('Time', ':6.3f')
        data_time = AverageMeter('Data', ':6.3f')
        losses = AverageMeter('Loss', ':.4e')
        log_interval = self.cfg.SOLVER.LOG_EVERY_N
        test_name = prefix + "_" + data_loader.dataset.name
        total = len(data_loader)
        # initialize features and target
        total_logits = []
        total_targets = []
        for idx, input_data in enumerate(data_loader):
            end = time.time()
            X, targets = self.get_input(input_data)
            # measure data loading time
            data_time.update(time.time() - end)
            if self.cfg.DBG:
                logger.info("during eval: {}".format(X.shape))
            loss, outputs = self.forward_one_batch(X, targets, False)
            if loss == -1:
                return
            losses.update(loss, X.shape[0])
            # measure elapsed time
            batch_time.update(time.time() - end)
            if (idx + 1) % log_interval == 0:
                logger.info(
                    "\tTest {}/{}. loss: {:.3f}, {:.4f} s / batch. (data: {:.2e})".format(  # noqa
                        idx + 1,
                        total,
                        losses.val,
                        batch_time.val,
                        data_time.val
                    ) + "max mem: {:.5f} GB ".format(gpu_mem_usage())
                )
        # targets: List[int]
        total_targets.extend(list(targets.numpy()))
        total_logits.append(outputs)
        logger.info(
            f"Inference ({prefix}):"
            + "avg data time: {:.2e}, avg batch time: {:.4f}, ".format(
                data_time.avg, batch_time.avg)
            + "average loss: {:.4f}".format(losses.avg))
        if self.model.side is not None:
            logger.info(
                "--> side tuning alpha = {:.4f}".format(self.model.side_alpha))
        # total_testimages x num_classes
        joint_logits = torch.cat(total_logits, dim=0).cpu().numpy()
        self.evaluator.classify(
            joint_logits, total_targets,
            test_name, self.cfg.DATA.MULTILABEL,
        )
        # save the probs and targets
        if save and self.cfg.MODEL.SAVE_CKPT:
            out = {"targets": total_targets, "joint_logits": joint_logits}
            out_path = os.path.join(
                self.cfg.OUTPUT_DIR, f"{test_name}_logits.pth")
            torch.save(out, out_path)
            logger.info(
                f"Saved logits and targets for {test_name} at {out_path}")
def build_model(cfg):
    """
    build model here

    Looks up cfg.MODEL.TYPE in the _MODEL_TYPES registry, constructs the
    model, and moves it to the target device.

    Returns:
        (model, device) tuple.
    """
    # NOTE: asserts are stripped under `python -O`; these are sanity checks
    # for developer configs, not production validation.
    assert (
        cfg.MODEL.TYPE in _MODEL_TYPES.keys()
    ), "Model type '{}' not supported".format(cfg.MODEL.TYPE)
    assert (
        cfg.NUM_GPUS <= torch.cuda.device_count()
    ), "Cannot use more GPU devices than available"
    # Construct the model
    train_type = cfg.MODEL.TYPE
    model = _MODEL_TYPES[train_type](cfg)
    log_model_info(model, verbose=cfg.DBG)
    model, device = load_model_to_device(model, cfg)
    logger.info(f"Device used for model: {device}")
    return model, device
def logging_train_setup(args, cfg) -> None:
    """Create the output dir and loggers; log environment, args and config."""
    output_dir = cfg.OUTPUT_DIR
    if output_dir:
        PathManager.mkdirs(output_dir)
    logger = logging.setup_logging(
        cfg.NUM_GPUS, get_world_size(), output_dir, name="visual_prompt")
    # Log basic information about environment, cmdline arguments, and config
    rank = get_rank()
    logger.info(
        f"Rank of current process: {rank}. World size: {get_world_size()}")
    logger.info("Environment info:\n" + collect_env_info())
    logger.info("Command line arguments: " + str(args))
    if hasattr(args, "config_file") and args.config_file != "":
        logger.info(
            "Contents of args.config_file={}:\n{}".format(
                args.config_file,
                PathManager.open(args.config_file, "r").read()
            )
        )
    # Show the config
    logger.info("Training with config:")
    logger.info(pprint.pformat(cfg))
    # cudnn benchmark has large overhead.
    # It shouldn't be used considering the small size of typical val set.
    if not (hasattr(args, "eval_only") and args.eval_only):
        torch.backends.cudnn.benchmark = cfg.CUDNN_BENCHMARK
def train(cfg, args):
    """Run the main train/eval flow for a single (frozen) experiment config.

    Args:
        cfg: frozen experiment config node.
        args: parsed command-line arguments (used by the logging setup).
    """
    # clear up residual cache from previous runs
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    # main training / eval actions here
    # fix the seed for reproducibility
    if cfg.SEED is not None:
        torch.manual_seed(cfg.SEED)
        np.random.seed(cfg.SEED)
        # NOTE(review): `random` is seeded with 0 while torch/numpy use
        # cfg.SEED -- confirm this asymmetry is intentional.
        random.seed(0)
    # setup training env including loggers
    logging_train_setup(args, cfg)
    logger = logging.get_logger("visual_prompt")
    train_loader, val_loader, test_loader = get_loaders(cfg, logger)
    logger.info("Constructing models...")
    model, cur_device = build_model(cfg)
    # Typo fixed in the log message below ("Evalutator" -> "Evaluator").
    logger.info("Setting up Evaluator...")
    evaluator = Evaluator()
    logger.info("Setting up Trainer...")
    trainer = Trainer(cfg, model, evaluator, cur_device)
    if train_loader:
        trainer.train_classifier(train_loader, val_loader, test_loader)
    else:
        print("No train loader presented. Exit")
    # Zero-epoch runs evaluate the (untrained/loaded) model directly.
    if cfg.SOLVER.TOTAL_EPOCH == 0:
        trainer.eval_classifier(test_loader, "test", 0)
20,364 | import os
import warnings
from time import sleep
from random import randint
from src.configs.config import get_cfg
from src.utils.file_io import PathManager
from train import train as train_main
from launch import default_argument_parser
def setup(args, lr, wd, check_runtime=True):
    """
    Create configs and perform basic setups.
    overwrite the 2 parameters in cfg and args

    Scales `lr` by batch size, injects lr/wd into the config, allocates a
    fresh output run-folder, and returns the frozen config.

    Raises:
        ValueError: if this lr/wd combo has already run cfg.RUN_N_TIMES times.
    """
    cfg = get_cfg()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # setup dist
    cfg.DIST_INIT_PATH = "tcp://{}:4000".format(os.environ["SLURMD_NODENAME"])
    # overwrite below four parameters
    lr = lr / 256 * cfg.DATA.BATCH_SIZE  # update lr based on the batchsize
    cfg.SOLVER.BASE_LR = lr
    cfg.SOLVER.WEIGHT_DECAY = wd
    # setup output dir
    # output_dir / data_name / feature_name / lr_wd / run1
    output_dir = cfg.OUTPUT_DIR
    output_folder = os.path.join(
        cfg.DATA.NAME, cfg.DATA.FEATURE, f"lr{lr}_wd{wd}"
    )
    # output_folder = os.path.splitext(os.path.basename(args.config_file))[0]
    # train cfg.RUN_N_TIMES times
    if check_runtime:
        count = 1
        while count <= cfg.RUN_N_TIMES:
            output_path = os.path.join(output_dir, output_folder, f"run{count}")
            # pause for a random time, so concurrent process with same setting won't interfere with each other. # noqa
            sleep(randint(1, 5))
            if not PathManager.exists(output_path):
                PathManager.mkdirs(output_path)
                cfg.OUTPUT_DIR = output_path
                break
            else:
                count += 1
        if count > cfg.RUN_N_TIMES:
            raise ValueError(
                f"Already run {cfg.RUN_N_TIMES} times for {output_folder}, no need to run more")
    else:
        # only used for dummy config file
        output_path = os.path.join(output_dir, output_folder, f"run1")
        cfg.OUTPUT_DIR = output_path
    cfg.freeze()
    return cfg
def finetune_main(args):
    """Grid-search lr/wd for full finetuning; skip combos already run."""
    lrs = [0.001, 0.0001, 0.0005, 0.005]
    wds = [0.01, 0.001, 0.0001, 0.0]
    combos = [(wd, lr) for wd in wds for lr in lrs]
    for wd, lr in combos:
        # set up cfg and args
        try:
            cfg = setup(args, lr, wd)
        except ValueError:
            # This lr/wd combo already ran the configured number of times.
            continue
        train_main(cfg, args)
20,365 | import os
import warnings
from time import sleep
from random import randint
from src.configs.config import get_cfg
from src.utils.file_io import PathManager
from train import train as train_main
from launch import default_argument_parser
def setup(args, lr, wd, check_runtime=True):
    """
    Create configs and perform basic setups.
    overwrite the 2 parameters in cfg and args

    Scales `lr` by batch size, injects lr/wd into the config, allocates a
    fresh output run-folder, and returns the frozen config.

    Raises:
        ValueError: if this lr/wd combo has already run cfg.RUN_N_TIMES times.
    """
    cfg = get_cfg()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # setup dist
    cfg.DIST_INIT_PATH = "tcp://{}:4000".format(os.environ["SLURMD_NODENAME"])
    # overwrite below four parameters
    lr = lr / 256 * cfg.DATA.BATCH_SIZE  # update lr based on the batchsize
    cfg.SOLVER.BASE_LR = lr
    cfg.SOLVER.WEIGHT_DECAY = wd
    # setup output dir
    # output_dir / data_name / feature_name / lr_wd / run1
    output_dir = cfg.OUTPUT_DIR
    output_folder = os.path.join(
        cfg.DATA.NAME, cfg.DATA.FEATURE, f"lr{lr}_wd{wd}"
    )
    # output_folder = os.path.splitext(os.path.basename(args.config_file))[0]
    # train cfg.RUN_N_TIMES times
    if check_runtime:
        count = 1
        while count <= cfg.RUN_N_TIMES:
            output_path = os.path.join(output_dir, output_folder, f"run{count}")
            # pause for a random time, so concurrent process with same setting won't interfere with each other. # noqa
            sleep(randint(1, 5))
            if not PathManager.exists(output_path):
                PathManager.mkdirs(output_path)
                cfg.OUTPUT_DIR = output_path
                break
            else:
                count += 1
        if count > cfg.RUN_N_TIMES:
            raise ValueError(
                f"Already run {cfg.RUN_N_TIMES} times for {output_folder}, no need to run more")
    else:
        # only used for dummy config file
        output_path = os.path.join(output_dir, output_folder, f"run1")
        cfg.OUTPUT_DIR = output_path
    cfg.freeze()
    return cfg
def finetune_rn_main(args):
    """Grid-search lr/wd for finetuning ResNet backbones."""
    lrs = [
        0.05, 0.025, 0.005, 0.0025
    ]
    wds = [0.01, 0.001, 0.0001, 0.0]
    combos = [(wd, lr) for wd in wds for lr in lrs]
    for wd, lr in combos:
        # set up cfg and args
        try:
            cfg = setup(args, lr, wd)
        except ValueError as e:
            # Combo already ran enough times; report and move on.
            print(e)
            continue
        train_main(cfg, args)
20,366 | import os
import warnings
from time import sleep
from random import randint
from src.configs.config import get_cfg
from src.utils.file_io import PathManager
from train import train as train_main
from launch import default_argument_parser
def setup(args, lr, wd, check_runtime=True):
    """
    Create configs and perform basic setups.
    overwrite the 2 parameters in cfg and args

    Scales `lr` by batch size, injects lr/wd into the config, allocates a
    fresh output run-folder, and returns the frozen config.

    Raises:
        ValueError: if this lr/wd combo has already run cfg.RUN_N_TIMES times.
    """
    cfg = get_cfg()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # setup dist
    cfg.DIST_INIT_PATH = "tcp://{}:4000".format(os.environ["SLURMD_NODENAME"])
    # overwrite below four parameters
    lr = lr / 256 * cfg.DATA.BATCH_SIZE  # update lr based on the batchsize
    cfg.SOLVER.BASE_LR = lr
    cfg.SOLVER.WEIGHT_DECAY = wd
    # setup output dir
    # output_dir / data_name / feature_name / lr_wd / run1
    output_dir = cfg.OUTPUT_DIR
    output_folder = os.path.join(
        cfg.DATA.NAME, cfg.DATA.FEATURE, f"lr{lr}_wd{wd}"
    )
    # output_folder = os.path.splitext(os.path.basename(args.config_file))[0]
    # train cfg.RUN_N_TIMES times
    if check_runtime:
        count = 1
        while count <= cfg.RUN_N_TIMES:
            output_path = os.path.join(output_dir, output_folder, f"run{count}")
            # pause for a random time, so concurrent process with same setting won't interfere with each other. # noqa
            sleep(randint(1, 5))
            if not PathManager.exists(output_path):
                PathManager.mkdirs(output_path)
                cfg.OUTPUT_DIR = output_path
                break
            else:
                count += 1
        if count > cfg.RUN_N_TIMES:
            raise ValueError(
                f"Already run {cfg.RUN_N_TIMES} times for {output_folder}, no need to run more")
    else:
        # only used for dummy config file
        output_path = os.path.join(output_dir, output_folder, f"run1")
        cfg.OUTPUT_DIR = output_path
    cfg.freeze()
    return cfg
def prompt_rn_main(args):
    """Grid-search lr/wd for prompt-tuning ResNet backbones, largest lr first."""
    lrs = [
        0.05, 0.025, 0.01, 0.5, 0.25, 0.1,
        1.0, 2.5, 5.
    ]
    wds = [0.01, 0.001, 0.0001, 0.0]
    combos = [(lr, wd) for lr in sorted(lrs, reverse=True) for wd in wds]
    for lr, wd in combos:
        # set up cfg and args
        try:
            cfg = setup(args, lr, wd)
        except ValueError as e:
            # Combo already ran enough times; report and move on.
            print(e)
            continue
        train_main(cfg, args)
20,367 | import os
import warnings
from time import sleep
from random import randint
from src.configs.config import get_cfg
from src.utils.file_io import PathManager
from train import train as train_main
from launch import default_argument_parser
def setup(args, lr, wd, check_runtime=True):
    """
    Create configs and perform basic setups.
    overwrite the 2 parameters in cfg and args

    Args:
        args: parsed command-line args; reads args.config_file and args.opts.
        lr: base learning rate assuming batch size 256 (rescaled below).
        wd: weight decay value written into the config.
        check_runtime: when True, allocate the next free runN output
            directory and fail once cfg.RUN_N_TIMES runs exist; when False,
            always point at run1 (only used for dummy config files).

    Returns:
        A frozen config node with lr/wd and OUTPUT_DIR filled in.

    Raises:
        ValueError: if this (lr, wd) setting already ran cfg.RUN_N_TIMES times.
    """
    cfg = get_cfg()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # setup dist: rendezvous address is built from the SLURM node name.
    cfg.DIST_INIT_PATH = "tcp://{}:4000".format(os.environ["SLURMD_NODENAME"])
    # overwrite below four parameters
    lr = lr / 256 * cfg.DATA.BATCH_SIZE  # update lr based on the batchsize
    cfg.SOLVER.BASE_LR = lr
    cfg.SOLVER.WEIGHT_DECAY = wd
    # setup output dir
    # output_dir / data_name / feature_name / lr_wd / run1
    output_dir = cfg.OUTPUT_DIR
    output_folder = os.path.join(
        cfg.DATA.NAME, cfg.DATA.FEATURE, f"lr{lr}_wd{wd}"
    )
    # output_folder = os.path.splitext(os.path.basename(args.config_file))[0]
    # train cfg.RUN_N_TIMES times
    if check_runtime:
        count = 1
        while count <= cfg.RUN_N_TIMES:
            output_path = os.path.join(output_dir, output_folder, f"run{count}")
            # pause for a random time, so concurrent process with same setting won't interfere with each other. # noqa
            sleep(randint(1, 5))
            if not PathManager.exists(output_path):
                PathManager.mkdirs(output_path)
                cfg.OUTPUT_DIR = output_path
                break
            else:
                count += 1
        if count > cfg.RUN_N_TIMES:
            raise ValueError(
                f"Already run {cfg.RUN_N_TIMES} times for {output_folder}, no need to run more")
    else:
        # only used for dummy config file
        output_path = os.path.join(output_dir, output_folder, f"run1")
        cfg.OUTPUT_DIR = output_path
    cfg.freeze()
    return cfg
def linear_main(args):
    """Grid-search linear probing over (lr, wd) combinations."""
    lr_candidates = [
        50.0, 25., 10.0,
        5.0, 2.5, 1.0,
        0.5, 0.25, 0.1, 0.05
    ]
    wd_candidates = [0.01, 0.001, 0.0001, 0.0]
    for lr in lr_candidates:
        for wd in wd_candidates:
            # setup() raises ValueError once this combination has already
            # been run the configured number of times; skip it silently.
            try:
                run_cfg = setup(args, lr, wd)
            except ValueError:
                continue
            train_main(run_cfg, args)
            # Random pause so concurrent jobs don't race on output dirs.
            sleep(randint(1, 10))
20,368 | import os
import warnings
from time import sleep
from random import randint
from src.configs.config import get_cfg
from src.utils.file_io import PathManager
from train import train as train_main
from launch import default_argument_parser
def setup(args, lr, wd, check_runtime=True):
    """
    Create configs and perform basic setups.
    overwrite the 2 parameters in cfg and args

    Args:
        args: parsed command-line args; reads args.config_file and args.opts.
        lr: base learning rate assuming batch size 256 (rescaled below).
        wd: weight decay value written into the config.
        check_runtime: when True, allocate the next free runN output
            directory and fail once cfg.RUN_N_TIMES runs exist; when False,
            always point at run1 (only used for dummy config files).

    Returns:
        A frozen config node with lr/wd and OUTPUT_DIR filled in.

    Raises:
        ValueError: if this (lr, wd) setting already ran cfg.RUN_N_TIMES times.
    """
    cfg = get_cfg()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # setup dist: rendezvous address is built from the SLURM node name.
    cfg.DIST_INIT_PATH = "tcp://{}:4000".format(os.environ["SLURMD_NODENAME"])
    # overwrite below four parameters
    lr = lr / 256 * cfg.DATA.BATCH_SIZE  # update lr based on the batchsize
    cfg.SOLVER.BASE_LR = lr
    cfg.SOLVER.WEIGHT_DECAY = wd
    # setup output dir
    # output_dir / data_name / feature_name / lr_wd / run1
    output_dir = cfg.OUTPUT_DIR
    output_folder = os.path.join(
        cfg.DATA.NAME, cfg.DATA.FEATURE, f"lr{lr}_wd{wd}"
    )
    # output_folder = os.path.splitext(os.path.basename(args.config_file))[0]
    # train cfg.RUN_N_TIMES times
    if check_runtime:
        count = 1
        while count <= cfg.RUN_N_TIMES:
            output_path = os.path.join(output_dir, output_folder, f"run{count}")
            # pause for a random time, so concurrent process with same setting won't interfere with each other. # noqa
            sleep(randint(1, 5))
            if not PathManager.exists(output_path):
                PathManager.mkdirs(output_path)
                cfg.OUTPUT_DIR = output_path
                break
            else:
                count += 1
        if count > cfg.RUN_N_TIMES:
            raise ValueError(
                f"Already run {cfg.RUN_N_TIMES} times for {output_folder}, no need to run more")
    else:
        # only used for dummy config file
        output_path = os.path.join(output_dir, output_folder, f"run1")
        cfg.OUTPUT_DIR = output_path
    cfg.freeze()
    return cfg
def linear_mae_main(args):
    """Grid-search linear probing of MAE features over (lr, wd)."""
    lr_candidates = [
        50.0, 25., 10.0,
        5.0, 2.5, 1.0,
        0.5, 0.25, 0.1, 0.05,
        0.025, 0.005, 0.0025,
    ]
    wd_candidates = [0.01, 0.001, 0.0001, 0.0]
    for lr in lr_candidates:
        for wd in wd_candidates:
            # setup() raises ValueError once this combination has exhausted
            # its allowed number of runs; skip it silently.
            try:
                run_cfg = setup(args, lr, wd)
            except ValueError:
                continue
            train_main(run_cfg, args)
            # Random pause so concurrent jobs don't race on output dirs.
            sleep(randint(1, 10))
20,369 | import os
import warnings
from time import sleep
from random import randint
from src.configs.config import get_cfg
from src.utils.file_io import PathManager
from train import train as train_main
from launch import default_argument_parser
def setup(args, lr, wd, check_runtime=True):
def prompt_main(args):
    """Grid-search prompt tuning over (lr, wd) combinations."""
    lr_candidates = [
        5.0, 2.5, 1.0,
        50.0, 25., 10.0,
        0.5, 0.25, 0.1,
    ]
    wd_candidates = [0.01, 0.001, 0.0001, 0.0]
    for lr in lr_candidates:
        for wd in wd_candidates:
            # setup() raises ValueError once this combination has exhausted
            # its allowed number of runs; skip it silently.
            try:
                run_cfg = setup(args, lr, wd)
            except ValueError:
                continue
            train_main(run_cfg, args)
            # Random pause so concurrent jobs don't race on output dirs.
            sleep(randint(1, 10))
20,370 | import os
import warnings
from time import sleep
from random import randint
from src.configs.config import get_cfg
from src.utils.file_io import PathManager
from train import train as train_main
from launch import default_argument_parser
def setup(args, lr, wd, check_runtime=True):
    """
    Create configs and perform basic setups.
    overwrite the 2 parameters in cfg and args

    Args:
        args: parsed command-line args; reads args.config_file and args.opts.
        lr: base learning rate assuming batch size 256 (rescaled below).
        wd: weight decay value written into the config.
        check_runtime: when True, allocate the next free runN output
            directory and fail once cfg.RUN_N_TIMES runs exist; when False,
            always point at run1 (only used for dummy config files).

    Returns:
        A frozen config node with lr/wd and OUTPUT_DIR filled in.

    Raises:
        ValueError: if this (lr, wd) setting already ran cfg.RUN_N_TIMES times.
    """
    cfg = get_cfg()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # setup dist: rendezvous address is built from the SLURM node name.
    cfg.DIST_INIT_PATH = "tcp://{}:4000".format(os.environ["SLURMD_NODENAME"])
    # overwrite below four parameters
    lr = lr / 256 * cfg.DATA.BATCH_SIZE  # update lr based on the batchsize
    cfg.SOLVER.BASE_LR = lr
    cfg.SOLVER.WEIGHT_DECAY = wd
    # setup output dir
    # output_dir / data_name / feature_name / lr_wd / run1
    output_dir = cfg.OUTPUT_DIR
    output_folder = os.path.join(
        cfg.DATA.NAME, cfg.DATA.FEATURE, f"lr{lr}_wd{wd}"
    )
    # output_folder = os.path.splitext(os.path.basename(args.config_file))[0]
    # train cfg.RUN_N_TIMES times
    if check_runtime:
        count = 1
        while count <= cfg.RUN_N_TIMES:
            output_path = os.path.join(output_dir, output_folder, f"run{count}")
            # pause for a random time, so concurrent process with same setting won't interfere with each other. # noqa
            sleep(randint(1, 5))
            if not PathManager.exists(output_path):
                PathManager.mkdirs(output_path)
                cfg.OUTPUT_DIR = output_path
                break
            else:
                count += 1
        if count > cfg.RUN_N_TIMES:
            raise ValueError(
                f"Already run {cfg.RUN_N_TIMES} times for {output_folder}, no need to run more")
    else:
        # only used for dummy config file
        output_path = os.path.join(output_dir, output_folder, f"run1")
        cfg.OUTPUT_DIR = output_path
    cfg.freeze()
    return cfg
def prompt_main_largerrange(args):
    """Sweep an enlarged (lr, wd) grid for parallel-based prompts."""
    # Larger learning rates used for parallel-based prompt on Stanford Cars.
    lr_candidates = [
        500, 1000,
        250., 100.0,
    ]
    wd_candidates = [0.0, 0.01, 0.001, 0.0001]
    for lr in lr_candidates:
        for wd in wd_candidates:
            # setup() raises ValueError once this combination has exhausted
            # its allowed number of runs; skip it silently.
            try:
                run_cfg = setup(args, lr, wd)
            except ValueError:
                continue
            train_main(run_cfg, args)
            # Random pause so concurrent jobs don't race on output dirs.
            sleep(randint(1, 10))
20,371 | import torchvision as tv
def get_transforms(split, size):
    """Build torchvision transforms for a dataset split.

    Args:
        split: dataset split name; "train" enables random crop + horizontal
            flip, any other value uses a deterministic center crop.
        size: final crop size; one of {224, 384, 448}.

    Returns:
        A tv.transforms.Compose pipeline ending in ToTensor and ImageNet
        mean/std normalization.

    Raises:
        ValueError: if `size` is not one of the supported crop sizes.
    """
    normalize = tv.transforms.Normalize(
        mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
    )
    # Resize dimension keeps roughly the usual 256/224 ratio per crop size.
    if size == 448:
        resize_dim, crop_dim = 512, 448
    elif size == 224:
        resize_dim, crop_dim = 256, 224
    elif size == 384:
        resize_dim, crop_dim = 438, 384
    else:
        # Previously an unsupported size fell through and crashed later
        # with a NameError on resize_dim; fail loudly and clearly instead.
        raise ValueError(
            f"Unsupported crop size {size!r}; expected one of 224, 384, 448")
    if split == "train":
        transform = tv.transforms.Compose(
            [
                tv.transforms.Resize(resize_dim),
                tv.transforms.RandomCrop(crop_dim),
                tv.transforms.RandomHorizontalFlip(0.5),
                tv.transforms.ToTensor(),
                normalize,
            ]
        )
    else:
        transform = tv.transforms.Compose(
            [
                tv.transforms.Resize(resize_dim),
                tv.transforms.CenterCrop(crop_dim),
                tv.transforms.ToTensor(),
                normalize,
            ]
        )
    return transform
20,372 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
from . import base as base
from .registry import Registry
def _count_preprocess_fn(x):
    """Map an example to its image plus an object-count-derived label."""
    # Label is the number of objects minus 3 — presumably the minimum
    # object count in this dataset; TODO confirm against the data source.
    label = tf.size(x["objects"]["size"]) - 3
    return {"image": x["image"], "label": label}
20,373 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
from . import base as base
from .registry import Registry
def _count_cylinders_preprocess_fn(x):
    """Label each example with the number of cylinder-shaped objects."""
    # Class distribution:
    # Shape id 2 encodes a cylinder — presumably per the dataset's shape
    # vocabulary; TODO confirm against the feature spec.
    is_cylinder = tf.cast(tf.equal(x["objects"]["shape"], 2), tf.int32)
    num_cylinders = tf.reduce_sum(is_cylinder)
    return {"image": x["image"], "label": num_cylinders}
20,374 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
from . import base as base
from .registry import Registry
def _closest_object_preprocess_fn(x):
    """Bucket the distance of the closest object into a class label."""
    closest = tf.reduce_min(x["objects"]["pixel_coords"][:, 2])
    # These thresholds are uniformly spaced and result in a more or less
    # balanced distribution of classes (per the resulting histogram).
    thresholds = np.array([0.0, 8.0, 8.5, 9.0, 9.5, 10.0, 100.0])
    # Label is the index of the last threshold still below the distance.
    label = tf.reduce_max(tf.where((thresholds - closest) < 0))
    return {"image": x["image"], "label": label}
20,375 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
from . import base as base
from .registry import Registry
The provided code snippet includes necessary dependencies for implementing the `_count_all_pp` function. Write a Python function `def _count_all_pp(x)` to solve the following problem:
Count all objects.
Here is the function:
def _count_all_pp(x):
    """Count all objects."""
    # Label is (count - 1), clipped to at most 8 so labels stay in [0, 8].
    num_objects = tf.size(x["objects"]["type"])
    label = tf.math.minimum(num_objects - 1, 8)
    return {"image": x["image"], "label": label}
20,376 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
from . import base as base
from .registry import Registry
The provided code snippet includes necessary dependencies for implementing the `_count_vehicles_pp` function. Write a Python function `def _count_vehicles_pp(x)` to solve the following problem:
Counting vehicles.
Here is the function:
def _count_vehicles_pp(x):
    """Counting vehicles."""
    # Object types 0-2 are Car, Van and Truck.
    vehicle_idx = tf.where(x["objects"]["type"] < 3)
    # Cap the count at 3.
    label = tf.math.minimum(tf.size(vehicle_idx), 3)
    return {"image": x["image"], "label": label}
20,377 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
from . import base as base
from .registry import Registry
The provided code snippet includes necessary dependencies for implementing the `_count_left_pp` function. Write a Python function `def _count_left_pp(x)` to solve the following problem:
Count objects on the left hand side of the camera.
Here is the function:
def _count_left_pp(x):
    """Count objects on the left hand side of the camera."""
    # "location" holds (x, y, z) in meters relative to the camera;
    # x < 0 means left of the camera. Count is capped at 8.
    left_idx = tf.where(x["objects"]["location"][:, 0] < 0)
    label = tf.math.minimum(tf.size(left_idx), 8)
    return {"image": x["image"], "label": label}
20,378 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
from . import base as base
from .registry import Registry
The provided code snippet includes necessary dependencies for implementing the `_count_far_pp` function. Write a Python function `def _count_far_pp(x)` to solve the following problem:
Counts objects far from the camera.
Here is the function:
def _count_far_pp(x):
    """Counts objects far from the camera."""
    # "location" holds (x, y, z) in meters relative to the camera.
    # The 25 m threshold removes roughly half of the objects; count capped at 8.
    far_idx = tf.where(x["objects"]["location"][:, 2] >= 25)
    label = tf.math.minimum(tf.size(far_idx), 8)
    return {"image": x["image"], "label": label}
20,379 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
from . import base as base
from .registry import Registry
The provided code snippet includes necessary dependencies for implementing the `_count_near_pp` function. Write a Python function `def _count_near_pp(x)` to solve the following problem:
Counts objects close to the camera.
Here is the function:
def _count_near_pp(x):
    """Counts objects close to the camera."""
    # "location" holds (x, y, z) in meters relative to the camera.
    # The 25 m threshold removes roughly half of the objects; count capped at 8.
    near_idx = tf.where(x["objects"]["location"][:, 2] < 25)
    label = tf.math.minimum(tf.size(near_idx), 8)
    return {"image": x["image"], "label": label}
20,380 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
from . import base as base
from .registry import Registry
The provided code snippet includes necessary dependencies for implementing the `_closest_object_distance_pp` function. Write a Python function `def _closest_object_distance_pp(x)` to solve the following problem:
Predict the distance to the closest object.
Here is the function:
def _closest_object_distance_pp(x):
    """Predict the distance to the closest object."""
    # "location" holds (x, y, z) in meters relative to the camera.
    closest = tf.reduce_min(x["objects"]["location"][:, 2])
    # Bucket edges chosen from the observed label distribution.
    bin_edges = np.array([-100, 5.6, 8.4, 13.4, 23.4])
    # Label is the index of the last edge still below the distance.
    label = tf.reduce_max(tf.where((bin_edges - closest) < 0))
    return {"image": x["image"], "label": label}
20,381 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
from . import base as base
from .registry import Registry
The provided code snippet includes necessary dependencies for implementing the `_closest_vehicle_distance_pp` function. Write a Python function `def _closest_vehicle_distance_pp(x)` to solve the following problem:
Predict the distance to the closest vehicle.
Here is the function:
def _closest_vehicle_distance_pp(x):
    """Predict the distance to the closest vehicle."""
    # "location" holds (x, y, z) in meters relative to the camera.
    # Object types 0-2 are Car, Van and Truck.
    vehicle_idx = tf.where(x["objects"]["type"] < 3)
    depths = tf.gather(params=x["objects"]["location"][:, 2], indices=vehicle_idx)
    # Append a sentinel depth so scenes with no vehicle land past the
    # final threshold (their own "no vehicle" class).
    depths = tf.concat([depths, tf.constant([[1000.0]])], axis=0)
    closest = tf.reduce_min(depths)
    # Results in a roughly uniform distribution over three distance bands,
    # plus one class for "no vehicle".
    bin_edges = np.array([-100.0, 8.0, 20.0, 999.0])
    label = tf.reduce_max(tf.where((bin_edges - closest) < 0))
    return {"image": x["image"], "label": label}
20,382 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
from . import base as base
from .registry import Registry
The provided code snippet includes necessary dependencies for implementing the `_closest_object_x_location_pp` function. Write a Python function `def _closest_object_x_location_pp(x)` to solve the following problem:
Predict the absolute x position of the closest object.
Here is the function:
def _closest_object_x_location_pp(x):
    """Predict the absolute x position of the closest object."""
    # "location" holds (x, y, z) in meters relative to the camera;
    # the object with minimal z is the closest one.
    nearest = tf.math.argmin(x["objects"]["location"][:, 2])
    lateral = x["objects"]["location"][nearest, 0]
    # Bucket edges chosen from the observed label distribution.
    bin_edges = np.array([-100, -6.4, -3.5, 0.0, 3.3, 23.9])
    label = tf.reduce_max(tf.where((bin_edges - lateral) < 0))
    return {"image": x["image"], "label": label}
20,383 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import functools
The provided code snippet includes necessary dependencies for implementing the `partialclass` function. Write a Python function `def partialclass(cls, *base_args, **base_kwargs)` to solve the following problem:
Builds a subclass with partial application of the given args and keywords. Analogous to functools.partial: base_args are prepended to the positional arguments given during object initialization, and base_kwargs are updated with the kwargs given later. Args: cls: The base class. *base_args: Positional arguments to be applied to the subclass. **base_kwargs: Keyword arguments to be applied to the subclass. Returns: A subclass of the input class.
Here is the function:
def partialclass(cls, *base_args, **base_kwargs):
    """Return a subclass of *cls* with arguments partially applied.

    Works like functools.partial, but for classes: ``base_args`` are
    prepended to the positional arguments supplied at instantiation time,
    and ``base_kwargs`` are merged with (and overridden by) the kwargs
    supplied later.

    Args:
        cls: The base class.
        *base_args: Positional arguments bound to the subclass.
        **base_kwargs: Keyword arguments bound to the subclass.

    Returns:
        A subclass of ``cls`` with the given arguments pre-bound.
    """
    class _Partial(cls):
        def __init__(self, *args, **kwargs):
            # Later kwargs win over the pre-bound ones.
            merged_kwargs = dict(base_kwargs, **kwargs)
            super(_Partial, self).__init__(*(base_args + args), **merged_kwargs)
    return _Partial
20,384 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import functools
The provided code snippet includes necessary dependencies for implementing the `parse_name` function. Write a Python function `def parse_name(string_to_parse)` to solve the following problem:
Parses input to the registry's lookup function. Args: string_to_parse: can be either an arbitrary name or a function call with keyword arguments only, e.g. "multiclass", "resnet50_v2(filters_factor=8)". Returns: A 2-tuple of the input name and a dictionary with keyword arguments. Examples: "multiclass" -> ("multiclass", {}) "resnet50_v2(filters_factor=4)" -> ("resnet50_v2", {"filters_factor": 4})
Here is the function:
def parse_name(string_to_parse):
    """Parses input to the registry's lookup function.

    Args:
        string_to_parse: either a plain (possibly dotted) name, or a call
            with keyword arguments only, e.g. "multiclass",
            "resnet50_v2(filters_factor=8)".

    Returns:
        A 2-tuple of the name and a dict of keyword arguments. Examples:
            "multiclass" -> ("multiclass", {})
            "resnet50_v2(filters_factor=4)"
                -> ("resnet50_v2", {"filters_factor": 4})
        (The previous docstring showed 3-tuples and positional arguments;
        the implementation returns 2-tuples and rejects positional args.)

    Raises:
        ValueError: if the string is not a name/attribute/call, or the call
            carries positional arguments.
    """
    expr = ast.parse(string_to_parse, mode="eval").body  # pytype: disable=attribute-error
    if not isinstance(expr, (ast.Attribute, ast.Call, ast.Name)):
        raise ValueError(
            "The given string should be a name or a call, but a {} was parsed from "
            "the string {!r}".format(type(expr), string_to_parse))
    # Notes:
    #   name="some_name"          -> ast.Name
    #   name="module.some_name"   -> ast.Attribute
    #   name="some_name()"        -> ast.Call (possibly with dotted func)
    if isinstance(expr, (ast.Name, ast.Attribute)):
        # No call syntax: the whole string is the name, no kwargs.
        return string_to_parse, {}

    def _get_func_name(node):
        # Reconstruct a dotted function name from Name/Attribute nodes.
        if isinstance(node, ast.Attribute):
            return _get_func_name(node.value) + "." + node.attr
        elif isinstance(node, ast.Name):
            return node.id
        else:
            raise ValueError(
                "Type {!r} is not supported in a function name, the string to parse "
                "was {!r}".format(type(node), string_to_parse))

    func_args = tuple(ast.literal_eval(arg) for arg in expr.args)
    if func_args:
        raise ValueError("Positional arguments are not supported here, but these "
                         "were found: {!r}".format(func_args))
    # Only literal keyword values are allowed (ast.literal_eval).
    func_kwargs = {
        kwarg.arg: ast.literal_eval(kwarg.value) for kwarg in expr.keywords
    }
    return _get_func_name(expr.func), func_kwargs
20,385 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
The provided code snippet includes necessary dependencies for implementing the `make_get_tensors_fn` function. Write a Python function `def make_get_tensors_fn(output_tensors)` to solve the following problem:
Create a function that outputs a collection of tensors from the dataset.
Here is the function:
def make_get_tensors_fn(output_tensors):
    """Create a function that outputs a collection of tensors from the dataset."""
    def _select(data):
        """Project `data` onto the requested tensor names."""
        return {name: data[name] for name in output_tensors}
    return _select
20,386 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
The provided code snippet includes necessary dependencies for implementing the `make_get_and_cast_tensors_fn` function. Write a Python function `def make_get_and_cast_tensors_fn(output_tensors)` to solve the following problem:
Create a function that gets and casts a set of tensors from the dataset. Optionally, you can also rename the tensors. Examples: # This simply gets "image" and "label" tensors without any casting. # Note that this is equivalent to make_get_tensors_fn(["image", "label"]). make_get_and_cast_tensors_fn({ "image": None, "label": None, }) # This gets the "image" tensor without any type conversion, casts the # "heatmap" tensor to tf.float32, and renames the tensor "class/label" to # "label" and casts it to tf.int64. make_get_and_cast_tensors_fn({ "image": None, "heatmap": tf.float32, "class/label": ("label", tf.int64), }) Args: output_tensors: dictionary specifying the set of tensors to get and cast from the dataset. Returns: The function performing the operation.
Here is the function:
def make_get_and_cast_tensors_fn(output_tensors):
    """Create a function that gets and casts a set of tensors from the dataset.

    Each value in `output_tensors` may be:
      * None                      -> keep the tensor as-is, same name;
      * a tf.dtypes.DType         -> cast, keep the same name;
      * a 2-tuple (new_name, dt)  -> rename to new_name, cast unless dt is None.

    Examples:
        make_get_and_cast_tensors_fn({
            "image": None,
            "heatmap": tf.float32,
            "class/label": ("label", tf.int64),
        })

    Args:
        output_tensors: dict mapping dataset tensor names to one of the
            specs above.

    Returns:
        The function performing the selection/renaming/casting.
    """
    def _normalized_specs():
        # AutoGraph does not support generators, so build a plain list of
        # (source_name, output_name, dtype_or_None) triples.
        specs = []
        for name, spec in output_tensors.items():
            if isinstance(spec, tuple) and len(spec) == 2:
                specs.append((name, spec[0], spec[1]))
            elif spec is None or isinstance(spec, tf.dtypes.DType):
                specs.append((name, name, spec))
            else:
                raise ValueError('Values of the output_tensors dictionary must be '
                                 'None, tf.dtypes.DType or 2-tuples.')
        return specs

    def _select_and_cast(data):
        """Get and cast tensors by name, optionally changing the name too."""
        result = {}
        for name, new_name, new_dtype in _normalized_specs():
            value = data[name]
            result[new_name] = value if new_dtype is None else tf.cast(value, new_dtype)
        return result
    return _select_and_cast
20,387 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
The provided code snippet includes necessary dependencies for implementing the `compose_preprocess_fn` function. Write a Python function `def compose_preprocess_fn(*functions)` to solve the following problem:
Compose two or more preprocessing functions. Args: *functions: Sequence of preprocess functions to compose. Returns: The composed function.
Here is the function:
def compose_preprocess_fn(*functions):
    """Compose two or more preprocessing functions, applied left to right.

    Args:
        *functions: Sequence of preprocess functions to compose. None
            entries are skipped (treated as the identity function).

    Returns:
        The composed function.
    """
    # Filter the None placeholders up front; `functions` is immutable.
    stages = [fn for fn in functions if fn is not None]

    def _pipeline(x):
        for stage in stages:
            x = stage(x)
        return x
    return _pipeline
20,388 | import functools
import tensorflow.compat.v1 as tf
import torch
import torch.utils.data
import numpy as np
from collections import Counter
from torch import Tensor
from ..vtab_datasets import base
from ..vtab_datasets import caltech
from ..vtab_datasets import cifar
from ..vtab_datasets import clevr
from ..vtab_datasets import diabetic_retinopathy
from ..vtab_datasets import dmlab
from ..vtab_datasets import dsprites
from ..vtab_datasets import dtd
from ..vtab_datasets import eurosat
from ..vtab_datasets import kitti
from ..vtab_datasets import oxford_flowers102
from ..vtab_datasets import oxford_iiit_pet
from ..vtab_datasets import patch_camelyon
from ..vtab_datasets import resisc45
from ..vtab_datasets import smallnorb
from ..vtab_datasets import sun397
from ..vtab_datasets import svhn
from ..vtab_datasets.registry import Registry
from ...utils import logging
# VTAB-1k task names accepted by build_tf_dataset below; each entry is a
# registry lookup string (kwargs embedded) for a registered dataset class.
DATASETS = [
    'caltech101',
    'cifar(num_classes=100)',
    'dtd',
    'oxford_flowers102',
    'oxford_iiit_pet',
    'patch_camelyon',
    'sun397',
    'svhn',
    'resisc45',
    'eurosat',
    'dmlab',
    'kitti(task="closest_vehicle_distance")',
    'smallnorb(predicted_attribute="label_azimuth")',
    'smallnorb(predicted_attribute="label_elevation")',
    'dsprites(predicted_attribute="label_x_position",num_classes=16)',
    'dsprites(predicted_attribute="label_orientation",num_classes=16)',
    'clevr(task="closest_object_distance")',
    'clevr(task="count_all")',
    'diabetic_retinopathy(config="btgraham-300")'
]
def preprocess_fn(data, size=224, input_range=(0.0, 1.0)):
    """Resize the example's image to `size` and rescale pixels into `input_range`."""
    lo, hi = input_range
    image = tf.image.resize(data["image"], [size, size])
    # Map pixel values from [0, 255] into [lo, hi].
    image = tf.cast(image, tf.float32) / 255.0
    data["image"] = image * (hi - lo) + lo
    return data
class Registry(object):
    """Implements global Registry.

    Maps string names to (item, item_type) pairs. NOTE(review): the methods
    below take no `self`/`cls` and are only ever called on the class itself
    (e.g. Registry.lookup(...)), i.e. they behave as implicit staticmethods.
    """

    # Process-wide name -> (item, item_type) mapping shared by all callers.
    _GLOBAL_REGISTRY = {}

    def global_registry():
        # Accessor for the process-wide registry dict.
        return Registry._GLOBAL_REGISTRY

    def register(name, item_type):
        """Creates a function that registers its input."""
        # Only plain functions and classes can be registered.
        if item_type not in ["function", "class"]:
            raise ValueError("Unknown item type: %s" % item_type)

        def _register(item):
            # Refuse duplicate names: registration is global and permanent.
            if name in Registry.global_registry():
                raise KeyError(
                    "The name {!r} was already registered in with type {!r}".format(
                        name, item_type))
            Registry.global_registry()[name] = (item, item_type)
            return item
        return _register

    def lookup(lookup_string, kwargs_extra=None):
        """Lookup a name in the registry."""
        # The lookup string may embed kwargs, e.g. 'cifar(num_classes=100)'.
        name, kwargs = parse_name(lookup_string)
        if kwargs_extra:
            kwargs.update(kwargs_extra)
        item, item_type = Registry.global_registry()[name]
        if item_type == "function":
            # Pre-bind the parsed kwargs to the registered function.
            return functools.partial(item, **kwargs)
        elif item_type == "class":
            # Pre-bind the parsed kwargs into a subclass constructor.
            return partialclass(item, **kwargs)
The provided code snippet includes necessary dependencies for implementing the `build_tf_dataset` function. Write a Python function `def build_tf_dataset(cfg, mode)` to solve the following problem:
Builds a tf data instance, then transform to a list of tensors and labels
Here is the function:
def build_tf_dataset(cfg, mode):
    """
    Builds a tf data instance, then transform to a list of tensors and labels

    Args:
        cfg: config node; reads cfg.DATA.NAME (expected to contain a
            "vtab-<dataset>" prefix), cfg.DATA.DATAPATH and cfg.DATA.CROPSIZE.
        mode: one of "train", "val", "test", "trainval"; selects the VTAB
            split and disables shuffling for non-train modes.

    Returns:
        A tf.data dataset (PrefetchDataset) yielding (image, label) tuples,
        batch size 1, a single epoch.

    Raises:
        ValueError: for an unsupported mode or an unknown VTAB dataset name.
    """
    # NOTE(review): "trainval" is accepted but missing from this message.
    if mode not in ["train", "val", "test", "trainval"]:
        raise ValueError("The input pipeline supports `train`, `val`, `test`."
                         "Provided mode is {}".format(mode))

    # Dataset name follows the "vtab-" prefix, e.g. "vtab-cifar(...)".
    vtab_dataname = cfg.DATA.NAME.split("vtab-")[-1]
    data_dir = cfg.DATA.DATAPATH
    if vtab_dataname in DATASETS:
        # Registry lookup returns the dataset class with kwargs pre-bound.
        data_cls = Registry.lookup("data." + vtab_dataname)
        vtab_tf_dataloader = data_cls(data_dir=data_dir)
    else:
        raise ValueError("Unknown type for \"dataset\" field: {}".format(
            type(vtab_dataname)))

    # VTAB's standard 800/200 train/val split naming.
    split_name_dict = {
        "dataset_train_split_name": "train800",
        "dataset_val_split_name": "val200",
        "dataset_trainval_split_name": "train800val200",
        "dataset_test_split_name": "test",
    }

    def _dict_to_tuple(batch):
        # Convert {"image": ..., "label": ...} examples into (image, label).
        return batch['image'], batch['label']

    return vtab_tf_dataloader.get_tf_data(
        batch_size=1,  # data_params["batch_size"],
        drop_remainder=False,
        split_name=split_name_dict[f"dataset_{mode}_split_name"],
        preprocess_fn=functools.partial(
            preprocess_fn,
            input_range=(0.0, 1.0),
            size=cfg.DATA.CROPSIZE,
        ),
        for_eval=mode != "train",  # handles shuffling
        shuffle_buffer_size=1000,
        prefetch=1,
        train_examples=None,
        epochs=1  # setting epochs to 1 make sure it returns one copy of the dataset
    ).map(_dict_to_tuple)  # return a PrefetchDataset object. (which does not have much documentation to go on)
20,389 | import functools
import tensorflow.compat.v1 as tf
import torch
import torch.utils.data
import numpy as np
from collections import Counter
from torch import Tensor
from ..vtab_datasets import base
from ..vtab_datasets import caltech
from ..vtab_datasets import cifar
from ..vtab_datasets import clevr
from ..vtab_datasets import diabetic_retinopathy
from ..vtab_datasets import dmlab
from ..vtab_datasets import dsprites
from ..vtab_datasets import dtd
from ..vtab_datasets import eurosat
from ..vtab_datasets import kitti
from ..vtab_datasets import oxford_flowers102
from ..vtab_datasets import oxford_iiit_pet
from ..vtab_datasets import patch_camelyon
from ..vtab_datasets import resisc45
from ..vtab_datasets import smallnorb
from ..vtab_datasets import sun397
from ..vtab_datasets import svhn
from ..vtab_datasets.registry import Registry
from ...utils import logging
def to_torch_imgs(img: np.ndarray, mean: Tensor, std: Tensor) -> Tensor:
    """Convert an HWC numpy image into a normalized CHW torch tensor.

    Args:
        img: image array of shape (H, W, C); expected to hold float values
            (e.g. in [0, 1]) so normalization is meaningful.
        mean: mean to subtract, broadcastable against the (C, H, W) tensor
            (scalar or shape (C, 1, 1)) — assumed; confirm against callers.
        std: std to divide by, broadcastable like `mean`.

    Returns:
        Normalized tensor of shape (C, H, W). The input array is left
        unmodified.
    """
    # FIX: `torch.from_numpy` shares memory with `img` (np.transpose returns
    # a view), so the original in-place `-=` / `/=` silently mutated the
    # caller's array. Use out-of-place arithmetic instead.
    t_img: Tensor = torch.from_numpy(np.transpose(img, (2, 0, 1)))
    return (t_img - mean) / std
20,390 | import math
import torch.optim as optim
from fvcore.common.config import CfgNode
from torch.optim.lr_scheduler import LambdaLR
class WarmupCosineSchedule(LambdaLR):
    """Linear warmup followed by cosine decay.

    The LR multiplier grows linearly from 0 to 1 over ``warmup_steps``,
    then follows a cosine curve from 1 down to 0 over the remaining
    ``t_total - warmup_steps`` steps. A non-default ``cycles`` value
    (default 0.5) changes how much of the cosine period is traversed
    after warmup.
    """

    def __init__(self, optimizer, warmup_steps, t_total, cycles=.5, last_epoch=-1):
        self.warmup_steps = warmup_steps
        self.t_total = t_total
        self.cycles = cycles
        super().__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)

    def lr_lambda(self, step):
        # Warmup phase: multiplier scales linearly with the step count.
        if step < self.warmup_steps:
            return step / max(1.0, self.warmup_steps)
        # Fraction of the post-warmup schedule completed, in [0, 1].
        span = max(1, self.t_total - self.warmup_steps)
        progress = (step - self.warmup_steps) / span
        cosine = math.cos(math.pi * float(self.cycles) * 2.0 * progress)
        # Clamp at zero so the multiplier never goes negative.
        return max(0.0, 0.5 * (1.0 + cosine))
class WarmupCosineWithHardRestartsSchedule(LambdaLR):
    """Linear warmup, then ``cycles`` cosine decays with hard restarts.

    The LR multiplier rises linearly from 0 to 1 over ``warmup_steps``;
    afterwards it repeats a cosine decay from 1 to 0, restarting
    ``cycles`` times (default 1.) before ``t_total``, and stays at 0 once
    training progress reaches 100%.
    """

    def __init__(self, optimizer, warmup_steps, t_total, cycles=1., last_epoch=-1):
        self.warmup_steps = warmup_steps
        self.t_total = t_total
        self.cycles = cycles
        super().__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)

    def lr_lambda(self, step):
        # Warmup phase: linear ramp from 0 to 1.
        if step < self.warmup_steps:
            return step / max(1, self.warmup_steps)
        # Fraction of post-warmup training completed.
        remaining = max(1, self.t_total - self.warmup_steps)
        progress = (step - self.warmup_steps) / remaining
        if progress >= 1.0:
            return 0.0
        # Wrap into the current cycle so the cosine restarts from 1.
        phase = (float(self.cycles) * progress) % 1.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * phase)))
def make_scheduler(
    optimizer: optim.Optimizer, train_params: CfgNode
) -> LambdaLR:
    """Create the LR scheduler selected by ``train_params.SCHEDULER``.

    "cosine" and "cosine_hardrestart" warm up for WARMUP_EPOCH epochs out
    of TOTAL_EPOCH; "plateau" decays when a max-mode metric stalls. Any
    other name yields None, leaving the learning rate constant.
    """
    warmup = train_params.WARMUP_EPOCH
    total_iters = train_params.TOTAL_EPOCH
    name = train_params.SCHEDULER
    if name == "cosine":
        return WarmupCosineSchedule(
            optimizer, warmup_steps=warmup, t_total=total_iters)
    if name == "cosine_hardrestart":
        return WarmupCosineWithHardRestartsSchedule(
            optimizer, warmup_steps=warmup, t_total=total_iters)
    if name == "plateau":
        return optim.lr_scheduler.ReduceLROnPlateau(
            optimizer,
            "max",
            patience=5,
            verbose=True,
            factor=train_params.LR_DECAY_FACTOR,
        )
    return None
20,391 | import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional
from ..utils import logging
# Registry of supported losses: maps cfg.SOLVER.LOSS names to loss classes.
# An entry mapped to a falsy value makes build_loss return None.
LOSS = {
    "softmax": SoftmaxLoss,
}
def build_loss(cfg):
    """Instantiate the loss selected by ``cfg.SOLVER.LOSS``.

    Asserts the name exists in the LOSS registry; returns None when the
    registry maps the name to a falsy entry, otherwise the constructed
    loss object.
    """
    loss_name = cfg.SOLVER.LOSS
    assert loss_name in LOSS, \
        f'loss name {loss_name} is not supported'
    loss_fn = LOSS[loss_name]
    return loss_fn(cfg) if loss_fn else None
20,392 | import math
import torch
from fvcore.common.config import CfgNode
from torch.optim import Optimizer
import torch.optim as optim
from typing import Any, Callable, Iterable, List, Tuple, Optional
from ..utils import logging
logger = logging.get_logger("visual_prompt")
class AdamW(Optimizer):
    """ Implements Adam algorithm with weight decay fix (decoupled weight
    decay: the decay is applied directly to the weights after the Adam
    update so it does not interact with the moment estimates).

    Parameters:
        lr (float): learning rate. Default 1e-3.
        betas (tuple of 2 floats): Adams beta parameters (b1, b2). Default: (0.9, 0.999)
        eps (float): Adams epsilon. Default: 1e-6
        weight_decay (float): Weight decay. Default: 0.0
        correct_bias (bool): can be set to False to avoid correcting bias in Adam (e.g. like in Bert TF repository). Default True.
    """
    def __init__(
        self,
        params: Iterable,
        lr: float = 1e-3,
        betas: Tuple[float, float] = (0.9, 0.999),
        eps: float = 1e-6,
        weight_decay: float = 0.0,
        correct_bias: bool = True
    ) -> None:
        if lr < 0.0:
            raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[1]))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(eps))
        defaults = {
            "lr": lr, "betas": betas, "eps": eps,
            "weight_decay": weight_decay, "correct_bias": correct_bias
        }
        super(AdamW, self).__init__(params, defaults)

    def step(self, closure: Optional[Callable] = None) -> Optional[Callable]:
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError(
                        "Adam does not support sparse gradients, "
                        "please consider SparseAdam instead")

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1

                # Decay the first and second moment running average coefficient.
                # FIX: the positional `add_(scalar, tensor)` / `addcmul_(scalar,
                # t1, t2)` / `addcdiv_(scalar, t1, t2)` signatures were
                # deprecated and removed in modern PyTorch; use the keyword
                # `alpha=` / `value=` forms (same math).
                exp_avg.mul_(beta1).add_(grad, alpha=1.0 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
                denom = exp_avg_sq.sqrt().add_(group['eps'])

                step_size = group['lr']
                if group['correct_bias']:  # No bias correction for Bert
                    bias_correction1 = 1.0 - beta1 ** state['step']
                    bias_correction2 = 1.0 - beta2 ** state['step']
                    step_size = step_size * math.sqrt(
                        bias_correction2) / bias_correction1

                # p <- p - step_size * exp_avg / denom
                p.data.addcdiv_(exp_avg, denom, value=-step_size)

                # Just adding the square of the weights to the loss function is *not*
                # the correct way of using L2 regularization/weight decay with Adam,
                # since that will interact with the m and v parameters in strange ways.
                #
                # Instead we want to decay the weights in a manner that doesn't interact
                # with the m/v parameters. This is equivalent to adding the square
                # of the weights to the loss with plain (non-momentum) SGD.
                # Add weight decay at the end (fixed version):
                # p <- p - lr * weight_decay * p, i.e. p *= (1 - lr * weight_decay).
                if group['weight_decay'] > 0.0:
                    p.data.mul_(1.0 - group['lr'] * group['weight_decay'])

        return loss
def make_optimizer(
    models: List[Any], train_params: "CfgNode"
) -> Optimizer:
    """Build an optimizer over the trainable parameters of ``models``.

    Only parameters with ``requires_grad=True`` are collected (from ALL
    models). ``train_params.OPTIMIZER`` selects 'adamw', 'adam', or SGD
    (fallback); ``WEIGHT_DECAY``, ``BASE_LR`` and ``MOMENTUM`` set the
    usual hyper-parameters. ``BIAS_MULTIPLIER`` scales the lr of bias
    terms (except the last layer's bias, which also gets no weight decay).
    """
    # Collect (name, tensor) pairs for all learnable parameters.
    params = []
    for model in models:
        # only include learnable params
        if train_params.DBG_TRAINABLE:
            logger.info("Trainable params:")

        for key, value in model.named_parameters():
            if value.requires_grad:
                if train_params.DBG_TRAINABLE:
                    logger.info("\t{}, {}, {}".format(key, value.numel(), value.shape))
                params.append((key, value))

    if train_params.WEIGHT_DECAY > 0:
        if train_params.OPTIMIZER == 'adamw':
            # AdamW: decay everything except biases and LayerNorm params.
            no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
            optimizer_grouped_parameters = [
                {'params': [p for n, p in params
                            if not any(nd in n for nd in no_decay)],
                 'weight_decay': 0.01},
                {'params': [p for n, p in params
                            if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
            ]
            optimizer = AdamW(
                optimizer_grouped_parameters,
                lr=train_params.BASE_LR,
            )
        else:
            # One param group per tensor so lr / weight decay can vary.
            _params = []
            for key, value in params:
                lr = train_params.BASE_LR
                weight_decay = train_params.WEIGHT_DECAY
                if "last_layer.bias" in key:
                    # no regularization (weight decay) for last layer's bias
                    weight_decay = 0.0

                if train_params.BIAS_MULTIPLIER == 1.:
                    _params += [{
                        "params": [value],
                        "lr": lr,
                        "weight_decay": weight_decay
                    }]
                else:
                    if "bias" in key and "last_layer.bias" not in key:
                        # use updated lr for this param
                        lr_value = lr * train_params.BIAS_MULTIPLIER
                    else:
                        lr_value = lr

                    if train_params.DBG_TRAINABLE:
                        logger.info("\t{}, {:.4f}".format(key, lr_value))

                    _params += [{
                        "params": [value],
                        "lr": lr_value,
                        "weight_decay": weight_decay
                    }]

            if train_params.OPTIMIZER == 'adam':
                optimizer = optim.Adam(
                    _params,
                    lr=train_params.BASE_LR,
                    weight_decay=train_params.WEIGHT_DECAY,
                )
            else:
                optimizer = optim.SGD(
                    _params,
                    train_params.BASE_LR,
                    momentum=train_params.MOMENTUM,
                    weight_decay=train_params.WEIGHT_DECAY
                )
        return optimizer
    else:
        if train_params.OPTIMIZER == 'adam':
            # FIX: previously used `model.parameters()`, i.e. only the LAST
            # model of the loop above and including frozen parameters. Use
            # the trainable params collected from all models instead.
            optimizer = optim.Adam(
                [value for _, value in params],
                lr=train_params.BASE_LR
            )
        else:
            _params = []
            for key, value in params:
                lr = train_params.BASE_LR
                if train_params.BIAS_MULTIPLIER == 1.:
                    _params += [{
                        "params": [value],
                        "lr": lr,
                    }]
                else:
                    if "bias" in key and "last_layer.bias" not in key:
                        # use updated lr for this param
                        lr_value = lr * train_params.BIAS_MULTIPLIER
                    else:
                        lr_value = lr

                    if train_params.DBG_TRAINABLE:
                        logger.info("\t{}, {:.4f}".format(key, lr_value))

                    _params += [{
                        "params": [value],
                        "lr": lr_value,
                    }]
            optimizer = optim.SGD(
                _params,
                train_params.BASE_LR,
                momentum=train_params.MOMENTUM,
            )
        return optimizer
20,393 | import ml_collections
The provided code snippet includes necessary dependencies for implementing the `get_testing` function. Write a Python function `def get_testing()` to solve the following problem:
Returns a minimal configuration for testing.
Here is the function:
def get_testing():
    """Returns a minimal configuration for testing."""
    # Smallest possible transformer: one layer, one head, 1-dim everything.
    transformer = ml_collections.ConfigDict()
    transformer.mlp_dim = 1
    transformer.num_heads = 1
    transformer.num_layers = 1
    transformer.attention_dropout_rate = 0.0
    transformer.dropout_rate = 0.1

    config = ml_collections.ConfigDict()
    config.patches = ml_collections.ConfigDict({'size': (16, 16)})
    config.hidden_size = 1
    config.transformer = transformer
    config.classifier = 'token'
    config.representation_size = None
    return config
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.