id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
162,327 | import torch
import torch.nn.functional as F
from torch import nn
from torchtyping import TensorType
from torch.autograd import Variable
import numpy as np
from math import exp
from nerfstudio.cameras.rays import RaySamples
from nerfstudio.field_components.field_heads import FieldHeadNames
The provided code snippet includes necessary dependencies for implementing the `orientation_loss` function. Write a Python function `def orientation_loss( weights: TensorType["bs":..., "num_samples", 1], normals: TensorType["bs":..., "num_samples", 3], viewdirs: TensorType["bs":..., 3], )` to solve the following problem:
Orientation loss proposed in Ref-NeRF. Loss that encourages that all visible normals are facing towards the camera.
Here is the function:
def orientation_loss(
    weights: 'TensorType["bs":..., "num_samples", 1]',
    normals: 'TensorType["bs":..., "num_samples", 3]',
    viewdirs: 'TensorType["bs":..., 3]',
):
    """Orientation loss proposed in Ref-NeRF.

    Penalizes sample normals that point away from the camera so that all
    visible normals end up facing the viewer. Each sample's penalty is the
    squared negative part of the normal/view-direction dot product, scaled
    by the sample's rendering weight and summed along the ray.
    """
    # Dot product of each sample normal with the (broadcast) view direction.
    dots = (normals * viewdirs[..., None, :]).sum(axis=-1)
    # Keep only the negative part (back-facing normals); front-facing samples
    # contribute zero. torch.fmin matches the original NaN behavior.
    backfacing = torch.fmin(torch.zeros_like(dots), dots)
    return (weights[..., 0] * backfacing**2).sum(dim=-1)
162,328 | import torch
import torch.nn.functional as F
from torch import nn
from torchtyping import TensorType
from torch.autograd import Variable
import numpy as np
from math import exp
from nerfstudio.cameras.rays import RaySamples
from nerfstudio.field_components.field_heads import FieldHeadNames
The provided code snippet includes necessary dependencies for implementing the `pred_normal_loss` function. Write a Python function `def pred_normal_loss( weights: TensorType["bs":..., "num_samples", 1], normals: TensorType["bs":..., "num_samples", 3], pred_normals: TensorType["bs":..., "num_samples", 3], )` to solve the following problem:
Loss between normals calculated from density and normals from prediction network.
Here is the function:
def pred_normal_loss(
    weights: 'TensorType["bs":..., "num_samples", 1]',
    normals: 'TensorType["bs":..., "num_samples", 3]',
    pred_normals: 'TensorType["bs":..., "num_samples", 3]',
):
    """Loss between normals calculated from density and normals from the
    prediction network.

    Computes one minus the dot product of the two normal fields per sample,
    weights it by the rendering weights, and sums along the ray.
    """
    cos_sim = torch.sum(normals * pred_normals, dim=-1)
    per_sample = weights[..., 0] * (1.0 - cos_sim)
    return per_sample.sum(dim=-1)
162,329 | import torch
import torch.nn.functional as F
from torch import nn
from torchtyping import TensorType
from torch.autograd import Variable
import numpy as np
from math import exp
from nerfstudio.cameras.rays import RaySamples
from nerfstudio.field_components.field_heads import FieldHeadNames
The provided code snippet includes necessary dependencies for implementing the `monosdf_normal_loss` function. Write a Python function `def monosdf_normal_loss(normal_pred: torch.Tensor, normal_gt: torch.Tensor)` to solve the following problem:
normal consistency loss as monosdf Args: normal_pred (torch.Tensor): volume rendered normal normal_gt (torch.Tensor): monocular normal
Here is the function:
def monosdf_normal_loss(normal_pred: torch.Tensor, normal_gt: torch.Tensor):
    """normal consistency loss as monosdf
    Args:
        normal_pred (torch.Tensor): volume rendered normal
        normal_gt (torch.Tensor): monocular normal
    Returns:
        Scalar loss: mean per-pixel L1 distance plus mean (1 - cosine
        similarity) between the two unit-normalized normal fields.
    """
    # Normalize both fields so the L1 and cosine terms compare unit vectors.
    unit_gt = F.normalize(normal_gt, p=2, dim=-1)
    unit_pred = F.normalize(normal_pred, p=2, dim=-1)
    l1_term = (unit_pred - unit_gt).abs().sum(dim=-1).mean()
    cos_term = (1.0 - (unit_pred * unit_gt).sum(dim=-1)).mean()
    return l1_term + cos_term
162,330 | import torch
import torch.nn.functional as F
from torch import nn
from torchtyping import TensorType
from torch.autograd import Variable
import numpy as np
from math import exp
from nerfstudio.cameras.rays import RaySamples
from nerfstudio.field_components.field_heads import FieldHeadNames
def compute_scale_and_shift(prediction, target, mask):
    """Solve, per batch element, the masked least-squares problem
    target ~= scale * prediction + shift.

    Args:
        prediction: (batch, H, W) predicted map (e.g. inverse depth).
        target: (batch, H, W) reference map.
        mask: (batch, H, W) validity mask selecting contributing pixels.

    Returns:
        Tuple (scale, shift), each of shape (batch,). Batch entries whose
        2x2 normal system is singular are left at zero.
    """
    dims = (1, 2)
    # Entries of the symmetric normal matrix A = [[a_00, a_01], [a_01, a_11]].
    a_00 = torch.sum(mask * prediction * prediction, dims)
    a_01 = torch.sum(mask * prediction, dims)
    a_11 = torch.sum(mask, dims)
    # Right-hand side b = [b_0, b_1].
    b_0 = torch.sum(mask * prediction * target, dims)
    b_1 = torch.sum(mask * target, dims)

    scale = torch.zeros_like(b_0)
    shift = torch.zeros_like(b_1)

    # Cramer's rule, applied only where the determinant is non-zero so
    # degenerate batches stay at (0, 0) instead of producing inf/nan.
    det = a_00 * a_11 - a_01 * a_01
    ok = det.nonzero()
    scale[ok] = (a_11[ok] * b_0[ok] - a_01[ok] * b_1[ok]) / det[ok]
    shift[ok] = (-a_01[ok] * b_0[ok] + a_00[ok] * b_1[ok]) / det[ok]
    return scale, shift
162,331 | import torch
import torch.nn.functional as F
from torch import nn
from torchtyping import TensorType
from torch.autograd import Variable
import numpy as np
from math import exp
from nerfstudio.cameras.rays import RaySamples
from nerfstudio.field_components.field_heads import FieldHeadNames
def reduction_image_based(image_loss, M):
    """Mean over the batch of each image's loss averaged over its valid pixels.

    Images with M == 0 are skipped in the division to avoid dividing by
    zero (their loss is expected to already be 0 in that case).

    NOTE: `image_loss` is normalized in place; callers see the mutation.
    """
    has_valid = M.nonzero()
    image_loss[has_valid] = image_loss[has_valid] / M[has_valid]
    return torch.mean(image_loss)
162,332 | import torch
import torch.nn.functional as F
from torch import nn
from torchtyping import TensorType
from torch.autograd import Variable
import numpy as np
from math import exp
from nerfstudio.cameras.rays import RaySamples
from nerfstudio.field_components.field_heads import FieldHeadNames
def reduction_batch_based(image_loss, M):
    """Average the summed losses over all valid pixels of the whole batch.

    Returns 0 when sum(M) == 0 to avoid division by zero (in that case the
    loss sum is 0 as well).
    """
    denom = torch.sum(M)
    if denom == 0:
        return 0
    return torch.sum(image_loss) / denom


def mse_loss(prediction, target, mask, reduction=reduction_batch_based):
    """Masked mean-squared-error loss over (batch, H, W) maps.

    The squared residual is masked and summed per image, then reduced with
    `reduction`; the divisor passed on is 2 * M (factor of 2 matches the
    original formulation).
    """
    valid_counts = torch.sum(mask, (1, 2))
    residual = prediction - target
    per_image = torch.sum(mask * residual * residual, (1, 2))
    return reduction(per_image, 2 * valid_counts)
162,333 | import torch
import torch.nn.functional as F
from torch import nn
from torchtyping import TensorType
from torch.autograd import Variable
import numpy as np
from math import exp
from nerfstudio.cameras.rays import RaySamples
from nerfstudio.field_components.field_heads import FieldHeadNames
def reduction_batch_based(image_loss, M):
    """Average of all valid pixels of the batch.

    Returns 0 when sum(M) == 0 to avoid division by zero (in that case the
    loss sum is 0 as well).
    """
    denom = torch.sum(M)
    return 0 if denom == 0 else torch.sum(image_loss) / denom


def gradient_loss(prediction, target, mask, reduction=reduction_batch_based):
    """Masked image-gradient matching loss over (batch, H, W) maps.

    Sums the absolute x- and y-gradients of the masked residual; a gradient
    term is valid only where both neighbouring pixels are valid.
    """
    valid_counts = torch.sum(mask, (1, 2))
    residual = torch.mul(mask, prediction - target)

    # Horizontal differences, masked where either neighbour is invalid.
    gx = torch.abs(residual[:, :, 1:] - residual[:, :, :-1])
    gx = torch.mul(torch.mul(mask[:, :, 1:], mask[:, :, :-1]), gx)

    # Vertical differences, masked where either neighbour is invalid.
    gy = torch.abs(residual[:, 1:, :] - residual[:, :-1, :])
    gy = torch.mul(torch.mul(mask[:, 1:, :], mask[:, :-1, :]), gy)

    per_image = torch.sum(gx, (1, 2)) + torch.sum(gy, (1, 2))
    return reduction(per_image, valid_counts)
162,334 | import numpy as np
import torch
from torch import nn
from nerfstudio.cameras.cameras import Cameras
from nerfstudio.cameras.rays import RaySamples
class RaySamples(TensorDataclass):
    """Samples along a ray.

    Bundles per-sample geometry (frustums, deltas, bin spacings) and
    provides the standard volume-rendering conversions from densities or
    alphas to compositing weights and transmittance.
    """

    frustums: Frustums
    """Frustums along ray."""
    camera_indices: Optional[TensorType["bs":..., 1]] = None
    """Camera index."""
    deltas: Optional[TensorType["bs":..., 1]] = None
    """"width" of each sample."""
    spacing_starts: Optional[TensorType["bs":..., "num_samples", 1]] = None
    """Start of normalized bin edges along ray [0,1], before warping is applied, ie. linear in disparity sampling."""
    spacing_ends: Optional[TensorType["bs":..., "num_samples", 1]] = None
    """End of normalized bin edges along ray [0,1], before warping is applied, ie. linear in disparity sampling."""
    spacing_to_euclidean_fn: Optional[Callable] = None
    """Function to convert bins to euclidean distance."""
    metadata: Optional[Dict[str, TensorType["bs":..., "latent_dims"]]] = None
    """Additional information relevant to generating ray samples."""
    times: Optional[TensorType[..., 1]] = None
    """Times at which rays are sampled"""

    def get_alphas(self, densities: TensorType[..., "num_samples", 1]) -> TensorType[..., "num_samples", 1]:
        """Return per-sample alphas (segment opacities) from predicted densities.

        Args:
            densities: Predicted densities for samples along ray
        Returns:
            Alpha for each sample: 1 - exp(-delta * density)
        """
        # Optical depth of each segment; requires self.deltas to be set.
        delta_density = self.deltas * densities
        alphas = 1 - torch.exp(-delta_density)
        return alphas

    def get_weights(self, densities: TensorType[..., "num_samples", 1]) -> TensorType[..., "num_samples", 1]:
        """Return compositing weights based on predicted densities.

        Args:
            densities: Predicted densities for samples along ray
        Returns:
            Weights for each sample: alpha_i * T_i, where T_i is the
            transmittance accumulated before sample i.
        """
        delta_density = self.deltas * densities
        alphas = 1 - torch.exp(-delta_density)
        # Cumulative optical depth up to (excluding) each sample; a zero is
        # prepended so the first sample has full transmittance.
        # NOTE(review): the zeros shape uses shape[:1], which assumes a single
        # leading batch dimension — TODO confirm for multi-batch inputs.
        transmittance = torch.cumsum(delta_density[..., :-1, :], dim=-2)
        transmittance = torch.cat(
            [torch.zeros((*transmittance.shape[:1], 1, 1), device=densities.device), transmittance], dim=-2
        )
        transmittance = torch.exp(-transmittance)  # [..., "num_samples"]
        weights = alphas * transmittance  # [..., "num_samples"]
        return weights

    def get_weights_and_transmittance(
        self, densities: TensorType[..., "num_samples", 1]
    ) -> Tuple[TensorType[..., "num_samples", 1], TensorType[..., "num_samples", 1]]:
        """Return weights and transmittance based on predicted densities.

        Args:
            densities: Predicted densities for samples along ray
        Returns:
            Weights and transmittance for each sample
        """
        # Same computation as get_weights, but the transmittance is also
        # returned to the caller.
        delta_density = self.deltas * densities
        alphas = 1 - torch.exp(-delta_density)
        transmittance = torch.cumsum(delta_density[..., :-1, :], dim=-2)
        transmittance = torch.cat(
            [torch.zeros((*transmittance.shape[:1], 1, 1), device=densities.device), transmittance], dim=-2
        )
        transmittance = torch.exp(-transmittance)  # [..., "num_samples"]
        weights = alphas * transmittance  # [..., "num_samples"]
        return weights, transmittance

    def get_weights_from_alphas(self, alphas: TensorType[..., "num_samples", 1]) -> TensorType[..., "num_samples", 1]:
        """Return compositing weights based on predicted alphas.

        Args:
            alphas: Predicted alphas (maybe from sdf) for samples along ray
        Returns:
            Weights for each sample
        """
        # Transmittance is the running product of (1 - alpha); the 1e-7
        # epsilon keeps the product strictly positive for stability, and the
        # prepended ones give the first sample full transmittance.
        transmittance = torch.cumprod(
            torch.cat([torch.ones((*alphas.shape[:1], 1, 1), device=alphas.device), 1.0 - alphas + 1e-7], 1), 1
        )  # [..., "num_samples"]
        weights = alphas * transmittance[:, :-1, :]  # [..., "num_samples"]
        return weights

    def get_weights_and_transmittance_from_alphas(
        self, alphas: TensorType[..., "num_samples", 1]
    ) -> Tuple[TensorType[..., "num_samples", 1], TensorType[..., "num_samples", 1]]:
        """Return weights and transmittance based on predicted alphas.

        Args:
            alphas: Predicted alphas (maybe from sdf) for samples along ray
        Returns:
            Weights and transmittance for each sample. Note the returned
            transmittance has one more entry along the sample dimension than
            the weights (it includes the post-last-sample value).
        """
        transmittance = torch.cumprod(
            torch.cat([torch.ones((*alphas.shape[:1], 1, 1), device=alphas.device), 1.0 - alphas + 1e-7], 1), 1
        )  # [..., "num_samples"]
        weights = alphas * transmittance[:, :-1, :]  # [..., "num_samples"]
        return weights, transmittance
The provided code snippet includes necessary dependencies for implementing the `get_intersection_points` function. Write a Python function `def get_intersection_points( ray_samples: RaySamples, sdf: torch.Tensor, normal: torch.Tensor, in_image_mask: torch.Tensor )` to solve the following problem:
compute intersection points Args: ray_samples (RaySamples): _description_ sdf (torch.Tensor): _description_ normal (torch.Tensor): _description_ in_image_mask (torch.Tensor): we only use the rays in the range of [half_patch:h-half_patch, half_patch:w-half_patch] Returns: _type_: _description_
Here is the function:
def get_intersection_points(
    ray_samples: RaySamples, sdf: torch.Tensor, normal: torch.Tensor, in_image_mask: torch.Tensor
):
    """Compute ray/surface intersection points from SDF samples.

    For each ray, finds the first sample interval where the SDF changes sign
    from positive to negative (outside -> inside the surface), linearly
    interpolates the intersection depth and normal within that interval, and
    discards intersections whose normal is nearly perpendicular to the ray.

    Args:
        ray_samples (RaySamples): samples along each ray; assumed shaped
            (n_rays, n_samples) — TODO confirm against callers
        sdf (torch.Tensor): SDF values per sample, indexed as [ray, sample, 0]
        normal (torch.Tensor): normals per sample, indexed as [ray, sample, :]
        in_image_mask (torch.Tensor): we only use the rays in the range of
            [half_patch:h-half_patch, half_patch:w-half_patch]
    Returns:
        Tuple (intersection_points, points_normal, new_mask): interpolated
        3D points and unit normals for the surviving rays, plus the per-ray
        boolean mask of which rays produced a valid intersection.
    """
    # TODO we should support different ways to compute intersections
    # Calculate if sign change occurred and concat 1 (no sign change) in
    # last dimension
    n_rays, n_samples = ray_samples.shape
    starts = ray_samples.frustums.starts
    sign_matrix = torch.cat([torch.sign(sdf[:, :-1, 0] * sdf[:, 1:, 0]), torch.ones(n_rays, 1).to(sdf.device)], dim=-1)
    # Weight earlier samples more heavily so argmin picks the FIRST sign change.
    cost_matrix = sign_matrix * torch.arange(n_samples, 0, -1).float().to(sdf.device)
    # Get first sign change and mask for values where a.) a sign changed
    # occurred and b.) no a neg to pos sign change occurred (meaning from
    # inside surface to outside)
    values, indices = torch.min(cost_matrix, -1)
    mask_sign_change = values < 0
    mask_pos_to_neg = sdf[torch.arange(n_rays), indices, 0] > 0
    # Define mask where a valid depth value is found
    mask = mask_sign_change & mask_pos_to_neg & in_image_mask
    # Get depth values and function values for the interval
    d_low = starts[torch.arange(n_rays), indices, 0][mask]
    v_low = sdf[torch.arange(n_rays), indices, 0][mask]
    n_low = normal[torch.arange(n_rays), indices, :][mask]
    # Advance to the far end of the interval (clamped at the last sample).
    indices = torch.clamp(indices + 1, max=n_samples - 1)
    d_high = starts[torch.arange(n_rays), indices, 0][mask]
    v_high = sdf[torch.arange(n_rays), indices, 0][mask]
    n_high = normal[torch.arange(n_rays), indices, :][mask]
    # linear-interpolations or run secant method to refine depth
    z = (v_low * d_high - v_high * d_low) / (v_low - v_high)
    # make this simpler
    origins = ray_samples.frustums.origins[torch.arange(n_rays), indices, :][mask]
    directions = ray_samples.frustums.directions[torch.arange(n_rays), indices, :][mask]
    intersection_points = origins + directions * z[..., None]
    # interpolate normal for simplicity so we don't need to call the model again
    points_normal = (v_low[..., None] * n_high - v_high[..., None] * n_low) / (v_low[..., None] - v_high[..., None])
    points_normal = torch.nn.functional.normalize(points_normal, dim=-1, p=2)
    # filter normals that are perpendicular to view directions
    valid = (points_normal * directions).sum(dim=-1).abs() > 0.1
    intersection_points = intersection_points[valid]
    points_normal = points_normal[valid]
    # Fold the perpendicularity filter back into the per-ray mask.
    new_mask = mask.clone()
    new_mask[mask] &= valid
    return intersection_points, points_normal, new_mask
162,335 | import numpy as np
import torch
from torch import nn
from nerfstudio.cameras.cameras import Cameras
from nerfstudio.cameras.rays import RaySamples
class Cameras(TensorDataclass):
"""Dataparser outputs for the image dataset and the ray generator.
Note: currently only supports cameras with the same principal points and types. The reason we type
the focal lengths, principal points, and image sizes as tensors is to allow for batched cameras
down the line in cases where your batches of camera data don't come from the same cameras.
If a single value is provided, it is broadcasted to all cameras.
Args:
camera_to_worlds: Camera to world matrices. Tensor of per-image c2w matrices, in [R | t] format
fx: Focal length x
fy: Focal length y
cx: Principal point x
cy: Principal point y
width: Image width
height: Image height
distortion_params: OpenCV 6 radial distortion coefficients
camera_type: Type of camera model. This will be an int corresponding to the CameraType enum.
times: Timestamps for each camera
"""
camera_to_worlds: TensorType["num_cameras":..., 3, 4]
fx: TensorType["num_cameras":..., 1]
fy: TensorType["num_cameras":..., 1]
cx: TensorType["num_cameras":..., 1]
cy: TensorType["num_cameras":..., 1]
width: TensorType["num_cameras":..., 1]
height: TensorType["num_cameras":..., 1]
distortion_params: Optional[TensorType["num_cameras":..., 6]]
camera_type: TensorType["num_cameras":..., 1]
times: Optional[TensorType["num_cameras", 1]]
def __init__(
self,
camera_to_worlds: TensorType["batch_c2ws":..., 3, 4],
fx: Union[TensorType["batch_fxs":..., 1], float],
fy: Union[TensorType["batch_fys":..., 1], float],
cx: Union[TensorType["batch_cxs":..., 1], float],
cy: Union[TensorType["batch_cys":..., 1], float],
width: Optional[Union[TensorType["batch_ws":..., 1], int]] = None,
height: Optional[Union[TensorType["batch_hs":..., 1], int]] = None,
distortion_params: Optional[TensorType["batch_dist_params":..., 6]] = None,
camera_type: Optional[
Union[
TensorType["batch_cam_types":..., 1],
int,
List[CameraType],
CameraType,
]
] = CameraType.PERSPECTIVE,
times: Optional[TensorType["num_cameras"]] = None,
):
"""Initializes the Cameras object.
Note on Input Tensor Dimensions: All of these tensors have items of dimensions TensorType[3, 4]
(in the case of the c2w matrices), TensorType[6] (in the case of distortion params), or
TensorType[1] (in the case of the rest of the elements). The dimensions before that are
considered the batch dimension of that tensor (batch_c2ws, batch_fxs, etc.). We will broadcast
all the tensors to be the same batch dimension. This means you can use any combination of the
input types in the function signature and it won't break. Your batch size for all tensors
must be broadcastable to the same size, and the resulting number of batch dimensions will be
the batch dimension with the largest number of dimensions.
"""
# This will notify the tensordataclass that we have a field with more than 1 dimension
self._field_custom_dimensions = {"camera_to_worlds": 2}
self.camera_to_worlds = camera_to_worlds
# fx fy calculation
self.fx = self._init_get_fc_xy(fx, "fx") # @dataclass's post_init will take care of broadcasting
self.fy = self._init_get_fc_xy(fy, "fy") # @dataclass's post_init will take care of broadcasting
# cx cy calculation
self.cx = self._init_get_fc_xy(cx, "cx") # @dataclass's post_init will take care of broadcasting
self.cy = self._init_get_fc_xy(cy, "cy") # @dataclass's post_init will take care of broadcasting
# Distortion Params Calculation:
self.distortion_params = distortion_params # @dataclass's post_init will take care of broadcasting
# @dataclass's post_init will take care of broadcasting
self.height = self._init_get_height_width(height, self.cy)
self.width = self._init_get_height_width(width, self.cx)
self.camera_type = self._init_get_camera_type(camera_type)
self.times = self._init_get_times(times)
self.__post_init__() # This will do the dataclass post_init and broadcast all the tensors
def _init_get_fc_xy(self, fc_xy, name):
"""
Parses the input focal length / principle point x or y and returns a tensor of the correct shape
Only needs to make sure that we a 1 in the last dimension if it is a tensor. If it is a float, we
just need to make it into a tensor and it will be broadcasted later in the __post_init__ function.
Args:
fc_xy: The focal length / principle point x or y
name: The name of the variable. Used for error messages
"""
if isinstance(fc_xy, float):
fc_xy = torch.Tensor([fc_xy], device=self.device)
elif isinstance(fc_xy, torch.Tensor):
if fc_xy.ndim == 0 or fc_xy.shape[-1] != 1:
fc_xy = fc_xy.unsqueeze(-1)
fc_xy = fc_xy.to(self.device)
else:
raise ValueError(f"{name} must be a float or tensor, got {type(fc_xy)}")
return fc_xy
def _init_get_camera_type(
self,
camera_type: Union[
TensorType["batch_cam_types":..., 1], TensorType["batch_cam_types":...], int, List[CameraType], CameraType
],
) -> TensorType["num_cameras":..., 1]:
"""
Parses the __init__() argument camera_type
Camera Type Calculation:
If CameraType, convert to int and then to tensor, then broadcast to all cameras
If List of CameraTypes, convert to ints and then to tensor, then broadcast to all cameras
If int, first go to tensor and then broadcast to all cameras
If tensor, broadcast to all cameras
Args:
camera_type: camera_type argument from __init__()
"""
if isinstance(camera_type, CameraType):
camera_type = torch.tensor([camera_type.value], device=self.device)
elif isinstance(camera_type, List) and isinstance(camera_type[0], CameraType):
camera_type = torch.tensor([[c.value] for c in camera_type], device=self.device)
elif isinstance(camera_type, int):
camera_type = torch.tensor([camera_type], device=self.device)
elif isinstance(camera_type, torch.Tensor):
assert not torch.is_floating_point(
camera_type
), f"camera_type tensor must be of type int, not: {camera_type.dtype}"
camera_type = camera_type.to(self.device)
if camera_type.ndim == 0 or camera_type.shape[-1] != 1:
camera_type = camera_type.unsqueeze(-1)
# assert torch.all(
# camera_type.view(-1)[0] == camera_type
# ), "Batched cameras of different camera_types will be allowed in the future."
else:
raise ValueError(
'Invalid camera_type. Must be CameraType, List[CameraType], int, or torch.Tensor["num_cameras"]. \
Received: '
+ str(type(camera_type))
)
return camera_type
def _init_get_height_width(
self,
h_w: Union[TensorType["batch_hws":..., 1], TensorType["batch_hws":...], int, None],
c_x_y: TensorType["batch_cxys":...],
) -> TensorType["num_cameras":..., 1]:
"""
Parses the __init__() argument for height or width
Height/Width Calculation:
If int, first go to tensor and then broadcast to all cameras
If tensor, broadcast to all cameras
If none, use cx or cy * 2
Else raise error
Args:
h_w: height or width argument from __init__()
c_x_y: cx or cy for when h_w == None
"""
if isinstance(h_w, int):
h_w = torch.Tensor([h_w]).to(torch.int64).to(self.device)
elif isinstance(h_w, torch.Tensor):
assert not torch.is_floating_point(h_w), f"height and width tensor must be of type int, not: {h_w.dtype}"
h_w = h_w.to(torch.int64).to(self.device)
if h_w.ndim == 0 or h_w.shape[-1] != 1:
h_w = h_w.unsqueeze(-1)
# assert torch.all(h_w == h_w.view(-1)[0]), "Batched cameras of different h, w will be allowed in the future."
elif h_w is None:
h_w = torch.Tensor((c_x_y * 2).to(torch.int64).to(self.device))
else:
raise ValueError("Height must be an int, tensor, or None, received: " + str(type(h_w)))
return h_w
def _init_get_times(self, times):
if times is None:
times = None
elif isinstance(times, torch.Tensor):
if times.ndim == 0 or times.shape[-1] != 1:
times = times.unsqueeze(-1).to(self.device)
else:
raise ValueError(f"times must be None or a tensor, got {type(times)}")
return times
def device(self):
"""Returns the device that the camera is on."""
return self.camera_to_worlds.device
def image_height(self) -> TensorType["num_cameras":..., 1]:
"""Returns the height of the images."""
return self.height
def image_width(self) -> TensorType["num_cameras":..., 1]:
"""Returns the height of the images."""
return self.width
def is_jagged(self):
"""
Returns whether or not the cameras are "jagged" (i.e. the height and widths are different, meaning that
you cannot concatenate the image coordinate maps together)
"""
h_jagged = not torch.all(self.height == self.height.view(-1)[0])
w_jagged = not torch.all(self.width == self.width.view(-1)[0])
return h_jagged or w_jagged
def get_image_coords(
self, pixel_offset: float = 0.5, index: Optional[Tuple] = None
) -> TensorType["height", "width", 2]:
"""This gets the image coordinates of one of the cameras in this object.
If no index is specified, it will return the maximum possible sized height / width image coordinate map,
by looking at the maximum height and width of all the cameras in this object.
Args:
pixel_offset: Offset for each pixel. Defaults to center of pixel (0.5)
index: Tuple of indices into the batch dimensions of the camera. Defaults to None, which returns the 0th
flattened camera
Returns:
Grid of image coordinates.
"""
if index is None:
image_height = torch.max(self.image_height.view(-1))
image_width = torch.max(self.image_width.view(-1))
image_coords = torch.meshgrid(torch.arange(image_height), torch.arange(image_width), indexing="ij")
image_coords = torch.stack(image_coords, dim=-1) + pixel_offset # stored as (y, x) coordinates
else:
image_height = self.image_height[index].item()
image_width = self.image_width[index].item()
image_coords = torch.meshgrid(torch.arange(image_height), torch.arange(image_width), indexing="ij")
image_coords = torch.stack(image_coords, dim=-1) + pixel_offset # stored as (y, x) coordinates
return image_coords
def generate_rays( # pylint: disable=too-many-statements
self,
camera_indices: Union[TensorType["num_rays":..., "num_cameras_batch_dims"], int],
coords: Optional[TensorType["num_rays":..., 2]] = None,
camera_opt_to_camera: Optional[TensorType["num_rays":..., 3, 4]] = None,
distortion_params_delta: Optional[TensorType["num_rays":..., 6]] = None,
keep_shape: Optional[bool] = None,
disable_distortion: bool = False,
) -> RayBundle:
"""Generates rays for the given camera indices.
This function will standardize the input arguments and then call the _generate_rays_from_coords function
to generate the rays. Our goal is to parse the arguments and then get them into the right shape:
- camera_indices: (num_rays:..., num_cameras_batch_dims)
- coords: (num_rays:..., 2)
- camera_opt_to_camera: (num_rays:..., 3, 4) or None
- distortion_params_delta: (num_rays:..., 6) or None
Read the docstring for _generate_rays_from_coords for more information on how we generate the rays
after we have standardized the arguments.
We are only concerned about different combinations of camera_indices and coords matrices, and the following
are the 4 cases we have to deal with:
1. isinstance(camera_indices, int) and coords == None
- In this case we broadcast our camera_indices / coords shape (h, w, 1 / 2 respectively)
2. isinstance(camera_indices, int) and coords != None
- In this case, we broadcast camera_indices to the same batch dim as coords
3. not isinstance(camera_indices, int) and coords == None
- In this case, we will need to set coords so that it is of shape (h, w, num_rays, 2), and broadcast
all our other args to match the new definition of num_rays := (h, w) + num_rays
4. not isinstance(camera_indices, int) and coords != None
- In this case, we have nothing to do, only check that the arguments are of the correct shape
There is one more edge case we need to be careful with: when we have "jagged cameras" (ie: different heights
and widths for each camera). This isn't problematic when we specify coords, since coords is already a tensor.
When coords == None (ie: when we render out the whole image associated with this camera), we run into problems
since there's no way to stack each coordinate map as all coordinate maps are all different shapes. In this case,
we will need to flatten each individual coordinate map and concatenate them, giving us only one batch dimension,
regaurdless of the number of prepended extra batch dimensions in the camera_indices tensor.
Args:
camera_indices: Camera indices of the flattened cameras object to generate rays for.
coords: Coordinates of the pixels to generate rays for. If None, the full image will be rendered.
camera_opt_to_camera: Optional transform for the camera to world matrices.
distortion_params_delta: Optional delta for the distortion parameters.
keep_shape: If None, then we default to the regular behavior of flattening if cameras is jagged, otherwise
keeping dimensions. If False, we flatten at the end. If True, then we keep the shape of the
camera_indices and coords tensors (if we can).
disable_distortion: If True, disables distortion.
Returns:
Rays for the given camera indices and coords.
"""
# Check the argument types to make sure they're valid and all shaped correctly
assert isinstance(camera_indices, (torch.Tensor, int)), "camera_indices must be a tensor or int"
assert coords is None or isinstance(coords, torch.Tensor), "coords must be a tensor or None"
assert camera_opt_to_camera is None or isinstance(camera_opt_to_camera, torch.Tensor)
assert distortion_params_delta is None or isinstance(distortion_params_delta, torch.Tensor)
if isinstance(camera_indices, torch.Tensor) and isinstance(coords, torch.Tensor):
num_rays_shape = camera_indices.shape[:-1]
errormsg = "Batch dims of inputs must match when inputs are all tensors"
assert coords.shape[:-1] == num_rays_shape, errormsg
assert camera_opt_to_camera is None or camera_opt_to_camera.shape[:-2] == num_rays_shape, errormsg
assert distortion_params_delta is None or distortion_params_delta.shape[:-1] == num_rays_shape, errormsg
# If zero dimensional, we need to unsqueeze to get a batch dimension and then squeeze later
if not self.shape:
cameras = self.reshape((1,))
assert torch.all(
torch.tensor(camera_indices == 0) if isinstance(camera_indices, int) else camera_indices == 0
), "Can only index into single camera with no batch dimensions if index is zero"
else:
cameras = self
# If the camera indices are an int, then we need to make sure that the camera batch is 1D
if isinstance(camera_indices, int):
assert (
len(cameras.shape) == 1
), "camera_indices must be a tensor if cameras are batched with more than 1 batch dimension"
camera_indices = torch.tensor([camera_indices], device=cameras.device)
assert camera_indices.shape[-1] == len(
cameras.shape
), "camera_indices must have shape (num_rays:..., num_cameras_batch_dims)"
# If keep_shape is True, then we need to make sure that the camera indices in question
# are all the same height and width and can actually be batched while maintaining the image
# shape
if keep_shape is True:
assert torch.all(cameras.height[camera_indices] == cameras.height[camera_indices[0]]) and torch.all(
cameras.width[camera_indices] == cameras.width[camera_indices[0]]
), "Can only keep shape if all cameras have the same height and width"
# If the cameras don't all have same height / width, if coords is not none, we will need to generate
# a flat list of coords for each camera and then concatenate otherwise our rays will be jagged.
# Camera indices, camera_opt, and distortion will also need to be broadcasted accordingly which is non-trivial
if cameras.is_jagged and coords is None and (keep_shape is None or keep_shape is False):
index_dim = camera_indices.shape[-1]
camera_indices = camera_indices.reshape(-1, index_dim)
_coords = [cameras.get_image_coords(index=tuple(index)).reshape(-1, 2) for index in camera_indices]
camera_indices = torch.cat(
[index.unsqueeze(0).repeat(coords.shape[0], 1) for index, coords in zip(camera_indices, _coords)],
)
coords = torch.cat(_coords, dim=0)
assert coords.shape[0] == camera_indices.shape[0]
# Need to get the coords of each indexed camera and flatten all coordinate maps and concatenate them
# The case where we aren't jagged && keep_shape (since otherwise coords is already set) and coords
# is None. In this case we append (h, w) to the num_rays dimensions for all tensors. In this case,
# each image in camera_indices has to have the same shape since otherwise we would have error'd when
# we checked keep_shape is valid or we aren't jagged.
if coords is None:
index_dim = camera_indices.shape[-1]
index = camera_indices.reshape(-1, index_dim)[0]
coords: torch.Tensor = cameras.get_image_coords(index=tuple(index)) # (h, w, 2)
coords = coords.reshape(coords.shape[:2] + (1,) * len(camera_indices.shape[:-1]) + (2,)) # (h, w, 1..., 2)
coords = coords.expand(coords.shape[:2] + camera_indices.shape[:-1] + (2,)) # (h, w, num_rays, 2)
camera_opt_to_camera = ( # (h, w, num_rays, 3, 4) or None
camera_opt_to_camera.broadcast_to(coords.shape[:-1] + (3, 4))
if camera_opt_to_camera is not None
else None
)
distortion_params_delta = ( # (h, w, num_rays, 6) or None
distortion_params_delta.broadcast_to(coords.shape[:-1] + (6,))
if distortion_params_delta is not None
else None
)
# If camera indices was an int or coords was none, we need to broadcast our indices along batch dims
camera_indices = camera_indices.broadcast_to(coords.shape[:-1] + (len(cameras.shape),)).to(torch.long)
# Checking our tensors have been standardized
assert isinstance(coords, torch.Tensor) and isinstance(camera_indices, torch.Tensor)
assert camera_indices.shape[-1] == len(cameras.shape)
assert camera_opt_to_camera is None or camera_opt_to_camera.shape[:-2] == coords.shape[:-1]
assert distortion_params_delta is None or distortion_params_delta.shape[:-1] == coords.shape[:-1]
# This will do the actual work of generating the rays now that we have standardized the inputs
# raybundle.shape == (num_rays) when done
# pylint: disable=protected-access
raybundle = cameras._generate_rays_from_coords(
camera_indices, coords, camera_opt_to_camera, distortion_params_delta, disable_distortion=disable_distortion
)
# If we have mandated that we don't keep the shape, then we flatten
if keep_shape is False:
raybundle = raybundle.flatten()
# TODO: We should have to squeeze the last dimension here if we started with zero batch dims, but never have to,
# so there might be a rogue squeeze happening somewhere, and this may cause some unintended behaviour
# that we haven't caught yet with tests
return raybundle
# pylint: disable=too-many-statements
def _generate_rays_from_coords(
    self,
    camera_indices: TensorType["num_rays":..., "num_cameras_batch_dims"],
    coords: TensorType["num_rays":..., 2],
    camera_opt_to_camera: Optional[TensorType["num_rays":..., 3, 4]] = None,
    distortion_params_delta: Optional[TensorType["num_rays":..., 6]] = None,
    disable_distortion: bool = False,
) -> RayBundle:
    """Generates rays for the given camera indices and coords where self isn't jagged

    This is a fairly complex function, so let's break this down slowly.

    Shapes involved:
        - num_rays: This is your output raybundle shape. It dictates the number and shape of the rays generated
        - num_cameras_batch_dims: This is the number of dimensions of our camera

    Args:
        camera_indices: Camera indices of the flattened cameras object to generate rays for.
            The shape of this is such that indexing into camera_indices["num_rays":...] will return the
            index into each batch dimension of the camera in order to get the correct camera specified by
            "num_rays".

            Example:
                >>> cameras = Cameras(...)
                >>> cameras.shape
                (2, 3, 4)
                >>> camera_indices = torch.tensor([0, 0, 0])  # We need an axis of length 3 since cameras.ndim == 3
                >>> camera_indices.shape
                (3,)
                >>> coords = torch.tensor([1,1])
                >>> coords.shape
                (2,)
                >>> out_rays = cameras.generate_rays(camera_indices=camera_indices, coords = coords)
                # This will generate a RayBundle with a single ray for the
                # camera at cameras[0,0,0] at image coordinates (1,1), so out_rays.shape == ()
                >>> out_rays.shape
                ()
                >>> camera_indices = torch.tensor([[0,0,0]])
                >>> camera_indices.shape
                (1, 3)
                >>> coords = torch.tensor([[1,1]])
                >>> coords.shape
                (1, 2)
                >>> out_rays = cameras.generate_rays(camera_indices=camera_indices, coords = coords)
                # This will generate a RayBundle with a single ray for the
                # camera at cameras[0,0,0] at point (1,1), so out_rays.shape == (1,)
                # since we added an extra dimension in front of camera_indices
                >>> out_rays.shape
                (1,)

            If you want more examples, check tests/cameras/test_cameras and the function check_generate_rays_shape

            The bottom line is that for camera_indices: (num_rays:..., num_cameras_batch_dims), num_rays is the
            output shape and if you index into the output RayBundle with some indices [i:...], if you index into
            camera_indices with camera_indices[i:...] as well, you will get a 1D tensor containing the batch
            indices into the original cameras object corresponding to that ray (ie: you will get the camera
            from our batched cameras corresponding to the ray at RayBundle[i:...]).

        coords: Coordinates of the pixels to generate rays for. If None, the full image will be rendered, meaning
            height and width get prepended to the num_rays dimensions. Indexing into coords with [i:...] will
            get you the image coordinates [x, y] of that specific ray located at output RayBundle[i:...].

        camera_opt_to_camera: Optional transform for the camera to world matrices.
            In terms of shape, it follows the same rules as coords, but indexing into it with [i:...] gets you
            the 2D camera to world transform matrix for the camera optimization at RayBundle[i:...].

        distortion_params_delta: Optional delta for the distortion parameters.
            In terms of shape, it follows the same rules as coords, but indexing into it with [i:...] gets you
            the 1D tensor with the 6 distortion parameters for the camera optimization at RayBundle[i:...].

        disable_distortion: If True, disables distortion.

    Returns:
        Rays for the given camera indices and coords. RayBundle.shape == num_rays
    """
    # Make sure we're on the right devices
    camera_indices = camera_indices.to(self.device)
    coords = coords.to(self.device)

    # Checking to make sure everything is of the right shape and type
    num_rays_shape = camera_indices.shape[:-1]
    assert camera_indices.shape == num_rays_shape + (self.ndim,)
    assert coords.shape == num_rays_shape + (2,)
    assert coords.shape[-1] == 2
    assert camera_opt_to_camera is None or camera_opt_to_camera.shape == num_rays_shape + (3, 4)
    assert distortion_params_delta is None or distortion_params_delta.shape == num_rays_shape + (6,)

    # Here, we've broken our indices down along the num_cameras_batch_dims dimension allowing us to index by all
    # of our output rays at each dimension of our cameras object
    true_indices = [camera_indices[..., i] for i in range(camera_indices.shape[-1])]

    # Get all our focal lengths, principal points and make sure they are the right shapes
    y = coords[..., 0]  # (num_rays,) get rid of the last dimension
    x = coords[..., 1]  # (num_rays,) get rid of the last dimension
    fx, fy = self.fx[true_indices].squeeze(-1), self.fy[true_indices].squeeze(-1)  # (num_rays,)
    cx, cy = self.cx[true_indices].squeeze(-1), self.cy[true_indices].squeeze(-1)  # (num_rays,)
    assert (
        y.shape == num_rays_shape
        and x.shape == num_rays_shape
        and fx.shape == num_rays_shape
        and fy.shape == num_rays_shape
        and cx.shape == num_rays_shape
        and cy.shape == num_rays_shape
    ), (
        str(num_rays_shape)
        + str(y.shape)
        + str(x.shape)
        + str(fx.shape)
        + str(fy.shape)
        + str(cx.shape)
        + str(cy.shape)
    )

    # Get our image coordinates and image coordinates offset by 1 (offsets used for dx, dy calculations)
    # Also make sure the shapes are correct
    # Note the y axis is negated: image row index grows downward while camera y grows upward.
    coord = torch.stack([(x - cx) / fx, -(y - cy) / fy], -1)  # (num_rays, 2)
    coord_x_offset = torch.stack([(x - cx + 1) / fx, -(y - cy) / fy], -1)  # (num_rays, 2)
    coord_y_offset = torch.stack([(x - cx) / fx, -(y - cy + 1) / fy], -1)  # (num_rays, 2)
    assert (
        coord.shape == num_rays_shape + (2,)
        and coord_x_offset.shape == num_rays_shape + (2,)
        and coord_y_offset.shape == num_rays_shape + (2,)
    )

    # Stack image coordinates and image coordinates offset by 1, check shapes too
    coord_stack = torch.stack([coord, coord_x_offset, coord_y_offset], dim=0)  # (3, num_rays, 2)
    assert coord_stack.shape == (3,) + num_rays_shape + (2,)

    # Undistorts our images according to our distortion parameters
    if not disable_distortion:
        distortion_params = None
        if self.distortion_params is not None:
            distortion_params = self.distortion_params[true_indices]
            if distortion_params_delta is not None:
                distortion_params = distortion_params + distortion_params_delta
        elif distortion_params_delta is not None:
            distortion_params = distortion_params_delta

        # Do not apply distortion for equirectangular images
        if distortion_params is not None:
            mask = (self.camera_type[true_indices] != CameraType.EQUIRECTANGULAR.value).squeeze(-1)  # (num_rays)
            coord_mask = torch.stack([mask, mask, mask], dim=0)
            if mask.any():
                coord_stack[coord_mask, :] = camera_utils.radial_and_tangential_undistort(
                    coord_stack[coord_mask, :].reshape(3, -1, 2),
                    distortion_params[mask, :],
                ).reshape(-1, 2)

    # Make sure after we have undistorted our images, the shapes are still correct
    assert coord_stack.shape == (3,) + num_rays_shape + (2,)

    # Gets our directions for all our rays in camera coordinates and checks shapes at the end
    # Here, directions_stack is of shape (3, num_rays, 3)
    # directions_stack[0] is the direction for ray in camera coordinates
    # directions_stack[1] is the direction for ray in camera coordinates offset by 1 in x
    # directions_stack[2] is the direction for ray in camera coordinates offset by 1 in y
    cam_types = torch.unique(self.camera_type, sorted=False)
    directions_stack = torch.empty((3,) + num_rays_shape + (3,), device=self.device)
    if CameraType.PERSPECTIVE.value in cam_types:
        mask = (self.camera_type[true_indices] == CameraType.PERSPECTIVE.value).squeeze(-1)  # (num_rays)
        mask = torch.stack([mask, mask, mask], dim=0)
        directions_stack[..., 0][mask] = torch.masked_select(coord_stack[..., 0], mask).float()
        directions_stack[..., 1][mask] = torch.masked_select(coord_stack[..., 1], mask).float()
        # The camera looks down -z in this convention.
        directions_stack[..., 2][mask] = -1.0

    if CameraType.FISHEYE.value in cam_types:
        mask = (self.camera_type[true_indices] == CameraType.FISHEYE.value).squeeze(-1)  # (num_rays)
        mask = torch.stack([mask, mask, mask], dim=0)

        theta = torch.sqrt(torch.sum(coord_stack**2, dim=-1))
        theta = torch.clip(theta, 0.0, math.pi)

        sin_theta = torch.sin(theta)
        directions_stack[..., 0][mask] = torch.masked_select(coord_stack[..., 0] * sin_theta / theta, mask).float()
        directions_stack[..., 1][mask] = torch.masked_select(coord_stack[..., 1] * sin_theta / theta, mask).float()
        directions_stack[..., 2][mask] = -torch.masked_select(torch.cos(theta), mask)

    if CameraType.EQUIRECTANGULAR.value in cam_types:
        mask = (self.camera_type[true_indices] == CameraType.EQUIRECTANGULAR.value).squeeze(-1)  # (num_rays)
        mask = torch.stack([mask, mask, mask], dim=0)

        # For equirect, fx = fy = height = width/2
        # Then coord[..., 0] goes from -1 to 1 and coord[..., 1] goes from -1/2 to 1/2
        theta = -torch.pi * coord_stack[..., 0]  # minus sign for right-handed
        phi = torch.pi * (0.5 - coord_stack[..., 1])
        # use spherical in local camera coordinates (+y up, x=0 and z<0 is theta=0)
        directions_stack[..., 0][mask] = torch.masked_select(-torch.sin(theta) * torch.sin(phi), mask).float()
        directions_stack[..., 1][mask] = torch.masked_select(torch.cos(phi), mask).float()
        directions_stack[..., 2][mask] = torch.masked_select(-torch.cos(theta) * torch.sin(phi), mask).float()

    for value in cam_types:
        if value not in [CameraType.PERSPECTIVE.value, CameraType.FISHEYE.value, CameraType.EQUIRECTANGULAR.value]:
            raise ValueError(f"Camera type {value} not supported.")

    assert directions_stack.shape == (3,) + num_rays_shape + (3,)

    c2w = self.camera_to_worlds[true_indices]
    assert c2w.shape == num_rays_shape + (3, 4)

    if camera_opt_to_camera is not None:
        c2w = pose_utils.multiply(c2w, camera_opt_to_camera)
    rotation = c2w[..., :3, :3]  # (..., 3, 3)
    assert rotation.shape == num_rays_shape + (3, 3)

    # Rotate camera-space directions into world space.
    directions_stack = torch.sum(
        directions_stack[..., None, :] * rotation, dim=-1
    )  # (..., 1, 3) * (..., 3, 3) -> (..., 3)

    directions_norm = torch.norm(directions_stack, dim=-1, keepdim=True)
    # Keep only the norm of the un-offset (index 0) ray; the offsets exist purely for dx/dy below.
    directions_norm = directions_norm[0]

    directions_stack = normalize(directions_stack, dim=-1)
    assert directions_stack.shape == (3,) + num_rays_shape + (3,)

    origins = c2w[..., :3, 3]  # (..., 3)
    assert origins.shape == num_rays_shape + (3,)

    directions = directions_stack[0]
    assert directions.shape == num_rays_shape + (3,)

    # norms of the vector going between adjacent coords, giving us dx and dy per output ray
    dx = torch.sqrt(torch.sum((directions - directions_stack[1]) ** 2, dim=-1))  # ("num_rays":...,)
    dy = torch.sqrt(torch.sum((directions - directions_stack[2]) ** 2, dim=-1))  # ("num_rays":...,)
    assert dx.shape == num_rays_shape and dy.shape == num_rays_shape

    pixel_area = (dx * dy)[..., None]  # ("num_rays":..., 1)
    assert pixel_area.shape == num_rays_shape + (1,)

    times = self.times[camera_indices, 0] if self.times is not None else None

    return RayBundle(
        origins=origins,
        directions=directions,
        pixel_area=pixel_area,
        camera_indices=camera_indices,
        directions_norm=directions_norm,
        times=times,
    )
def to_json(
    # NOTE(review): the original hint annotated the image as [..., 2]; the code below treats
    # dim 2 as the channel axis and JPEG-encodes it, so 3 channels (RGB) looks intended — confirm.
    self, camera_idx: int, image: Optional[TensorType["height", "width", 3]] = None, max_size: Optional[int] = None
) -> Dict:
    """Convert a camera to a json dictionary.

    Args:
        camera_idx: Index of the camera to convert.
        image: An image in range [0, 1] that is encoded to a base64 string.
        max_size: Max size to resize the image to if present.

    Returns:
        A JSON representation of the camera
    """
    # Flatten so a single integer index addresses one camera regardless of batch dims.
    flattened = self.flatten()
    json_ = {
        "type": "PinholeCamera",
        "cx": flattened[camera_idx].cx.item(),
        "cy": flattened[camera_idx].cy.item(),
        "fx": flattened[camera_idx].fx.item(),
        "fy": flattened[camera_idx].fy.item(),
        "camera_to_world": self.camera_to_worlds[camera_idx].tolist(),
        "camera_index": camera_idx,
        "times": flattened[camera_idx].times.item() if self.times is not None else None,
    }
    if image is not None:
        # Convert [0, 1] float image to uint8 for JPEG encoding.
        image_uint8 = (image * 255).detach().type(torch.uint8)
        if max_size is not None:
            # torchvision resize expects channels-first, hence the permute round-trip.
            image_uint8 = image_uint8.permute(2, 0, 1)
            image_uint8 = torchvision.transforms.functional.resize(image_uint8, max_size)  # type: ignore
            image_uint8 = image_uint8.permute(1, 2, 0)
        image_uint8 = image_uint8.cpu().numpy()
        # Embed the JPEG bytes as a base64 data URI.
        data = cv2.imencode(".jpg", image_uint8)[1].tobytes()
        json_["image"] = str("data:image/jpeg;base64," + base64.b64encode(data).decode("ascii"))
    return json_
def get_intrinsics_matrices(self) -> TensorType["num_cameras":..., 3, 3]:
    """Returns the intrinsic matrices for each camera.

    Returns:
        Pinhole camera intrinsics matrices
    """
    # One 3x3 pinhole matrix per camera in the batch:
    # [[fx, 0, cx], [0, fy, cy], [0, 0, 1]]
    intrinsics = torch.zeros((*self.shape, 3, 3), dtype=torch.float32)
    focal_x = self.fx.squeeze(-1)
    focal_y = self.fy.squeeze(-1)
    center_x = self.cx.squeeze(-1)
    center_y = self.cy.squeeze(-1)
    intrinsics[..., 0, 0] = focal_x
    intrinsics[..., 1, 1] = focal_y
    intrinsics[..., 0, 2] = center_x
    intrinsics[..., 1, 2] = center_y
    intrinsics[..., 2, 2] = 1.0
    return intrinsics
def rescale_output_resolution(
    self, scaling_factor: Union[TensorType["num_cameras":...], TensorType["num_cameras":..., 1], float, int]
) -> None:
    """Rescale the output resolution of the cameras in place.

    Args:
        scaling_factor: Scaling factor to apply to the output resolution.
            Accepts a scalar (applied to every camera) or a per-camera tensor.

    Raises:
        ValueError: If the scaling factor tensor shape does not match the camera batch shape.
    """
    # Normalize every accepted input form into a tensor broadcastable against the intrinsics.
    if isinstance(scaling_factor, (float, int)):
        factor = torch.tensor([scaling_factor]).to(self.device).broadcast_to((self.cx.shape))
    elif isinstance(scaling_factor, torch.Tensor) and scaling_factor.shape == self.shape:
        factor = scaling_factor.unsqueeze(-1)
    elif isinstance(scaling_factor, torch.Tensor) and scaling_factor.shape == (*self.shape, 1):
        factor = scaling_factor
    else:
        raise ValueError(
            f"Scaling factor must be a float, int, or a tensor of shape {self.shape} or {(*self.shape, 1)}."
        )

    self.fx = self.fx * factor
    self.fy = self.fy * factor
    self.cx = self.cx * factor
    self.cy = self.cy * factor
    # Image dimensions must stay integral, so truncate back to int64 after scaling.
    self.height = (self.height * factor).to(torch.int64)
    self.width = (self.width * factor).to(torch.int64)
The provided code snippet includes necessary dependencies for implementing the `get_homography` function. Write a Python function `def get_homography( intersection_points: torch.Tensor, normal: torch.Tensor, cameras: Cameras, valid_angle_thres: float = 0.3 )` to solve the following problem:
get homography Args: intersection_points (torch.Tensor): 3D points on the surface at which to anchor the homographies normal (torch.Tensor): surface normals at those intersection points cameras (Cameras): cameras (the first one is used as the reference view)
Here is the function:
def get_homography(
    intersection_points: torch.Tensor, normal: torch.Tensor, cameras: Cameras, valid_angle_thres: float = 0.3
):
    """Compute per-camera, per-point planar homographies from the first (reference) camera.

    Args:
        intersection_points: 3D surface points, shape (n_pts, 3) assumed — TODO confirm.
        normal: surface normals at those points, shape (n_pts, 3) assumed — TODO confirm.
        cameras: cameras batch; cameras[0] is treated as the reference view.
        valid_angle_thres: minimum cosine between the source viewing ray and the normal
            for the homography to be considered valid.

    Returns:
        Tuple of (H, valid): homographies of shape (n_cameras, n_pts, 3, 3) and a boolean
        validity mask of shape (n_cameras, n_pts).
    """
    device = intersection_points.device
    # construct homography
    c2w = cameras.camera_to_worlds.to(device)
    K = cameras.get_intrinsics_matrices().to(device)
    K_inv = torch.linalg.inv(K)

    # convert camera to opencv format (flip y and z axes)
    # NOTE(review): this *= mutates c2w in place; if .to(device) was a no-op (already on
    # device), this modifies cameras.camera_to_worlds itself — confirm intended.
    c2w[:, :3, 1:3] *= -1
    w2c_r = c2w[:, :3, :3].transpose(1, 2)
    w2c_t = -w2c_r @ c2w[:, :3, 3:]
    w2c = torch.cat([w2c_r, w2c_t], dim=-1)

    # Relative pose of each camera w.r.t. the reference camera (index 0).
    R_rel = w2c[:, :3, :3] @ c2w[:1, :3, :3]  # [N, 3, 3]
    # NOTE(review): mixes w2c[:, ...] with w2c[:1, ...]; presumably the standard
    # t_rel = R_i @ t_ref + t_i composition — the original carried a TODO here, verify.
    t_rel = w2c[:, :3, :3] @ c2w[:1, :3, 3:] + w2c[:1, :3, 3:]  # [N, 3, 1]

    # Points and normals expressed in the reference camera frame.
    p_ref = w2c[0, :3, :3] @ intersection_points.transpose(1, 0) + w2c[0, :3, 3:]  # [3, n_pts]
    n_ref = w2c[0, :3, :3] @ normal.transpose(1, 0)  # [3, n_pts]
    # Plane offset d = n . p for the tangent plane at each point.
    d = torch.sum(n_ref * p_ref, dim=0, keepdims=True)

    # TODO make this clear
    # Plane-induced homography H = R + t n^T / d, lifted to pixels via K ... K_ref^-1.
    H = R_rel[:, None, :, :] + t_rel[:, None, :, :] @ n_ref.transpose(1, 0)[None, :, None, :] / d[..., None, None]
    H = K[:, None] @ H @ K_inv[None, :1]  # [n_cameras, n_pts, 3, 3]

    # compute valid mask for homograpy, we should filter normal that are prependicular
    # to source viewing ray directions
    dir_src = torch.nn.functional.normalize(c2w[:, None, :, 3] - intersection_points[None], dim=-1)
    valid = (dir_src * normal[None]).sum(dim=-1) > valid_angle_thres

    # point should be in front of cameras
    p_src = w2c[:, :3, :3] @ intersection_points.transpose(1, 0) + w2c[:, :3, 3:]  # [:, 3, n_pts]
    valid_2 = p_src[:, 2, :] > 0.01

    return H, valid & valid_2
162,336 | from dataclasses import dataclass
import torch
from torchtyping import TensorType
The provided code snippet includes necessary dependencies for implementing the `components_from_spherical_harmonics` function. Write a Python function `def components_from_spherical_harmonics(levels: int, directions: TensorType[..., 3]) -> TensorType[..., "components"]` to solve the following problem:
Returns the value of each spherical-harmonic component evaluated at the given directions. Args: levels: Number of spherical harmonic levels to compute. directions: Unit direction vectors at which to evaluate the harmonics
Here is the function:
def components_from_spherical_harmonics(levels: int, directions: TensorType[..., 3]) -> TensorType[..., "components"]:
    """
    Returns value for each component of spherical harmonics.

    Args:
        levels: Number of spherical harmonic levels to compute (1..5 supported,
            yielding levels**2 components).
        directions: Unit direction vectors at which to evaluate the harmonics.

    Returns:
        Tensor of shape (*directions.shape[:-1], levels**2) with the SH basis values.
    """
    num_components = levels**2
    components = torch.zeros((*directions.shape[:-1], num_components), device=directions.device)

    # BUG FIX: the message previously said "[1,4]" while the check (and the l4 branch
    # below) supports up to 5 levels.
    assert 1 <= levels <= 5, f"SH levels must be in [1,5], got {levels}"
    assert directions.shape[-1] == 3, f"Direction input should have three dimensions. Got {directions.shape[-1]}"

    x = directions[..., 0]
    y = directions[..., 1]
    z = directions[..., 2]

    xx = x**2
    yy = y**2
    zz = z**2

    # l0: constant term
    components[..., 0] = 0.28209479177387814

    # l1: linear in direction
    if levels > 1:
        components[..., 1] = 0.4886025119029199 * y
        components[..., 2] = 0.4886025119029199 * z
        components[..., 3] = 0.4886025119029199 * x

    # l2: quadratic terms
    if levels > 2:
        components[..., 4] = 1.0925484305920792 * x * y
        components[..., 5] = 1.0925484305920792 * y * z
        components[..., 6] = 0.9461746957575601 * zz - 0.31539156525251999
        components[..., 7] = 1.0925484305920792 * x * z
        components[..., 8] = 0.5462742152960396 * (xx - yy)

    # l3: cubic terms
    if levels > 3:
        components[..., 9] = 0.5900435899266435 * y * (3 * xx - yy)
        components[..., 10] = 2.890611442640554 * x * y * z
        components[..., 11] = 0.4570457994644658 * y * (5 * zz - 1)
        components[..., 12] = 0.3731763325901154 * z * (5 * zz - 3)
        components[..., 13] = 0.4570457994644658 * x * (5 * zz - 1)
        components[..., 14] = 1.445305721320277 * z * (xx - yy)
        components[..., 15] = 0.5900435899266435 * x * (xx - 3 * yy)

    # l4: quartic terms
    if levels > 4:
        components[..., 16] = 2.5033429417967046 * x * y * (xx - yy)
        components[..., 17] = 1.7701307697799304 * y * z * (3 * xx - yy)
        components[..., 18] = 0.9461746957575601 * x * y * (7 * zz - 1)
        components[..., 19] = 0.6690465435572892 * y * (7 * zz - 3)
        components[..., 20] = 0.10578554691520431 * (35 * zz * zz - 30 * zz + 3)
        components[..., 21] = 0.6690465435572892 * x * z * (7 * zz - 3)
        components[..., 22] = 0.47308734787878004 * (xx - yy) * (7 * zz - 1)
        components[..., 23] = 1.7701307697799304 * x * z * (xx - 3 * yy)
        components[..., 24] = 0.4425326924449826 * (xx * (xx - 3 * yy) - yy * (3 * xx - yy))

    return components
162,337 | from dataclasses import dataclass
import torch
from torchtyping import TensorType
class Gaussians:
    """Stores Gaussians

    Args:
        mean: Mean of multivariate Gaussian
        cov: Covariance of multivariate Gaussian.
    """

    # NOTE(review): upstream presumably decorates this with @dataclass (the file imports
    # `dataclass`); the keyword construction Gaussians(mean=..., cov=...) below relies on it.
    # Mean of the multivariate Gaussian; trailing axis is the spatial dimension.
    mean: TensorType[..., "dim"]
    # One (dim, dim) covariance matrix per mean.
    cov: TensorType[..., "dim", "dim"]
def compute_3d_gaussian(
    directions: TensorType[..., 3],
    means: TensorType[..., 3],
    dir_variance: TensorType[..., 1],
    radius_variance: TensorType[..., 1],
) -> Gaussians:
    """Build an oriented 3D Gaussian along each ray.

    Args:
        directions: Axis of Gaussian.
        means: Mean of Gaussian.
        dir_variance: Variance along direction axis.
        radius_variance: Variance tangent to direction axis.

    Returns:
        Gaussians: Oriented 3D gaussian.
    """
    # d d^T projects onto the ray axis; (I - d d^T / |d|^2) projects onto the tangent plane.
    axis_outer = directions[..., :, None] * directions[..., None, :]
    identity = torch.eye(directions.shape[-1], device=directions.device)
    # Clamp the squared norm to avoid division by zero for degenerate directions.
    squared_norm = torch.clamp(torch.sum(directions**2, dim=-1, keepdim=True), min=1e-10)
    tangent_outer = identity - directions[..., :, None] * (directions / squared_norm)[..., None, :]

    axis_cov = dir_variance[..., None] * axis_outer[..., :, :]
    tangent_cov = radius_variance[..., None] * tangent_outer[..., :, :]
    return Gaussians(mean=means, cov=axis_cov + tangent_cov)
The provided code snippet includes necessary dependencies for implementing the `cylinder_to_gaussian` function. Write a Python function `def cylinder_to_gaussian( origins: TensorType[..., 3], directions: TensorType[..., 3], starts: TensorType[..., 1], ends: TensorType[..., 1], radius: TensorType[..., 1], ) -> Gaussians` to solve the following problem:
Approximates cylinders with Gaussian distributions. Args: origins: Origins of cylinders. directions: Direction (axis) of cylinders. starts: Start of cylinders. ends: End of cylinders. radius: Radii of cylinders. Returns: Gaussians: Approximation of cylinders
Here is the function:
def cylinder_to_gaussian(
    origins: TensorType[..., 3],
    directions: TensorType[..., 3],
    starts: TensorType[..., 1],
    ends: TensorType[..., 1],
    radius: TensorType[..., 1],
) -> Gaussians:
    """Approximates cylinders with Gaussian distributions.

    Args:
        origins: Origins of cylinders.
        directions: Direction (axis) of cylinders.
        starts: Start of cylinders.
        ends: End of cylinders.
        radius: Radii of cylinders.

    Returns:
        Gaussians: Approximation of cylinders
    """
    # Midpoint of each cylinder segment along its ray.
    midpoints = origins + directions * (0.5 * (starts + ends))
    # Variance of a uniform distribution over the segment length / the disc radius.
    along_axis_variance = (ends - starts) ** 2 / 12
    tangent_variance = radius**2 / 4.0
    return compute_3d_gaussian(directions, midpoints, along_axis_variance, tangent_variance)
162,338 | from dataclasses import dataclass
import torch
from torchtyping import TensorType
class Gaussians:
    """Stores Gaussians

    Args:
        mean: Mean of multivariate Gaussian
        cov: Covariance of multivariate Gaussian.
    """

    # NOTE(review): upstream presumably decorates this with @dataclass (the file imports
    # `dataclass`); keyword construction Gaussians(mean=..., cov=...) relies on it.
    # Mean of the multivariate Gaussian; trailing axis is the spatial dimension.
    mean: TensorType[..., "dim"]
    # One (dim, dim) covariance matrix per mean.
    cov: TensorType[..., "dim", "dim"]
def compute_3d_gaussian(
    directions: TensorType[..., 3],
    means: TensorType[..., 3],
    dir_variance: TensorType[..., 1],
    radius_variance: TensorType[..., 1],
) -> Gaussians:
    """Build an oriented 3D Gaussian along each ray.

    Args:
        directions: Axis of Gaussian.
        means: Mean of Gaussian.
        dir_variance: Variance along direction axis.
        radius_variance: Variance tangent to direction axis.

    Returns:
        Gaussians: Oriented 3D gaussian.
    """
    # d d^T projects onto the ray axis; (I - d d^T / |d|^2) projects onto the tangent plane.
    axis_outer = directions[..., :, None] * directions[..., None, :]
    identity = torch.eye(directions.shape[-1], device=directions.device)
    # Clamp the squared norm to avoid division by zero for degenerate directions.
    squared_norm = torch.clamp(torch.sum(directions**2, dim=-1, keepdim=True), min=1e-10)
    tangent_outer = identity - directions[..., :, None] * (directions / squared_norm)[..., None, :]

    axis_cov = dir_variance[..., None] * axis_outer[..., :, :]
    tangent_cov = radius_variance[..., None] * tangent_outer[..., :, :]
    return Gaussians(mean=means, cov=axis_cov + tangent_cov)
The provided code snippet includes necessary dependencies for implementing the `conical_frustum_to_gaussian` function. Write a Python function `def conical_frustum_to_gaussian( origins: TensorType[..., 3], directions: TensorType[..., 3], starts: TensorType[..., 1], ends: TensorType[..., 1], radius: TensorType[..., 1], ) -> Gaussians` to solve the following problem:
Approximates conical frustums with Gaussian distributions. Uses the stable parameterization described in the mip-NeRF publication. Args: origins: Origins of cones. directions: Direction (axis) of frustums. starts: Start of conical frustums. ends: End of conical frustums. radius: Radii of cone a distance of 1 from the origin. Returns: Gaussians: Approximation of conical frustums
Here is the function:
def conical_frustum_to_gaussian(
    origins: TensorType[..., 3],
    directions: TensorType[..., 3],
    starts: TensorType[..., 1],
    ends: TensorType[..., 1],
    radius: TensorType[..., 1],
) -> Gaussians:
    """Approximates conical frustums with Gaussian distributions.

    Uses stable parameterization described in mip-NeRF publication.

    Args:
        origins: Origins of cones.
        directions: Direction (axis) of frustums.
        starts: Start of conical frustums.
        ends: End of conical frustums.
        radius: Radii of cone a distance of 1 from the origin.

    Returns:
        Gaussians: Approximation of conical frustums
    """
    # Stable mid/half-width parameterization from mip-NeRF (eq. 7).
    mid = (starts + ends) / 2.0
    half = (ends - starts) / 2.0
    denom = 3.0 * mid**2.0 + half**2.0

    means = origins + directions * (mid + (2.0 * mid * half**2.0) / denom)
    dir_variance = (half**2) / 3 - (4 / 15) * ((half**4 * (12 * mid**2 - half**2)) / denom**2)
    radius_variance = radius**2 * ((mid**2) / 4 + (5 / 12) * half**2 - 4 / 15 * (half**4) / denom)
    return compute_3d_gaussian(directions, means, dir_variance, radius_variance)
162,339 | from dataclasses import dataclass
import torch
from torchtyping import TensorType
The provided code snippet includes necessary dependencies for implementing the `expected_sin` function. Write a Python function `def expected_sin(x_means: torch.Tensor, x_vars: torch.Tensor) -> torch.Tensor` to solve the following problem:
Computes the expected value of sin(y) where y ~ N(x_means, x_vars) Args: x_means: Mean values. x_vars: Variance of values. Returns: torch.Tensor: The expected value of sin.
Here is the function:
def expected_sin(x_means: torch.Tensor, x_vars: torch.Tensor) -> torch.Tensor:
    """Computes the expected value of sin(y) where y ~ N(x_means, x_vars)

    Args:
        x_means: Mean values.
        x_vars: Variance of values.

    Returns:
        torch.Tensor: The expected value of sin.
    """
    # E[sin(y)] = exp(-var/2) * sin(mean) for a Gaussian y.
    attenuation = torch.exp(-0.5 * x_vars)
    return attenuation * torch.sin(x_means)
162,340 | from __future__ import annotations
import enum
from abc import abstractmethod
from pathlib import Path
from time import time
from typing import Any, Dict, List, Optional, Union
import torch
import wandb
from rich.console import Console
from torch.utils.tensorboard import SummaryWriter
from torchtyping import TensorType
from nerfstudio.configs import base_config as cfg
from nerfstudio.utils.decorators import check_main_thread, decorate_all
from nerfstudio.utils.printing import human_format
EVENT_STORAGE = []
class EventName(enum.Enum):
    """Names of possible events that can be logged via Local Writer for convenience.

    see config/logging/default_logging.yaml"""

    # Values are the human-readable column headers shown by the local writer.
    ITER_TRAIN_TIME = "Train Iter (time)"
    TOTAL_TRAIN_TIME = "Train Total (time)"
    ITER_VIS_TIME = "Viewer Rendering (time)"
    ETA = "ETA (time)"
    TRAIN_RAYS_PER_SEC = "Train Rays / Sec"
    TEST_RAYS_PER_SEC = "Test Rays / Sec"
    VIS_RAYS_PER_SEC = "Vis Rays / Sec"
    CURR_TEST_PSNR = "Test PSNR"
class EventType(enum.Enum):
    """Possible Event types and their associated write function"""

    # Each value is the name of the writer method used to emit the event.
    IMAGE = "write_image"
    SCALAR = "write_scalar"
    DICT = "write_scalar_dict"
    CONFIG = "write_config"
The provided code snippet includes necessary dependencies for implementing the `put_image` function. Write a Python function `def put_image(name, image: TensorType["H", "W", "C"], step: int)` to solve the following problem:
Setter function to place images into the queue to be written out Args: image: image to write out step: step associated with image
Here is the function:
def put_image(name: Union[str, EventName], image: TensorType["H", "W", "C"], step: int):
    """Setter function to place images into the queue to be written out

    Args:
        name: name of the image event; an EventName member is unwrapped to its string value
        image: image to write out
        step: step associated with image
    """
    if isinstance(name, EventName):
        name = name.value
    # Detach and move to CPU so the queued event holds no GPU memory or autograd graph.
    EVENT_STORAGE.append({"name": name, "write_type": EventType.IMAGE, "event": image.detach().cpu(), "step": step})
162,341 | from __future__ import annotations
import enum
from abc import abstractmethod
from pathlib import Path
from time import time
from typing import Any, Dict, List, Optional, Union
import torch
import wandb
from rich.console import Console
from torch.utils.tensorboard import SummaryWriter
from torchtyping import TensorType
from nerfstudio.configs import base_config as cfg
from nerfstudio.utils.decorators import check_main_thread, decorate_all
from nerfstudio.utils.printing import human_format
EVENT_STORAGE = []
class EventType(enum.Enum):
    """Possible Event types and their associated write function"""

    # Each value is the name of the writer method used to emit the event.
    IMAGE = "write_image"
    SCALAR = "write_scalar"
    DICT = "write_scalar_dict"
    CONFIG = "write_config"
The provided code snippet includes necessary dependencies for implementing the `put_dict` function. Write a Python function `def put_dict(name: str, scalar_dict: Dict[str, Any], step: int)` to solve the following problem:
Setter function to place a dictionary of scalars into the queue to be written out Args: name: name of scalar dictionary scalar_dict: values to write out step: step associated with dict
Here is the function:
def put_dict(name: str, scalar_dict: Dict[str, Any], step: int):
    """Setter function to place a dictionary of scalars into the queue to be written out

    Args:
        name: name of scalar dictionary
        scalar_dict: values to write out
        step: step associated with dict
    """
    event = {"name": name, "write_type": EventType.DICT, "event": scalar_dict, "step": step}
    EVENT_STORAGE.append(event)
162,342 | from __future__ import annotations
import enum
from abc import abstractmethod
from pathlib import Path
from time import time
from typing import Any, Dict, List, Optional, Union
import torch
import wandb
from rich.console import Console
from torch.utils.tensorboard import SummaryWriter
from torchtyping import TensorType
from nerfstudio.configs import base_config as cfg
from nerfstudio.utils.decorators import check_main_thread, decorate_all
from nerfstudio.utils.printing import human_format
EVENT_STORAGE = []
class EventType(enum.Enum):
    """Possible Event types and their associated write function"""

    # Each value is the name of the writer method used to emit the event.
    IMAGE = "write_image"
    SCALAR = "write_scalar"
    DICT = "write_scalar_dict"
    CONFIG = "write_config"
The provided code snippet includes necessary dependencies for implementing the `put_config` function. Write a Python function `def put_config(name: str, config_dict: Dict[str, Any], step: int)` to solve the following problem:
Setter function to place a configuration dictionary into the queue to be written out Args: name: name of the config dictionary config_dict: configuration values to write out step: step associated with dict
Here is the function:
def put_config(name: str, config_dict: Dict[str, Any], step: int):
    """Setter function to place a configuration dictionary into the queue to be written out

    Args:
        name: name of the config dictionary
        config_dict: configuration values to write out
        step: step associated with dict
    """
    EVENT_STORAGE.append({"name": name, "write_type": EventType.CONFIG, "event": config_dict, "step": step})
162,343 | from __future__ import annotations
import enum
from abc import abstractmethod
from pathlib import Path
from time import time
from typing import Any, Dict, List, Optional, Union
import torch
import wandb
from rich.console import Console
from torch.utils.tensorboard import SummaryWriter
from torchtyping import TensorType
from nerfstudio.configs import base_config as cfg
from nerfstudio.utils.decorators import check_main_thread, decorate_all
from nerfstudio.utils.printing import human_format
GLOBAL_BUFFER = {}
class EventName(enum.Enum):
    """Names of possible events that can be logged via Local Writer for convenience.

    see config/logging/default_logging.yaml"""

    # Values are the human-readable column headers shown by the local writer.
    ITER_TRAIN_TIME = "Train Iter (time)"
    TOTAL_TRAIN_TIME = "Train Total (time)"
    ITER_VIS_TIME = "Viewer Rendering (time)"
    ETA = "ETA (time)"
    TRAIN_RAYS_PER_SEC = "Train Rays / Sec"
    TEST_RAYS_PER_SEC = "Test Rays / Sec"
    VIS_RAYS_PER_SEC = "Vis Rays / Sec"
    CURR_TEST_PSNR = "Test PSNR"
def put_scalar(name: str, scalar: Any, step: int):
    """Setter function to place scalars into the queue to be written out

    Args:
        name: name of scalar (a plain string or an EventName member)
        scalar: value
        step: step associated with scalar
    """
    key = name.value if isinstance(name, EventName) else name
    EVENT_STORAGE.append({"name": key, "write_type": EventType.SCALAR, "event": scalar, "step": step})
def _format_time(seconds):
    """utility tool to format time in human readable form given seconds"""
    # keep the sub-second fraction as milliseconds before truncating
    milliseconds = (seconds % 1) * 1e3
    total = int(seconds)
    days, remainder = divmod(total, 86400)
    hours, remainder = divmod(remainder, 3600)
    minutes, secs = divmod(remainder, 60)
    # report only the largest relevant units; milliseconds appear only below one minute
    if days > 0:
        return f"{days} d, {hours} h, {minutes} m, {secs} s"
    if hours > 0:
        return f"{hours} h, {minutes} m, {secs} s"
    if minutes > 0:
        return f"{minutes} m, {secs} s"
    if secs > 0:
        return f"{secs} s, {milliseconds:0.3f} ms"
    return f"{milliseconds:0.3f} ms"
The provided code snippet includes necessary dependencies for implementing the `put_time` function. Write a Python function `def put_time(name: str, duration: float, step: int, avg_over_steps: bool = True, update_eta: bool = False)` to solve the following problem:
Setter function to place a time element into the queue to be written out. Processes the time info according to the options. Args: name: name of time item duration: value step: step associated with value avg_over_steps: if True, calculate and record a running average of the times update_eta: if True, update the ETA. should only be set for the training iterations/s
Here is the function:
def put_time(name: str, duration: float, step: int, avg_over_steps: bool = True, update_eta: bool = False):
    """Setter function to place a time element into the queue to be written out.
    Processes the time info according to the options.

    Args:
        name: name of time item
        duration: value
        step: step associated with value
        avg_over_steps: if True, calculate and record a running average of the times
        update_eta: if True, update the ETA. should only be set for the training iterations/s
    """
    if isinstance(name, EventName):
        name = name.value

    if avg_over_steps:
        GLOBAL_BUFFER["step"] = step
        # maintain a bounded sliding window of the most recent durations for this event
        curr_event = GLOBAL_BUFFER["events"].get(name, {"buffer": [], "avg": 0})
        curr_buffer = curr_event["buffer"]
        if len(curr_buffer) >= GLOBAL_BUFFER["max_buffer_size"]:
            curr_buffer.pop(0)  # evict the oldest sample once the window is full
        curr_buffer.append(duration)
        curr_avg = sum(curr_buffer) / len(curr_buffer)
        # log the smoothed value rather than the raw duration
        put_scalar(name, curr_avg, step)
        GLOBAL_BUFFER["events"][name] = {"buffer": curr_buffer, "avg": curr_avg}
    else:
        put_scalar(name, duration, step)

    if update_eta:
        ## NOTE: eta should be called with avg train iteration time
        remain_iter = GLOBAL_BUFFER["max_iter"] - step
        remain_time = remain_iter * GLOBAL_BUFFER["events"][name]["avg"]
        put_scalar(EventName.ETA, remain_time, step)
        # cache a human-readable ETA string for the local writer's display
        GLOBAL_BUFFER["events"][EventName.ETA.value] = _format_time(remain_time)
162,344 | from __future__ import annotations
import enum
from abc import abstractmethod
from pathlib import Path
from time import time
from typing import Any, Dict, List, Optional, Union
import torch
import wandb
from rich.console import Console
from torch.utils.tensorboard import SummaryWriter
from torchtyping import TensorType
from nerfstudio.configs import base_config as cfg
from nerfstudio.utils.decorators import check_main_thread, decorate_all
from nerfstudio.utils.printing import human_format
EVENT_WRITERS = []
EVENT_STORAGE = []
class LocalWriter:
    """Local Writer Class
    TODO: migrate to prettyprint

    Args:
        config: configuration to instantiate class
        banner_messages: list of messages to always display at bottom of screen
    """

    def __init__(self, config: cfg.LocalWriterConfig, banner_messages: Optional[List[str]] = None):
        self.config = config
        # only stats whose name appears in the config are printed
        self.stats_to_track = [name.value for name in config.stats_to_track]
        self.keys = set()  # every stat name seen so far; used to detect new columns
        # printed-line history; slots 0 and 1 hold the header row and its underline
        self.past_mssgs = ["", ""]
        self.banner_len = 0 if banner_messages is None else len(banner_messages) + 1
        if banner_messages:
            self.past_mssgs.extend(["-" * 100])
            self.past_mssgs.extend(banner_messages)
        self.has_printed = False  # whether the first stats line has been emitted yet

    def write_stats_log(self, step: int) -> None:
        """Function to write out scalars to terminal

        Args:
            step: current train step
        """
        valid_step = step > 0 and step % GLOBAL_BUFFER["steps_per_log"] == 0
        if valid_step:
            if not self.has_printed and self.config.max_log_size:
                CONSOLE.log(
                    f"Printing max of {self.config.max_log_size} lines. "
                    "Set flag [yellow]--logging.local-writer.max-log-size=0[/yellow] "
                    "to disable line wrapping."
                )
            latest_map, new_key = self._consolidate_events()
            self._update_header(latest_map, new_key)
            self._print_stats(latest_map)

    def write_config(self, name: str, config_dict: Dict[str, Any], step: int):
        """Function that writes out the config to local

        Args:
            config: config dictionary to write out
        """
        # TODO: implement this

    def _consolidate_events(self):
        """Collapse EVENT_STORAGE into the latest value per stat name.

        Returns:
            latest_map: mapping of stat name -> most recent value
            new_key: True if any stat name was seen for the first time
        """
        latest_map = {}
        new_key = False
        for event in EVENT_STORAGE:
            name = event["name"]
            if name not in self.keys:
                self.keys.add(name)
                new_key = True
            latest_map[name] = event["event"]
        return latest_map, new_key

    def _update_header(self, latest_map, new_key):
        """helper to handle the printing of the header labels

        Args:
            latest_map: the most recent dictionary of stats that have been recorded
            new_key: indicator whether or not there is a new key added to logger
        """
        # reprint the header either in full-log mode at the start, or in capped
        # mode when the history is empty or a new column appeared
        full_log_cond = not self.config.max_log_size and GLOBAL_BUFFER["step"] <= GLOBAL_BUFFER["steps_per_log"]
        capped_log_cond = self.config.max_log_size and (len(self.past_mssgs) - self.banner_len <= 2 or new_key)

        if full_log_cond or capped_log_cond:
            mssg = f"{'Step (% Done)':<20}"
            for name, _ in latest_map.items():
                if name in self.stats_to_track:
                    mssg += f"{name:<20} "
            self.past_mssgs[0] = mssg
            self.past_mssgs[1] = "-" * len(mssg)
            if full_log_cond or not self.has_printed:
                print(mssg)
                print("-" * len(mssg))

    def _print_stats(self, latest_map, padding=" "):
        """helper to print out the stats in a readable format

        Args:
            latest_map: the most recent dictionary of stats that have been recorded
            padding: type of characters to print to pad open space
        """
        step = GLOBAL_BUFFER["step"]
        fraction_done = step / GLOBAL_BUFFER["max_iter"]
        curr_mssg = f"{step} ({fraction_done*100:.02f}%)"
        curr_mssg = f"{curr_mssg:<20}"
        for name, v in latest_map.items():
            if name in self.stats_to_track:
                # choose a formatter based on the stat name convention
                if "(time)" in name:
                    v = _format_time(v)
                elif "Rays" in name:
                    v = human_format(v)
                else:
                    v = f"{v:0.4f}"
                curr_mssg += f"{v:<20} "

        # update the history buffer
        if self.config.max_log_size:
            if not self.has_printed:
                cursor_idx = len(self.past_mssgs) - self.banner_len
                self.has_printed = True
            else:
                cursor_idx = len(self.past_mssgs)
            # drop the oldest stats line once the capped history window is full
            if len(self.past_mssgs[2:]) - self.banner_len >= self.config.max_log_size:
                self.past_mssgs.pop(2)
            # insert the new line just above the banner section
            self.past_mssgs.insert(len(self.past_mssgs) - self.banner_len, curr_mssg)
            # move the cursor up and repaint the whole window in place
            _cursorup(cursor_idx)

            for i, mssg in enumerate(self.past_mssgs):
                pad_len = len(max(self.past_mssgs, key=len))
                # banner lines get a green-background ANSI style
                style = "\x1b[6;30;42m" if self.banner_len and i >= len(self.past_mssgs) - self.banner_len + 1 else ""
                print(f"{style}{mssg:{padding}<{pad_len}} \x1b[0m")
        else:
            print(curr_mssg)
The provided code snippet includes necessary dependencies for implementing the `write_out_storage` function. Write a Python function `def write_out_storage()` to solve the following problem:
Function that writes all the events in storage to all the writer locations
Here is the function:
def write_out_storage():
    """Flush every queued event in EVENT_STORAGE to all registered writer backends,
    then clear the queue."""
    for backend in EVENT_WRITERS:
        # the local writer consumes the whole queue itself; just trigger one stats print
        if isinstance(backend, LocalWriter) and len(EVENT_STORAGE) > 0:
            backend.write_stats_log(EVENT_STORAGE[0]["step"])
            continue
        for event in EVENT_STORAGE:
            # dispatch to the writer method named by the event's write_type
            handler = getattr(backend, event["write_type"].value)
            handler(event["name"], event["event"], event["step"])
    EVENT_STORAGE.clear()
162,345 | from __future__ import annotations
import enum
from abc import abstractmethod
from pathlib import Path
from time import time
from typing import Any, Dict, List, Optional, Union
import torch
import wandb
from rich.console import Console
from torch.utils.tensorboard import SummaryWriter
from torchtyping import TensorType
from nerfstudio.configs import base_config as cfg
from nerfstudio.utils.decorators import check_main_thread, decorate_all
from nerfstudio.utils.printing import human_format
CONSOLE = Console(width=120)
EVENT_WRITERS = []
GLOBAL_BUFFER = {}
The provided code snippet includes necessary dependencies for implementing the `setup_local_writer` function. Write a Python function `def setup_local_writer(config: cfg.LoggingConfig, max_iter: int, banner_messages: Optional[List[str]] = None) -> None` to solve the following problem:
Initialization of all event writers specified in config Args: config: configuration to instantiate loggers max_iter: maximum number of train iterations banner_messages: list of messages to always display at bottom of screen
Here is the function:
def setup_local_writer(config: cfg.LoggingConfig, max_iter: int, banner_messages: Optional[List[str]] = None) -> None:
    """Initialization of all event writers specified in config

    Args:
        config: configuration to instantiate loggers
        max_iter: maximum number of train iterations
        banner_messages: list of messages to always display at bottom of screen
    """
    if not config.local_writer.enable:
        CONSOLE.log("disabled local writer")
    else:
        EVENT_WRITERS.append(config.local_writer.setup(banner_messages=banner_messages))

    # seed the shared buffer with the global bookkeeping values
    GLOBAL_BUFFER["max_iter"] = max_iter
    GLOBAL_BUFFER["max_buffer_size"] = config.max_buffer_size
    GLOBAL_BUFFER["steps_per_log"] = config.steps_per_log
    GLOBAL_BUFFER["events"] = {}
162,346 | from __future__ import annotations
import enum
from abc import abstractmethod
from pathlib import Path
from time import time
from typing import Any, Dict, List, Optional, Union
import torch
import wandb
from rich.console import Console
from torch.utils.tensorboard import SummaryWriter
from torchtyping import TensorType
from nerfstudio.configs import base_config as cfg
from nerfstudio.utils.decorators import check_main_thread, decorate_all
from nerfstudio.utils.printing import human_format
CONSOLE = Console(width=120)
EVENT_WRITERS = []
class WandbWriter(Writer):
    """WandDB Writer Class"""

    def __init__(self, log_dir: Path, experiment_name: str):
        # reinit=True allows starting a fresh run even if one is already active
        # in this process
        wandb.init(project="sdfstudio", name=experiment_name, dir=str(log_dir), reinit=True)

    def write_image(self, name: str, image: TensorType["H", "W", "C"], step: int) -> None:
        # convert HWC -> CHW before wrapping in wandb.Image
        image = torch.permute(image, (2, 0, 1))
        wandb.log({name: wandb.Image(image)}, step=step)

    def write_scalar(self, name: str, scalar: Union[float, torch.Tensor], step: int) -> None:
        wandb.log({name: scalar}, step=step)

    def write_config(self, name: str, config_dict: Dict[str, Any], step: int):
        # pylint: disable=unused-argument
        # pylint: disable=no-self-use
        """Function that writes out the config to wandb

        Args:
            config: config dictionary to write out
        """
        wandb.config.update(config_dict)
class TensorboardWriter(Writer):
    """Tensorboard Writer Class"""

    def __init__(self, log_dir: Path):
        self.tb_writer = SummaryWriter(log_dir=log_dir)

    def write_image(self, name: str, image: TensorType["H", "W", "C"], step: int) -> None:
        # to8b is defined elsewhere in this module; presumably converts the image
        # to uint8 for tensorboard — confirm at definition site
        image = to8b(image)
        self.tb_writer.add_image(name, image, step, dataformats="HWC")

    def write_scalar(self, name: str, scalar: Union[float, torch.Tensor], step: int) -> None:
        self.tb_writer.add_scalar(name, scalar, step)

    def write_config(self, name: str, config_dict: Dict[str, Any], step: int):  # pylint: disable=unused-argument
        """Function that writes out the config to tensorboard

        Args:
            config: config dictionary to write out
        """
        # stored as free-form text; tensorboard has no native config record type
        self.tb_writer.add_text("config", str(config_dict))
The provided code snippet includes necessary dependencies for implementing the `setup_event_writer` function. Write a Python function `def setup_event_writer(config: cfg.Config, log_dir: Path) -> None` to solve the following problem:
Initialization of the tensorboard/wandb event writers specified in config Args: config: configuration specifying which event writers are enabled log_dir: directory in which event logs are written
Here is the function:
def setup_event_writer(config: cfg.Config, log_dir: Path) -> None:
    """Initialization of the wandb/tensorboard event writers specified in config

    Args:
        config: configuration specifying which event writers are enabled
        log_dir: directory the writers log into
    """
    using_event_writer = False
    if config.is_wandb_enabled():
        EVENT_WRITERS.append(WandbWriter(log_dir=log_dir, experiment_name=config.experiment_name))
        using_event_writer = True
    if config.is_tensorboard_enabled():
        EVENT_WRITERS.append(TensorboardWriter(log_dir=log_dir))
        using_event_writer = True
    string = f"logging events to: {log_dir}" if using_event_writer else "Disabled tensorboard/wandb event writers"
    CONSOLE.print(f"[bold yellow]{string}")
162,347 | from __future__ import annotations
import enum
from abc import abstractmethod
from pathlib import Path
from time import time
from typing import Any, Dict, List, Optional, Union
import torch
import wandb
from rich.console import Console
from torch.utils.tensorboard import SummaryWriter
from torchtyping import TensorType
from nerfstudio.configs import base_config as cfg
from nerfstudio.utils.decorators import check_main_thread, decorate_all
from nerfstudio.utils.printing import human_format
The provided code snippet includes necessary dependencies for implementing the `_cursorup` function. Write a Python function `def _cursorup(x: int)` to solve the following problem:
utility tool to move the cursor up on the terminal Args: x: amount of lines to move cursor upward
Here is the function:
def _cursorup(x: int):
    """utility tool to move the cursor up on the terminal

    Args:
        x: amount of lines to move cursor upward
    """
    # ANSI: carriage return, move up x lines, then clear to start of line
    escape_sequence = f"\r\033[{x}A"
    print(escape_sequence, end="\x1b[1K\r")
162,348 | import torch
from torchtyping import TensorType
The provided code snippet includes necessary dependencies for implementing the `inverse` function. Write a Python function `def inverse(pose: TensorType[..., 3, 4]) -> TensorType[..., 3, 4]` to solve the following problem:
Invert provided pose matrix. Args: pose: Camera pose without homogeneous coordinate. Returns: Inverse of pose.
Here is the function:
def inverse(pose: TensorType[..., 3, 4]) -> TensorType[..., 3, 4]:
    """Invert provided pose matrix.

    Args:
        pose: Camera pose without homogeneous coordinate.

    Returns:
        Inverse of pose.
    """
    rotation = pose[..., :3, :3]
    translation = pose[..., :3, 3:]
    # for a rigid transform the rotation inverse is its transpose,
    # and the translation inverse is -R^T t
    rotation_inv = rotation.transpose(-2, -1)
    translation_inv = -rotation_inv.matmul(translation)
    return torch.cat([rotation_inv, translation_inv], dim=-1)
162,349 | import torch
from torchtyping import TensorType
The provided code snippet includes necessary dependencies for implementing the `multiply` function. Write a Python function `def multiply(pose_a: TensorType[..., 3, 4], pose_b: TensorType[..., 3, 4]) -> TensorType[..., 3, 4]` to solve the following problem:
Multiply two pose matrices, A @ B. Args: pose_a: Left pose matrix, usually a transformation applied to the right. pose_b: Right pose matrix, usually a camera pose that will be transformed by pose_a. Returns: Camera pose matrix where pose_a was applied to pose_b.
Here is the function:
def multiply(pose_a: TensorType[..., 3, 4], pose_b: TensorType[..., 3, 4]) -> TensorType[..., 3, 4]:
    """Multiply two pose matrices, A @ B.

    Args:
        pose_a: Left pose matrix, usually a transformation applied to the right.
        pose_b: Right pose matrix, usually a camera pose that will be transformed by pose_a.

    Returns:
        Camera pose matrix where pose_a was applied to pose_b.
    """
    rot_a, trans_a = pose_a[..., :3, :3], pose_a[..., :3, 3:]
    rot_b, trans_b = pose_b[..., :3, :3], pose_b[..., :3, 3:]
    # composed rotation is R1 R2; composed translation is t1 + R1 t2
    combined_rot = rot_a.matmul(rot_b)
    combined_trans = trans_a + rot_a.matmul(trans_b)
    return torch.cat([combined_rot, combined_trans], dim=-1)
162,350 | import shutil
import sys
from rich.console import Console
CONSOLE = Console(width=120)
The provided code snippet includes necessary dependencies for implementing the `check_ffmpeg_installed` function. Write a Python function `def check_ffmpeg_installed()` to solve the following problem:
Checks if ffmpeg is installed.
Here is the function:
def check_ffmpeg_installed():
    """Checks if ffmpeg is installed.

    Exits the process with status 1 if the ffmpeg binary is not on PATH.
    """
    if shutil.which("ffmpeg") is not None:
        return
    CONSOLE.print("[bold red]Could not find ffmpeg. Please install ffmpeg.")
    print("See https://ffmpeg.org/download.html for installation instructions.")
    print("ffmpeg is only necessary if using videos as input.")
    sys.exit(1)
162,351 | import shutil
import sys
from rich.console import Console
CONSOLE = Console(width=120)
The provided code snippet includes necessary dependencies for implementing the `check_colmap_installed` function. Write a Python function `def check_colmap_installed()` to solve the following problem:
Checks if colmap is installed.
Here is the function:
def check_colmap_installed():
    """Checks if colmap is installed.

    Exits the process with status 1 if the colmap binary is not on PATH.
    """
    if shutil.which("colmap") is not None:
        return
    CONSOLE.print("[bold red]Could not find COLMAP. Please install COLMAP.")
    print("See https://colmap.github.io/install.html for installation instructions.")
    sys.exit(1)
162,352 | from typing import Any, List, Optional, Union
import numpy as np
import plotly.graph_objects as go
import torch
import torch.nn.functional as F
from plotly import express as ex
from torchtyping import TensorType
from nerfstudio.cameras.cameras import Cameras
from nerfstudio.cameras.rays import Frustums, RayBundle
from nerfstudio.utils.math import Gaussians
def get_line_segments_from_lines(
    lines: TensorType["num_rays", 2, 3],
    color: str = color_str((1, 0, 0)),
    marker_color: str = color_str((1, 0, 0)),
    colors: Optional[List[str]] = None,
    draw_marker: bool = True,
    draw_line: bool = True,
    marker_size: float = 4,
    line_width: float = 10,
) -> List[Any]:
    """Returns a list of Scatter3D objects for creating lines with plotly.
    # TODO(ethan): make this function more efficient instead of having a list of objects.

    Args:
        lines: Tensor of lines.
        color: Color of the lines. Defaults to red.
        marker_color: Color of the markers. Defaults to red.
        colors: List of colors for each line. Defaults to None.
        draw_marker: Whether to draw markers. Defaults to True.
        draw_line: Whether to draw lines. Defaults to True.
        marker_size: Size of the markers. Defaults to 4.
        line_width: Width of the lines. Defaults to 10.

    Returns:
        Scatter3D object on lines.
    """
    data = []
    for idx, line in enumerate(lines):
        # a fully transparent line color hides the segment when draw_line is False
        thiscolor = color if draw_line else "rgba(0, 0, 0, 0)"
        if colors is not None:
            # per-line colors override both marker and line colors
            marker_color = colors[idx]
            thiscolor = colors[idx]
        data.append(
            go.Scatter3d(
                x=line[:, 0],
                y=line[:, 1],
                z=line[:, 2],
                showlegend=False,
                marker=dict(
                    size=marker_size,
                    color=marker_color,
                    colorscale="Viridis",
                )
                if draw_marker
                # transparent markers effectively hide them when draw_marker is False
                else dict(color="rgba(0, 0, 0, 0)"),
                line=dict(color=thiscolor, width=line_width),
            )
        )
    return data
class RayBundle(TensorDataclass):
    """A bundle of ray parameters."""

    # TODO(ethan): make sure the sizes with ... are correct
    origins: TensorType[..., 3]
    """Ray origins (XYZ)"""
    directions: TensorType[..., 3]
    """Unit ray direction vector"""
    pixel_area: TensorType[..., 1]
    """Projected area of pixel a distance 1 away from origin"""
    directions_norm: Optional[TensorType[..., 1]] = None
    """Norm of ray direction vector before normalization"""
    camera_indices: Optional[TensorType[..., 1]] = None
    """Camera indices"""
    nears: Optional[TensorType[..., 1]] = None
    """Distance along ray to start sampling"""
    fars: Optional[TensorType[..., 1]] = None
    """Rays Distance along ray to stop sampling"""
    metadata: Optional[Dict[str, TensorType["num_rays", "latent_dims"]]] = None
    """Additional metadata or data needed for interpolation, will mimic shape of rays"""
    times: Optional[TensorType[..., 1]] = None
    """Times at which rays are sampled"""

    def set_camera_indices(self, camera_index: int) -> None:
        """Sets all of the camera indices to a specific camera index.

        Args:
            camera_index: Camera index.
        """
        self.camera_indices = torch.ones_like(self.origins[..., 0:1]).long() * camera_index

    def __len__(self) -> int:
        """Number of rays in the bundle (product of all batch dimensions of origins)."""
        num_rays = torch.numel(self.origins) // self.origins.shape[-1]
        return num_rays

    def sample(self, num_rays: int) -> "RayBundle":
        """Returns a RayBundle as a subset of rays.

        Args:
            num_rays: Number of rays in output RayBundle

        Returns:
            RayBundle with subset of rays.
        """
        assert num_rays <= len(self)
        # sample without replacement over the flattened ray index space
        indices = random.sample(range(len(self)), k=num_rays)
        return self[indices]

    def get_row_major_sliced_ray_bundle(self, start_idx: int, end_idx: int) -> "RayBundle":
        """Flattens RayBundle and extracts chunk given start and end indices.

        Args:
            start_idx: Start index of RayBundle chunk.
            end_idx: End index of RayBundle chunk.

        Returns:
            Flattened RayBundle with end_idx-start_idx rays.
        """
        return self.flatten()[start_idx:end_idx]

    def get_ray_samples(
        self,
        bin_starts: TensorType["bs":..., "num_samples", 1],
        bin_ends: TensorType["bs":..., "num_samples", 1],
        spacing_starts: Optional[TensorType["bs":..., "num_samples", 1]] = None,
        spacing_ends: Optional[TensorType["bs":..., "num_samples", 1]] = None,
        spacing_to_euclidean_fn: Optional[Callable] = None,
    ) -> RaySamples:
        """Produces samples for each ray by projection points along the ray direction. Currently samples uniformly.

        Args:
            bin_starts: Distance from origin to start of bin.
            bin_ends: Distance from origin to end of bin.
            spacing_starts: Optional start of bin in the sampler's spacing coordinates
                (presumably normalized — confirm against the sampler that produces them).
            spacing_ends: Optional end of bin in the sampler's spacing coordinates.
            spacing_to_euclidean_fn: Optional function mapping spacing coordinates to
                euclidean distances.

        Returns:
            Samples projected along ray.
        """
        deltas = bin_ends - bin_starts
        if self.camera_indices is not None:
            camera_indices = self.camera_indices[..., None]
        else:
            camera_indices = None

        # broadcast per-ray fields against the per-sample dimension
        shaped_raybundle_fields = self[..., None]

        frustums = Frustums(
            origins=shaped_raybundle_fields.origins,  # [..., 1, 3]
            directions=shaped_raybundle_fields.directions,  # [..., 1, 3]
            starts=bin_starts,  # [..., num_samples, 1]
            ends=bin_ends,  # [..., num_samples, 1]
            pixel_area=shaped_raybundle_fields.pixel_area,  # [..., 1, 1]
        )

        ray_samples = RaySamples(
            frustums=frustums,
            camera_indices=camera_indices,  # [..., 1, 1]
            deltas=deltas,  # [..., num_samples, 1]
            spacing_starts=spacing_starts,  # [..., num_samples, 1]
            spacing_ends=spacing_ends,  # [..., num_samples, 1]
            spacing_to_euclidean_fn=spacing_to_euclidean_fn,
            metadata=shaped_raybundle_fields.metadata,
            times=None if self.times is None else self.times[..., None],  # [..., 1, 1]
        )

        return ray_samples
The provided code snippet includes necessary dependencies for implementing the `vis_dataset` function. Write a Python function `def vis_dataset(camera_origins: TensorType["num_cameras", 3], ray_bundle: RayBundle) -> go.FigureWidget` to solve the following problem:
Visualize a dataset with plotly using our cameras and generated rays. Args: camera_origins: Tensor of camera origins. ray_bundle: Ray bundle. Returns: plotly figure.
Here is the function:
def vis_dataset(camera_origins: TensorType["num_cameras", 3], ray_bundle: RayBundle) -> go.FigureWidget:  # type: ignore
    """Visualize a dataset with plotly using our cameras and generated rays.

    NOTE(review): annotated as go.FigureWidget but a plain go.Figure is returned — confirm.

    Args:
        camera_origins: Tensor of camera origins.
        ray_bundle: Ray bundle.

    Returns:
        plotly figure.
    """
    skip = 1
    size = 8
    assert len(ray_bundle) < 500, "Let's not break plotly by plotting too many rays!"

    data = []
    # camera origins as opaque black markers
    data += [
        go.Scatter3d(
            x=camera_origins[::skip, 0],
            y=camera_origins[::skip, 1],
            z=camera_origins[::skip, 2],
            mode="markers",
            name="camera origins",
            marker=dict(color="rgba(0, 0, 0, 1)", size=size),
        )
    ]

    # draw each ray as a fixed-length segment from its origin along its direction
    length = 2.0
    lines = torch.stack(
        [ray_bundle.origins, ray_bundle.origins + ray_bundle.directions * length], dim=1
    )  # (num_rays, 2, 3)
    data += get_line_segments_from_lines(lines)

    layout = go.Layout(
        autosize=False,
        width=1000,
        height=1000,
        margin=go.layout.Margin(l=50, r=50, b=100, t=100, pad=4),  # type: ignore
        scene=go.layout.Scene(  # type: ignore
            aspectmode="data",
            camera=dict(up=dict(x=0, y=0, z=1), center=dict(x=0, y=0, z=0), eye=dict(x=1.25, y=1.25, z=1.25)),
        ),
    )

    fig = go.Figure(data=data, layout=layout)
    return fig
162,353 | from typing import Any, List, Optional, Union
import numpy as np
import plotly.graph_objects as go
import torch
import torch.nn.functional as F
from plotly import express as ex
from torchtyping import TensorType
from nerfstudio.cameras.cameras import Cameras
from nerfstudio.cameras.rays import Frustums, RayBundle
from nerfstudio.utils.math import Gaussians
The provided code snippet includes necessary dependencies for implementing the `get_sphere` function. Write a Python function `def get_sphere( radius: float, center: TensorType[3] = None, color: str = "black", opacity: float = 1.0, resolution: int = 32 ) -> go.Mesh3d` to solve the following problem:
Returns a sphere object for plotting with plotly. Args: radius: radius of sphere. center: center of sphere. Defaults to origin. color: color of sphere. Defaults to "black". opacity: opacity of sphere. Defaults to 1.0. resolution: resolution of sphere. Defaults to 32. Returns: sphere object.
Here is the function:
def get_sphere(
    radius: float, center: TensorType[3] = None, color: str = "black", opacity: float = 1.0, resolution: int = 32
) -> go.Mesh3d:  # type: ignore
    """Returns a sphere object for plotting with plotly.

    Args:
        radius: radius of sphere.
        center: center of sphere. Defaults to origin.
        color: color of sphere. Defaults to "black".
        opacity: opacity of sphere. Defaults to 1.0.
        resolution: resolution of sphere. Defaults to 32.

    Returns:
        sphere object.
    """
    azimuth = torch.linspace(0, 2 * torch.pi, resolution)
    elevation = torch.linspace(-torch.pi / 2, torch.pi / 2, resolution)
    azimuth, elevation = torch.meshgrid(azimuth, elevation, indexing="ij")

    # spherical -> cartesian on the unit sphere, then scale and shift
    pts = torch.stack(
        (
            torch.cos(elevation) * torch.sin(azimuth),
            torch.cos(elevation) * torch.cos(azimuth),
            torch.sin(elevation),
        ),
        dim=-1,
    )
    pts = pts * radius
    if center is not None:
        pts = pts + center

    return go.Mesh3d(
        {
            "x": pts[:, :, 0].flatten(),
            "y": pts[:, :, 1].flatten(),
            "z": pts[:, :, 2].flatten(),
            "alphahull": 0,
            "opacity": opacity,
            "color": color,
        }
    )
162,354 | from typing import Any, List, Optional, Union
import numpy as np
import plotly.graph_objects as go
import torch
import torch.nn.functional as F
from plotly import express as ex
from torchtyping import TensorType
from nerfstudio.cameras.cameras import Cameras
from nerfstudio.cameras.rays import Frustums, RayBundle
from nerfstudio.utils.math import Gaussians
The provided code snippet includes necessary dependencies for implementing the `get_cube` function. Write a Python function `def get_cube( side_length: float, center: TensorType[3] = None, color: str = "black", opacity: float = 1.0, ) -> go.Mesh3d` to solve the following problem:
Returns a cube object for plotting with plotly. Args: side_length: side_length of cube. center: center of cube. color: color of cube. opacity: opacity of cube. Returns: cube object.
Here is the function:
def get_cube(
    side_length: float,
    center: TensorType[3] = None,
    color: str = "black",
    opacity: float = 1.0,
) -> go.Mesh3d:  # type: ignore
    """Returns a cube object for plotting with plotly.

    Args:
        side_length: side_length of cube.
        center: center of cube.
        color: color of cube.
        opacity: opacity of cube.

    Returns:
        cube object.
    """
    # eight corners of a cube with side length 2, centered at the origin (rows: x, y, z)
    corners = np.array(
        [
            [-1.0, -1.0, 1.0, 1.0, -1.0, -1.0, 1.0, 1.0],
            [-1.0, 1.0, 1.0, -1.0, -1.0, 1.0, 1.0, -1.0],
            [-1.0, -1.0, -1.0, -1.0, 1.0, 1.0, 1.0, 1.0],
        ]
    )
    corners *= side_length / 2.0
    if center is not None:
        corners += center

    return go.Mesh3d(
        {
            "x": corners[0],
            "y": corners[1],
            "z": corners[2],
            "alphahull": 0,
            "opacity": opacity,
            "color": color,
        }
    )
162,355 | from typing import Any, List, Optional, Union
import numpy as np
import plotly.graph_objects as go
import torch
import torch.nn.functional as F
from plotly import express as ex
from torchtyping import TensorType
from nerfstudio.cameras.cameras import Cameras
from nerfstudio.cameras.rays import Frustums, RayBundle
from nerfstudio.utils.math import Gaussians
def get_random_color(colormap: Optional[List[str]] = None, idx: Optional[int] = None) -> str:
    """Get a random color from a colormap.

    Args:
        colormap: List of colors. Defaults to Plotly colors.
        idx: Index of color to return (wraps around). Defaults to None (random pick).

    Returns:
        random color string
    """
    palette = colormap if colormap is not None else ex.colors.qualitative.Plotly
    if idx is not None:
        return palette[idx % len(palette)]
    return palette[np.random.randint(0, len(palette))]
def get_gaussian_ellipsiod(
    mean: TensorType[3],
    cov: TensorType[3, 3],
    n_std: int = 2,
    color="lightblue",
    opacity: float = 0.5,
    resolution: int = 20,
    name: str = "ellipse",
) -> go.Mesh3d:  # type: ignore
    """Get a plotly ellipsoid for a Gaussian.

    Args:
        mean: mean of the Gaussian.
        cov: covariance of the Gaussian.
        n_std: Standard deviation to visualize. Defaults to 2 (95% confidence).
        color: Color of the ellipsoid. Defaults to None.
        opacity: Opacity of the ellipsoid. Defaults to 0.5.
        resolution: Resolution of the ellipsoid. Defaults to 20.
        name: Name of the ellipsoid. Defaults to "ellipse".

    Returns:
        ellipsoid object.
    """
    # sample a unit sphere in spherical coordinates
    phi = torch.linspace(0, 2 * torch.pi, resolution)
    theta = torch.linspace(-torch.pi / 2, torch.pi / 2, resolution)
    phi, theta = torch.meshgrid(phi, theta, indexing="ij")

    x = torch.cos(theta) * torch.sin(phi)
    y = torch.cos(theta) * torch.cos(phi)
    z = torch.sin(theta)
    pts = torch.stack((x, y, z), dim=-1)

    eigenvals, eigenvecs = torch.linalg.eigh(cov)
    # NOTE(review): this double argsort is unusual — the eigenpairs are ordered via
    # the column sums of cov rather than by the eigenvalues directly; verify intent.
    idx = torch.sum(cov, dim=0).argsort()
    idx = eigenvals[idx].argsort()
    eigenvals = eigenvals[idx]
    eigenvecs = eigenvecs[:, idx]

    # stretch the unit sphere along the eigenvectors to the requested confidence level
    scaling = torch.sqrt(eigenvals) * n_std
    pts = pts * scaling
    pts = pts @ eigenvecs.t()
    pts += mean

    return go.Mesh3d(
        {
            "x": pts[:, :, 0].flatten(),
            "y": pts[:, :, 1].flatten(),
            "z": pts[:, :, 2].flatten(),
            "alphahull": 0,
            "opacity": opacity,
            "color": color,
            "name": name,
        }
    )
class Gaussians:
    """Stores Gaussians

    Args:
        mean: Mean of multivariate Gaussian
        cov: Covariance of multivariate Gaussian.
    """

    # NOTE(review): annotations only — presumably decorated as a dataclass at the
    # original definition site; confirm.
    mean: TensorType[..., "dim"]
    cov: TensorType[..., "dim", "dim"]
The provided code snippet includes necessary dependencies for implementing the `get_gaussian_ellipsoids_list` function. Write a Python function `def get_gaussian_ellipsoids_list( gaussians: Gaussians, opacity: float = 0.5, color: str = "random", resolution: int = 20 ) -> List[Union[go.Mesh3d, go.Scatter3d]]` to solve the following problem:
Get a list of plotly meshes for frustums. Args: gaussians (Gaussians): Gaussians to visualize. opacity (float, optional): Opacity of the mesh. Defaults to 0.3. color (str, optional): Color of the mesh. Defaults to "random". resolution: Resolution of the mesh. Defaults to 20. Returns: List of plotly meshes
Here is the function:
def get_gaussian_ellipsoids_list(
    gaussians: Gaussians, opacity: float = 0.5, color: str = "random", resolution: int = 20
) -> List[Union[go.Mesh3d, go.Scatter3d]]:  # type: ignore
    """Build plotly traces visualizing a batch of Gaussians.

    Produces one scatter trace marking all the Gaussian means, followed by
    one ellipsoid mesh per Gaussian.

    Args:
        gaussians: Gaussians to visualize.
        opacity: Opacity of each ellipsoid mesh. Defaults to 0.5.
        color: Color of each ellipsoid, or "random" for a random color per Gaussian.
        resolution: Resolution of each ellipsoid mesh. Defaults to 20.

    Returns:
        List of plotly traces (means scatter first, then one mesh per Gaussian).
    """
    means_trace = go.Scatter3d(
        x=gaussians.mean[:, 0],
        y=gaussians.mean[:, 1],
        z=gaussians.mean[:, 2],
        mode="markers",
        marker=dict(size=2, color="black"),
        name="Means",
    )
    traces: List[Union[go.Mesh3d, go.Scatter3d]] = [means_trace]
    num_gaussians = gaussians.mean.shape[0]
    for gaussian_idx in range(num_gaussians):
        ellipsoid_color = get_random_color() if color == "random" else color
        traces.append(
            get_gaussian_ellipsiod(
                gaussians.mean[gaussian_idx],
                cov=gaussians.cov[gaussian_idx],
                color=ellipsoid_color,
                opacity=opacity,
                resolution=resolution,
            )
        )
    return traces
162,356 | from typing import Any, List, Optional, Union
import numpy as np
import plotly.graph_objects as go
import torch
import torch.nn.functional as F
from plotly import express as ex
from torchtyping import TensorType
from nerfstudio.cameras.cameras import Cameras
from nerfstudio.cameras.rays import Frustums, RayBundle
from nerfstudio.utils.math import Gaussians
def get_random_color(colormap: Optional[List[str]] = None, idx: Optional[int] = None) -> str:
    """Pick a color from a colormap.

    Args:
        colormap: List of color strings. Defaults to Plotly's qualitative palette.
        idx: Deterministic index into the colormap (wrapped by its length).
            If None, a color is chosen uniformly at random.

    Returns:
        A color string from the colormap.
    """
    palette = ex.colors.qualitative.Plotly if colormap is None else colormap
    if idx is None:
        return palette[np.random.randint(0, len(palette))]
    return palette[idx % len(palette)]
def get_frustum_mesh(
    frustum: Frustums, opacity: float = 0.3, color: str = "#DC203C", resolution: int = 20
) -> go.Mesh3d:  # type: ignore
    """Get a plotly mesh for a single frustum.

    The frustum is drawn as a truncated cone: two circles of ``resolution``
    points each (one at the near plane, one at the far plane) are generated
    in a canonical frame, rotated to the frustum's direction, translated to
    its origin, and meshed via plotly's alphahull.

    Args:
        frustum: Single frustum.
        opacity: Opacity of the mesh. Defaults to 0.3.
        color: Color of the mesh. Defaults to "#DC203C".
        resolution: Number of points per circle. Defaults to 20.

    Returns:
        Plotly mesh
    """
    if frustum.ndim > 1:
        raise ValueError("Frustum must be a single Frustum object.")

    # Radius at unit distance such that a circle of this radius has area pixel_area.
    base_radius = torch.sqrt(frustum.pixel_area / torch.pi)
    f_radius = frustum.starts * base_radius  # circle radius at the near plane
    b_radius = frustum.ends * base_radius  # circle radius at the far plane

    # Canonical frame: x runs along the ray; (y, z) span the two circles.
    x = torch.cat([torch.ones(resolution) * frustum.starts, torch.ones(resolution) * frustum.ends])
    pts = torch.linspace(0, 2 * torch.pi, resolution)
    y = torch.sin(pts)
    z = torch.cos(pts)
    y = torch.cat([y * f_radius, y * b_radius])
    z = torch.cat([z * f_radius, z * b_radius])

    pts = torch.stack([x, y, z], dim=-1)

    # Build an orthonormal basis with the frustum direction as "forward".
    # NOTE(review): the cross product with (0, 0, 1) degenerates when the
    # direction is parallel to +z -- confirm upstream guarantees this cannot happen.
    forward = frustum.directions
    up = F.normalize(torch.cross(forward, torch.tensor([0.0, 0, 1])), dim=-1)  # type: ignore
    right = F.normalize(torch.cross(forward, up), dim=-1)  # type: ignore
    rotation = torch.stack([forward, up, right], dim=1)
    # Rotate canonical points into world orientation, then translate to the origin.
    pts = torch.einsum("kj,ij->ki", pts, rotation)

    pts += frustum.origins

    return go.Mesh3d(
        x=pts[..., 0],
        y=pts[..., 1],
        z=pts[..., 2],
        opacity=opacity,
        alphahull=0,
        color=color,
        flatshading=True,
        name="Frustums",
    )
class Frustums(TensorDataclass):
    """Describes a region of space as a (conical) frustum along a ray."""

    origins: TensorType["bs":..., 3]
    """xyz coordinate for ray origin."""
    directions: TensorType["bs":..., 3]
    """Direction of ray."""
    starts: TensorType["bs":..., 1]
    """Where the frustum starts along a ray."""
    ends: TensorType["bs":..., 1]
    """Where the frustum ends along a ray."""
    pixel_area: TensorType["bs":..., 1]
    """Projected area of pixel a distance 1 away from origin."""
    offsets: Optional[TensorType["bs":..., 3]] = None
    """Offsets for each sample position"""

    def get_positions(self) -> TensorType[..., 3]:
        """Calculates the "center" position of the frustum. Not weighted by mass.

        Returns:
            xyz positions, optionally shifted by per-sample offsets.
        """
        # Midpoint between the near and far planes along the ray direction.
        pos = self.origins + self.directions * (self.starts + self.ends) / 2
        if self.offsets is not None:
            pos = pos + self.offsets
        return pos

    def set_offsets(self, offsets):
        """Sets offsets for this frustum for computing positions."""
        self.offsets = offsets

    def get_start_positions(self) -> TensorType[..., 3]:
        """Calculates the "start" position of the frustum.

        We use start positions for MonoSDF because with error-bounded sampling we
        need to upsample many times, and it is hard to merge two sets of ray
        samples while keeping the midpoints fixed: every upsampling changes the
        midpoints, which forces re-evaluating all points (3x slower). With start
        positions, after a merge the starting point is unchanged and only the
        delta changes, so sdf re-evaluation can be skipped.

        Returns:
            xyz positions.
        """
        return self.origins + self.directions * self.starts

    def get_gaussian_blob(self) -> Gaussians:
        """Calculates a gaussian approximation of the conical frustum.

        Returns:
            Conical frustums approximated by gaussian distributions.
        """
        # Cone radius is set such that the square pixel_area matches the cone area.
        cone_radius = torch.sqrt(self.pixel_area) / 1.7724538509055159  # r = sqrt(pixel_area / pi)
        if self.offsets is not None:
            # The gaussian approximation does not account for per-sample offsets.
            raise NotImplementedError()
        return conical_frustum_to_gaussian(
            origins=self.origins,
            directions=self.directions,
            starts=self.starts,
            ends=self.ends,
            radius=cone_radius,
        )

    # NOTE(review): takes ``cls`` -- presumably decorated with @classmethod in the
    # original source; confirm the decorator was not lost in extraction.
    def get_mock_frustum(cls, device="cpu") -> "Frustums":
        """Helper function to generate a placeholder frustum.

        Returns:
            A size 1 frustum with meaningless values.
        """
        return Frustums(
            origins=torch.ones((1, 3)).to(device),
            directions=torch.ones((1, 3)).to(device),
            starts=torch.ones((1, 1)).to(device),
            ends=torch.ones((1, 1)).to(device),
            pixel_area=torch.ones((1, 1)).to(device),
        )
The provided code snippet includes necessary dependencies for implementing the `get_frustums_mesh_list` function. Write a Python function `def get_frustums_mesh_list( frustums: Frustums, opacity: float = 1.0, color: str = "random", resolution: int = 20 ) -> List[go.Mesh3d]` to solve the following problem:
Get a list of plotly meshes for a list of frustums. Args: frustums: List of frustums opacity: Opacity of the mesh. Defaults to 1.0. color: Color of the mesh. Defaults to "random". resolution: Resolution of the mesh. Defaults to 20. Returns: List of plotly meshes
Here is the function:
def get_frustums_mesh_list(
    frustums: Frustums, opacity: float = 1.0, color: str = "random", resolution: int = 20
) -> List[go.Mesh3d]:  # type: ignore
    """Build one plotly mesh per frustum in a batch.

    Args:
        frustums: Frustums to visualize.
        opacity: Opacity of each mesh. Defaults to 1.0.
        color: Color of each mesh, or "random" for a deterministic
            per-frustum color from the default palette.
        resolution: Resolution of each mesh. Defaults to 20.

    Returns:
        List of plotly meshes, one per (flattened) frustum.
    """
    meshes: List[go.Mesh3d] = []
    for index, single_frustum in enumerate(frustums.flatten()):  # type: ignore
        mesh_color = get_random_color(idx=index) if color == "random" else color
        meshes.append(get_frustum_mesh(single_frustum, opacity=opacity, color=mesh_color, resolution=resolution))
    return meshes
162,357 | from typing import Any, List, Optional, Union
import numpy as np
import plotly.graph_objects as go
import torch
import torch.nn.functional as F
from plotly import express as ex
from torchtyping import TensorType
from nerfstudio.cameras.cameras import Cameras
from nerfstudio.cameras.rays import Frustums, RayBundle
from nerfstudio.utils.math import Gaussians
class Frustums(TensorDataclass):
    """Describes a region of space as a (conical) frustum along a ray."""

    origins: TensorType["bs":..., 3]
    """xyz coordinate for ray origin."""
    directions: TensorType["bs":..., 3]
    """Direction of ray."""
    starts: TensorType["bs":..., 1]
    """Where the frustum starts along a ray."""
    ends: TensorType["bs":..., 1]
    """Where the frustum ends along a ray."""
    pixel_area: TensorType["bs":..., 1]
    """Projected area of pixel a distance 1 away from origin."""
    offsets: Optional[TensorType["bs":..., 3]] = None
    """Offsets for each sample position"""

    def get_positions(self) -> TensorType[..., 3]:
        """Calculates the "center" position of the frustum. Not weighted by mass.

        Returns:
            xyz positions (midpoint of near/far along the ray), optionally
            shifted by per-sample offsets.
        """
        pos = self.origins + self.directions * (self.starts + self.ends) / 2
        if self.offsets is not None:
            pos = pos + self.offsets
        return pos

    def set_offsets(self, offsets):
        """Sets offsets for this frustum for computing positions."""
        self.offsets = offsets

    def get_start_positions(self) -> TensorType[..., 3]:
        """Calculates the "start" position of the frustum.

        We use start positions for MonoSDF because with error-bounded sampling we
        upsample many times, and merging two sets of ray samples while keeping
        the midpoints fixed is hard: each upsampling changes the midpoints and
        forces re-evaluating all points (3x slower). Start positions survive a
        merge unchanged (only the delta changes), so sdf re-evaluation can be
        skipped.

        Returns:
            xyz positions.
        """
        return self.origins + self.directions * self.starts

    def get_gaussian_blob(self) -> Gaussians:
        """Calculates a gaussian approximation of the conical frustum.

        Returns:
            Conical frustums approximated by gaussian distributions.
        """
        # Cone radius is set such that the square pixel_area matches the cone area.
        cone_radius = torch.sqrt(self.pixel_area) / 1.7724538509055159  # r = sqrt(pixel_area / pi)
        if self.offsets is not None:
            # Offsets are not supported by the gaussian approximation.
            raise NotImplementedError()
        return conical_frustum_to_gaussian(
            origins=self.origins,
            directions=self.directions,
            starts=self.starts,
            ends=self.ends,
            radius=cone_radius,
        )

    # NOTE(review): takes ``cls`` -- likely decorated with @classmethod in the
    # original source; confirm the decorator was not lost in extraction.
    def get_mock_frustum(cls, device="cpu") -> "Frustums":
        """Helper function to generate a placeholder frustum.

        Returns:
            A size 1 frustum with meaningless values.
        """
        return Frustums(
            origins=torch.ones((1, 3)).to(device),
            directions=torch.ones((1, 3)).to(device),
            starts=torch.ones((1, 1)).to(device),
            ends=torch.ones((1, 1)).to(device),
            pixel_area=torch.ones((1, 1)).to(device),
        )
The provided code snippet includes necessary dependencies for implementing the `get_frustum_points` function. Write a Python function `def get_frustum_points( frustum: Frustums, opacity: float = 1.0, color: str = "forestgreen", size: float = 5 ) -> go.Scatter3d` to solve the following problem:
Get a set plotly points for frustums centers. Args: frustum: Frustums to visualize. opacity: Opacity of the points. Defaults to 0.3. color: Color of the poinst. Defaults to "forestgreen". size: Size of points. Defaults to 10. Returns: Plotly points
Here is the function:
def get_frustum_points(
    frustum: Frustums, opacity: float = 1.0, color: str = "forestgreen", size: float = 5
) -> go.Scatter3d:  # type: ignore
    """Plot the center positions of a batch of frustums as a scatter trace.

    Args:
        frustum: Frustums to visualize.
        opacity: Opacity of the points. Defaults to 1.0.
        color: Color of the points. Defaults to "forestgreen".
        size: Marker size of the points. Defaults to 5.

    Returns:
        Plotly scatter trace of frustum center positions.
    """
    positions = frustum.flatten().get_positions()
    return go.Scatter3d(
        x=positions[..., 0],
        y=positions[..., 1],
        z=positions[..., 2],
        mode="markers",
        marker=dict(
            size=size,
            color=color,
            opacity=opacity,
        ),
        name="Frustums -> Positions",
    )
162,358 | from typing import Any, List, Optional, Union
import numpy as np
import plotly.graph_objects as go
import torch
import torch.nn.functional as F
from plotly import express as ex
from torchtyping import TensorType
from nerfstudio.cameras.cameras import Cameras
from nerfstudio.cameras.rays import Frustums, RayBundle
from nerfstudio.utils.math import Gaussians
class RayBundle(TensorDataclass):
    """A bundle of ray parameters."""

    # TODO(ethan): make sure the sizes with ... are correct
    origins: TensorType[..., 3]
    """Ray origins (XYZ)"""
    directions: TensorType[..., 3]
    """Unit ray direction vector"""
    pixel_area: TensorType[..., 1]
    """Projected area of pixel a distance 1 away from origin"""
    directions_norm: Optional[TensorType[..., 1]] = None
    """Norm of ray direction vector before normalization"""
    camera_indices: Optional[TensorType[..., 1]] = None
    """Camera indices"""
    nears: Optional[TensorType[..., 1]] = None
    """Distance along ray to start sampling"""
    fars: Optional[TensorType[..., 1]] = None
    """Rays Distance along ray to stop sampling"""
    metadata: Optional[Dict[str, TensorType["num_rays", "latent_dims"]]] = None
    """Additional metadata or data needed for interpolation, will mimic shape of rays"""
    times: Optional[TensorType[..., 1]] = None
    """Times at which rays are sampled"""

    def set_camera_indices(self, camera_index: int) -> None:
        """Sets all of the camera indices to a specific camera index.

        Args:
            camera_index: Camera index.
        """
        self.camera_indices = torch.ones_like(self.origins[..., 0:1]).long() * camera_index

    def __len__(self):
        # Number of rays = total elements in origins / components per origin.
        num_rays = torch.numel(self.origins) // self.origins.shape[-1]
        return num_rays

    def sample(self, num_rays: int) -> "RayBundle":
        """Returns a RayBundle as a random subset of rays.

        Args:
            num_rays: Number of rays in output RayBundle

        Returns:
            RayBundle with subset of rays.
        """
        assert num_rays <= len(self)
        # Sample without replacement so no ray appears twice.
        indices = random.sample(range(len(self)), k=num_rays)
        return self[indices]

    def get_row_major_sliced_ray_bundle(self, start_idx: int, end_idx: int) -> "RayBundle":
        """Flattens RayBundle and extracts chunk given start and end indices.

        Args:
            start_idx: Start index of RayBundle chunk.
            end_idx: End index of RayBundle chunk.

        Returns:
            Flattened RayBundle with end_idx-start_idx rays.
        """
        return self.flatten()[start_idx:end_idx]

    def get_ray_samples(
        self,
        bin_starts: TensorType["bs":..., "num_samples", 1],
        bin_ends: TensorType["bs":..., "num_samples", 1],
        spacing_starts: Optional[TensorType["bs":..., "num_samples", 1]] = None,
        spacing_ends: Optional[TensorType["bs":..., "num_samples", 1]] = None,
        spacing_to_euclidean_fn: Optional[Callable] = None,
    ) -> RaySamples:
        """Produces samples for each ray by projecting points along the ray direction.

        Args:
            bin_starts: Distance from origin to start of bin.
            bin_ends: Distance from origin to end of bin.
            spacing_starts: Start of bin in normalized sample space. Optional.
            spacing_ends: End of bin in normalized sample space. Optional.
            spacing_to_euclidean_fn: Maps normalized spacing to euclidean distance. Optional.

        Returns:
            Samples projected along ray.
        """
        deltas = bin_ends - bin_starts
        if self.camera_indices is not None:
            camera_indices = self.camera_indices[..., None]
        else:
            camera_indices = None

        # Append a sample dimension so per-ray fields broadcast against per-sample bins.
        shaped_raybundle_fields = self[..., None]

        frustums = Frustums(
            origins=shaped_raybundle_fields.origins,  # [..., 1, 3]
            directions=shaped_raybundle_fields.directions,  # [..., 1, 3]
            starts=bin_starts,  # [..., num_samples, 1]
            ends=bin_ends,  # [..., num_samples, 1]
            pixel_area=shaped_raybundle_fields.pixel_area,  # [..., 1, 1]
        )

        ray_samples = RaySamples(
            frustums=frustums,
            camera_indices=camera_indices,  # [..., 1, 1]
            deltas=deltas,  # [..., num_samples, 1]
            spacing_starts=spacing_starts,  # [..., num_samples, 1]
            spacing_ends=spacing_ends,  # [..., num_samples, 1]
            spacing_to_euclidean_fn=spacing_to_euclidean_fn,
            metadata=shaped_raybundle_fields.metadata,
            times=None if self.times is None else self.times[..., None],  # [..., 1, 1]
        )

        return ray_samples
The provided code snippet includes necessary dependencies for implementing the `get_ray_bundle_lines` function. Write a Python function `def get_ray_bundle_lines( ray_bundle: RayBundle, length: float = 1.0, color: str = "#DC203C", width: float = 1 ) -> go.Scatter3d` to solve the following problem:
Get a plotly line for a ray bundle. Args: ray_bundle: Ray bundle length: Length of the line. Defaults to 1.0. color: Color of the line. width: Width of the line. Defaults to 1. Returns: Plotly lines
Here is the function:
def get_ray_bundle_lines(
    ray_bundle: RayBundle, length: float = 1.0, color: str = "#DC203C", width: float = 1
) -> go.Scatter3d:  # type: ignore
    """Plot the rays of a bundle as line segments.

    Args:
        ray_bundle: Ray bundle to visualize.
        length: Length of each drawn segment. Defaults to 1.0.
        color: Color of the lines.
        width: Width of the lines. Defaults to 1.

    Returns:
        Plotly scatter trace drawing the ray segments.
    """
    starts = ray_bundle.origins.view(-1, 3)
    ends = starts + ray_bundle.directions.view(-1, 3) * length
    # Interleave start and end points so consecutive pairs form one segment each.
    segments = torch.empty((starts.shape[0] * 2, 3))
    segments[0::2] = starts
    segments[1::2] = ends
    return go.Scatter3d(
        x=segments[..., 0],
        y=segments[..., 1],
        z=segments[..., 2],
        mode="lines",
        name="Ray Bundle",
        line=dict(color=color, width=width),
    )
162,359 | from typing import Any, List, Optional, Union
import numpy as np
import plotly.graph_objects as go
import torch
import torch.nn.functional as F
from plotly import express as ex
from torchtyping import TensorType
from nerfstudio.cameras.cameras import Cameras
from nerfstudio.cameras.rays import Frustums, RayBundle
from nerfstudio.utils.math import Gaussians
class Cameras(TensorDataclass):
"""Dataparser outputs for the image dataset and the ray generator.
Note: currently only supports cameras with the same principal points and types. The reason we type
the focal lengths, principal points, and image sizes as tensors is to allow for batched cameras
down the line in cases where your batches of camera data don't come from the same cameras.
If a single value is provided, it is broadcasted to all cameras.
Args:
camera_to_worlds: Camera to world matrices. Tensor of per-image c2w matrices, in [R | t] format
fx: Focal length x
fy: Focal length y
cx: Principal point x
cy: Principal point y
width: Image width
height: Image height
distortion_params: OpenCV 6 radial distortion coefficients
camera_type: Type of camera model. This will be an int corresponding to the CameraType enum.
times: Timestamps for each camera
"""
camera_to_worlds: TensorType["num_cameras":..., 3, 4]
fx: TensorType["num_cameras":..., 1]
fy: TensorType["num_cameras":..., 1]
cx: TensorType["num_cameras":..., 1]
cy: TensorType["num_cameras":..., 1]
width: TensorType["num_cameras":..., 1]
height: TensorType["num_cameras":..., 1]
distortion_params: Optional[TensorType["num_cameras":..., 6]]
camera_type: TensorType["num_cameras":..., 1]
times: Optional[TensorType["num_cameras", 1]]
def __init__(
    self,
    camera_to_worlds: TensorType["batch_c2ws":..., 3, 4],
    fx: Union[TensorType["batch_fxs":..., 1], float],
    fy: Union[TensorType["batch_fys":..., 1], float],
    cx: Union[TensorType["batch_cxs":..., 1], float],
    cy: Union[TensorType["batch_cys":..., 1], float],
    width: Optional[Union[TensorType["batch_ws":..., 1], int]] = None,
    height: Optional[Union[TensorType["batch_hs":..., 1], int]] = None,
    distortion_params: Optional[TensorType["batch_dist_params":..., 6]] = None,
    camera_type: Optional[
        Union[
            TensorType["batch_cam_types":..., 1],
            int,
            List[CameraType],
            CameraType,
        ]
    ] = CameraType.PERSPECTIVE,
    times: Optional[TensorType["num_cameras"]] = None,
):
    """Initializes the Cameras object.

    Note on Input Tensor Dimensions: All of these tensors have items of dimensions TensorType[3, 4]
    (in the case of the c2w matrices), TensorType[6] (in the case of distortion params), or
    TensorType[1] (in the case of the rest of the elements). The dimensions before that are
    considered the batch dimension of that tensor (batch_c2ws, batch_fxs, etc.). We will broadcast
    all the tensors to be the same batch dimension. This means you can use any combination of the
    input types in the function signature and it won't break. Your batch size for all tensors
    must be broadcastable to the same size, and the resulting number of batch dimensions will be
    the batch dimension with the largest number of dimensions.
    """
    # This will notify the tensordataclass that we have a field with more than 1 dimension
    self._field_custom_dimensions = {"camera_to_worlds": 2}

    self.camera_to_worlds = camera_to_worlds

    # fx fy calculation
    self.fx = self._init_get_fc_xy(fx, "fx")  # @dataclass's post_init will take care of broadcasting
    self.fy = self._init_get_fc_xy(fy, "fy")  # @dataclass's post_init will take care of broadcasting

    # cx cy calculation
    self.cx = self._init_get_fc_xy(cx, "cx")  # @dataclass's post_init will take care of broadcasting
    self.cy = self._init_get_fc_xy(cy, "cy")  # @dataclass's post_init will take care of broadcasting

    # Distortion Params Calculation:
    self.distortion_params = distortion_params  # @dataclass's post_init will take care of broadcasting

    # @dataclass's post_init will take care of broadcasting
    self.height = self._init_get_height_width(height, self.cy)
    self.width = self._init_get_height_width(width, self.cx)
    self.camera_type = self._init_get_camera_type(camera_type)
    self.times = self._init_get_times(times)

    self.__post_init__()  # This will do the dataclass post_init and broadcast all the tensors
def _init_get_fc_xy(self, fc_xy, name):
    """Parse a focal length / principal point component into a [..., 1] tensor.

    Floats are wrapped into a single-element tensor; tensors get a trailing
    singleton dimension appended if missing. Broadcasting across cameras
    happens later in the __post_init__ function.

    Args:
        fc_xy: The focal length / principal point x or y, as float or tensor.
        name: The name of the variable. Used for error messages.

    Returns:
        Tensor on this camera's device with a trailing singleton dimension.

    Raises:
        ValueError: If fc_xy is neither a float nor a tensor.
    """
    if isinstance(fc_xy, float):
        # Use torch.tensor rather than the legacy torch.Tensor constructor:
        # the legacy constructor rejects non-CPU devices. dtype is pinned to
        # float32 to match what torch.Tensor would have produced.
        fc_xy = torch.tensor([fc_xy], dtype=torch.float32, device=self.device)
    elif isinstance(fc_xy, torch.Tensor):
        if fc_xy.ndim == 0 or fc_xy.shape[-1] != 1:
            fc_xy = fc_xy.unsqueeze(-1)
        fc_xy = fc_xy.to(self.device)
    else:
        raise ValueError(f"{name} must be a float or tensor, got {type(fc_xy)}")
    return fc_xy
def _init_get_camera_type(
    self,
    camera_type: Union[
        TensorType["batch_cam_types":..., 1], TensorType["batch_cam_types":...], int, List[CameraType], CameraType
    ],
) -> TensorType["num_cameras":..., 1]:
    """Parses the __init__() argument camera_type.

    Camera Type Calculation:
        If CameraType, convert to int and then to tensor, then broadcast to all cameras
        If List of CameraTypes, convert to ints and then to tensor, then broadcast to all cameras
        If int, first go to tensor and then broadcast to all cameras
        If tensor, broadcast to all cameras

    Args:
        camera_type: camera_type argument from __init__()

    Raises:
        ValueError: If camera_type is none of the accepted types.
    """
    if isinstance(camera_type, CameraType):
        camera_type = torch.tensor([camera_type.value], device=self.device)
    elif isinstance(camera_type, List) and isinstance(camera_type[0], CameraType):
        camera_type = torch.tensor([[c.value] for c in camera_type], device=self.device)
    elif isinstance(camera_type, int):
        camera_type = torch.tensor([camera_type], device=self.device)
    elif isinstance(camera_type, torch.Tensor):
        assert not torch.is_floating_point(
            camera_type
        ), f"camera_type tensor must be of type int, not: {camera_type.dtype}"
        camera_type = camera_type.to(self.device)
        if camera_type.ndim == 0 or camera_type.shape[-1] != 1:
            camera_type = camera_type.unsqueeze(-1)
        # assert torch.all(
        #     camera_type.view(-1)[0] == camera_type
        # ), "Batched cameras of different camera_types will be allowed in the future."
    else:
        raise ValueError(
            'Invalid camera_type. Must be CameraType, List[CameraType], int, or torch.Tensor["num_cameras"]. \
Received: '
            + str(type(camera_type))
        )
    return camera_type
def _init_get_height_width(
    self,
    h_w: Union[TensorType["batch_hws":..., 1], TensorType["batch_hws":...], int, None],
    c_x_y: TensorType["batch_cxys":...],
) -> TensorType["num_cameras":..., 1]:
    """Parses the __init__() argument for height or width.

    Height/Width Calculation:
        If int, first go to tensor and then broadcast to all cameras
        If tensor, broadcast to all cameras
        If none, use cx or cy * 2
        Else raise error

    Args:
        h_w: height or width argument from __init__()
        c_x_y: cx or cy for when h_w == None

    Returns:
        Integer (int64) tensor of heights/widths.

    Raises:
        ValueError: If h_w is not an int, tensor, or None.
    """
    if isinstance(h_w, int):
        # torch.tensor instead of the legacy torch.Tensor constructor, which
        # rejects non-CPU devices.
        h_w = torch.tensor([h_w], dtype=torch.int64, device=self.device)
    elif isinstance(h_w, torch.Tensor):
        assert not torch.is_floating_point(h_w), f"height and width tensor must be of type int, not: {h_w.dtype}"
        h_w = h_w.to(torch.int64).to(self.device)
        if h_w.ndim == 0 or h_w.shape[-1] != 1:
            h_w = h_w.unsqueeze(-1)
        # assert torch.all(h_w == h_w.view(-1)[0]), "Batched cameras of different h, w will be allowed in the future."
    elif h_w is None:
        # Default to twice the principal point. Do NOT wrap in torch.Tensor():
        # the legacy constructor silently converts the int64 result back to
        # float32, breaking the integer height/width invariant.
        h_w = (c_x_y * 2).to(torch.int64).to(self.device)
    else:
        raise ValueError("Height must be an int, tensor, or None, received: " + str(type(h_w)))
    return h_w
def _init_get_times(self, times):
    """Validate the __init__() ``times`` argument.

    Accepts None or a tensor; a tensor missing a trailing singleton
    dimension gets one appended and is moved to this camera's device.

    Args:
        times: times argument from __init__()

    Raises:
        ValueError: If times is neither None nor a tensor.
    """
    if times is None:
        return None
    if not isinstance(times, torch.Tensor):
        raise ValueError(f"times must be None or a tensor, got {type(times)}")
    if times.ndim == 0 or times.shape[-1] != 1:
        times = times.unsqueeze(-1).to(self.device)
    return times
# NOTE(review): takes no arguments besides self and is referenced elsewhere as
# ``self.device`` -- presumably decorated with @property upstream; confirm the
# decorator was not lost in extraction.
def device(self):
    """Returns the device that the camera is on."""
    return self.camera_to_worlds.device
def image_height(self) -> TensorType["num_cameras":..., 1]:
    """Returns the height of the images."""
    return self.height
def image_width(self) -> TensorType["num_cameras":..., 1]:
    """Returns the width of the images."""
    return self.width
def is_jagged(self):
    """
    Returns whether or not the cameras are "jagged" (i.e. the heights and widths are different, meaning
    that you cannot concatenate the image coordinate maps together)
    """
    reference_height = self.height.view(-1)[0]
    reference_width = self.width.view(-1)[0]
    heights_vary = not torch.all(self.height == reference_height)
    widths_vary = not torch.all(self.width == reference_width)
    return heights_vary or widths_vary
def get_image_coords(
    self, pixel_offset: float = 0.5, index: Optional[Tuple] = None
) -> TensorType["height", "width", 2]:
    """Gets the image coordinates of one of the cameras in this object.

    If no index is specified, returns the maximum possible sized height / width image
    coordinate map, by looking at the maximum height and width of all the cameras
    in this object.

    Args:
        pixel_offset: Offset for each pixel. Defaults to center of pixel (0.5)
        index: Tuple of indices into the batch dimensions of the camera. Defaults to None, which returns the 0th
            flattened camera

    Returns:
        Grid of image coordinates.
    """
    if index is None:
        # Cover the largest image across the (possibly batched) cameras.
        image_height = torch.max(self.image_height.view(-1))
        image_width = torch.max(self.image_width.view(-1))
    else:
        image_height = self.image_height[index].item()
        image_width = self.image_width[index].item()
    row_col = torch.meshgrid(torch.arange(image_height), torch.arange(image_width), indexing="ij")
    # Stored as (y, x) coordinates, shifted to pixel centers by default.
    return torch.stack(row_col, dim=-1) + pixel_offset
def generate_rays(  # pylint: disable=too-many-statements
    self,
    camera_indices: Union[TensorType["num_rays":..., "num_cameras_batch_dims"], int],
    coords: Optional[TensorType["num_rays":..., 2]] = None,
    camera_opt_to_camera: Optional[TensorType["num_rays":..., 3, 4]] = None,
    distortion_params_delta: Optional[TensorType["num_rays":..., 6]] = None,
    keep_shape: Optional[bool] = None,
    disable_distortion: bool = False,
) -> RayBundle:
    """Generates rays for the given camera indices.

    This function will standardize the input arguments and then call the _generate_rays_from_coords function
    to generate the rays. Our goal is to parse the arguments and then get them into the right shape:

    - camera_indices: (num_rays:..., num_cameras_batch_dims)
    - coords: (num_rays:..., 2)
    - camera_opt_to_camera: (num_rays:..., 3, 4) or None
    - distortion_params_delta: (num_rays:..., 6) or None

    Read the docstring for _generate_rays_from_coords for more information on how we generate the rays
    after we have standardized the arguments.

    We are only concerned about different combinations of camera_indices and coords matrices, and the following
    are the 4 cases we have to deal with:

    1. isinstance(camera_indices, int) and coords == None
        - In this case we broadcast our camera_indices / coords shape (h, w, 1 / 2 respectively)
    2. isinstance(camera_indices, int) and coords != None
        - In this case, we broadcast camera_indices to the same batch dim as coords
    3. not isinstance(camera_indices, int) and coords == None
        - In this case, we will need to set coords so that it is of shape (h, w, num_rays, 2), and broadcast
          all our other args to match the new definition of num_rays := (h, w) + num_rays
    4. not isinstance(camera_indices, int) and coords != None
        - In this case, we have nothing to do, only check that the arguments are of the correct shape

    There is one more edge case we need to be careful with: when we have "jagged cameras" (ie: different heights
    and widths for each camera). This isn't problematic when we specify coords, since coords is already a tensor.
    When coords == None (ie: when we render out the whole image associated with this camera), we run into problems
    since there's no way to stack each coordinate map as all coordinate maps are all different shapes. In this case,
    we will need to flatten each individual coordinate map and concatenate them, giving us only one batch dimension,
    regardless of the number of prepended extra batch dimensions in the camera_indices tensor.

    Args:
        camera_indices: Camera indices of the flattened cameras object to generate rays for.
        coords: Coordinates of the pixels to generate rays for. If None, the full image will be rendered.
        camera_opt_to_camera: Optional transform for the camera to world matrices.
        distortion_params_delta: Optional delta for the distortion parameters.
        keep_shape: If None, then we default to the regular behavior of flattening if cameras is jagged, otherwise
            keeping dimensions. If False, we flatten at the end. If True, then we keep the shape of the
            camera_indices and coords tensors (if we can).
        disable_distortion: If True, disables distortion.

    Returns:
        Rays for the given camera indices and coords.
    """
    # Check the argument types to make sure they're valid and all shaped correctly
    assert isinstance(camera_indices, (torch.Tensor, int)), "camera_indices must be a tensor or int"
    assert coords is None or isinstance(coords, torch.Tensor), "coords must be a tensor or None"
    assert camera_opt_to_camera is None or isinstance(camera_opt_to_camera, torch.Tensor)
    assert distortion_params_delta is None or isinstance(distortion_params_delta, torch.Tensor)
    if isinstance(camera_indices, torch.Tensor) and isinstance(coords, torch.Tensor):
        num_rays_shape = camera_indices.shape[:-1]
        errormsg = "Batch dims of inputs must match when inputs are all tensors"
        assert coords.shape[:-1] == num_rays_shape, errormsg
        assert camera_opt_to_camera is None or camera_opt_to_camera.shape[:-2] == num_rays_shape, errormsg
        assert distortion_params_delta is None or distortion_params_delta.shape[:-1] == num_rays_shape, errormsg

    # If zero dimensional, we need to unsqueeze to get a batch dimension and then squeeze later
    if not self.shape:
        cameras = self.reshape((1,))
        assert torch.all(
            torch.tensor(camera_indices == 0) if isinstance(camera_indices, int) else camera_indices == 0
        ), "Can only index into single camera with no batch dimensions if index is zero"
    else:
        cameras = self

    # If the camera indices are an int, then we need to make sure that the camera batch is 1D
    if isinstance(camera_indices, int):
        assert (
            len(cameras.shape) == 1
        ), "camera_indices must be a tensor if cameras are batched with more than 1 batch dimension"
        camera_indices = torch.tensor([camera_indices], device=cameras.device)

    assert camera_indices.shape[-1] == len(
        cameras.shape
    ), "camera_indices must have shape (num_rays:..., num_cameras_batch_dims)"

    # If keep_shape is True, then we need to make sure that the camera indices in question
    # are all the same height and width and can actually be batched while maintaining the image
    # shape
    if keep_shape is True:
        assert torch.all(cameras.height[camera_indices] == cameras.height[camera_indices[0]]) and torch.all(
            cameras.width[camera_indices] == cameras.width[camera_indices[0]]
        ), "Can only keep shape if all cameras have the same height and width"

    # If the cameras don't all have same height / width, if coords is not none, we will need to generate
    # a flat list of coords for each camera and then concatenate otherwise our rays will be jagged.
    # Camera indices, camera_opt, and distortion will also need to be broadcasted accordingly which is non-trivial
    if cameras.is_jagged and coords is None and (keep_shape is None or keep_shape is False):
        index_dim = camera_indices.shape[-1]
        camera_indices = camera_indices.reshape(-1, index_dim)
        _coords = [cameras.get_image_coords(index=tuple(index)).reshape(-1, 2) for index in camera_indices]
        # Repeat each camera index once per pixel of its own image, so indices
        # and coordinates stay aligned after concatenation.
        camera_indices = torch.cat(
            [index.unsqueeze(0).repeat(coords.shape[0], 1) for index, coords in zip(camera_indices, _coords)],
        )
        coords = torch.cat(_coords, dim=0)
        assert coords.shape[0] == camera_indices.shape[0]
        # Need to get the coords of each indexed camera and flatten all coordinate maps and concatenate them

    # The case where we aren't jagged && keep_shape (since otherwise coords is already set) and coords
    # is None. In this case we append (h, w) to the num_rays dimensions for all tensors. In this case,
    # each image in camera_indices has to have the same shape since otherwise we would have error'd when
    # we checked keep_shape is valid or we aren't jagged.
    if coords is None:
        index_dim = camera_indices.shape[-1]
        index = camera_indices.reshape(-1, index_dim)[0]
        coords: torch.Tensor = cameras.get_image_coords(index=tuple(index))  # (h, w, 2)
        coords = coords.reshape(coords.shape[:2] + (1,) * len(camera_indices.shape[:-1]) + (2,))  # (h, w, 1..., 2)
        coords = coords.expand(coords.shape[:2] + camera_indices.shape[:-1] + (2,))  # (h, w, num_rays, 2)
        camera_opt_to_camera = (  # (h, w, num_rays, 3, 4) or None
            camera_opt_to_camera.broadcast_to(coords.shape[:-1] + (3, 4))
            if camera_opt_to_camera is not None
            else None
        )
        distortion_params_delta = (  # (h, w, num_rays, 6) or None
            distortion_params_delta.broadcast_to(coords.shape[:-1] + (6,))
            if distortion_params_delta is not None
            else None
        )

    # If camera indices was an int or coords was none, we need to broadcast our indices along batch dims
    camera_indices = camera_indices.broadcast_to(coords.shape[:-1] + (len(cameras.shape),)).to(torch.long)

    # Checking our tensors have been standardized
    assert isinstance(coords, torch.Tensor) and isinstance(camera_indices, torch.Tensor)
    assert camera_indices.shape[-1] == len(cameras.shape)
    assert camera_opt_to_camera is None or camera_opt_to_camera.shape[:-2] == coords.shape[:-1]
    assert distortion_params_delta is None or distortion_params_delta.shape[:-1] == coords.shape[:-1]

    # This will do the actual work of generating the rays now that we have standardized the inputs
    # raybundle.shape == (num_rays) when done
    # pylint: disable=protected-access
    raybundle = cameras._generate_rays_from_coords(
        camera_indices, coords, camera_opt_to_camera, distortion_params_delta, disable_distortion=disable_distortion
    )

    # If we have mandated that we don't keep the shape, then we flatten
    if keep_shape is False:
        raybundle = raybundle.flatten()

    # TODO: We should have to squeeze the last dimension here if we started with zero batch dims, but never have to,
    # so there might be a rogue squeeze happening somewhere, and this may cause some unintended behaviour
    # that we haven't caught yet with tests
    return raybundle
# pylint: disable=too-many-statements
def _generate_rays_from_coords(
    self,
    camera_indices: TensorType["num_rays":..., "num_cameras_batch_dims"],
    coords: TensorType["num_rays":..., 2],
    camera_opt_to_camera: Optional[TensorType["num_rays":..., 3, 4]] = None,
    distortion_params_delta: Optional[TensorType["num_rays":..., 6]] = None,
    disable_distortion: bool = False,
) -> RayBundle:
    """Generates rays for the given camera indices and coords where self isn't jagged

    This is a fairly complex function, so let's break this down slowly.

    Shapes involved:
        - num_rays: This is your output raybundle shape. It dictates the number and shape of the rays generated
        - num_cameras_batch_dims: This is the number of dimensions of our camera

    Args:
        camera_indices: Camera indices of the flattened cameras object to generate rays for.
            The shape of this is such that indexing into camera_indices["num_rays":...] will return the
            index into each batch dimension of the camera in order to get the correct camera specified by
            "num_rays".

            Example:
                >>> cameras = Cameras(...)
                >>> cameras.shape
                    (2, 3, 4)

                >>> camera_indices = torch.tensor([0, 0, 0]) # We need an axis of length 3 since cameras.ndim == 3
                >>> camera_indices.shape
                    (3,)
                >>> coords = torch.tensor([1,1])
                >>> coords.shape
                    (2,)
                >>> out_rays = cameras.generate_rays(camera_indices=camera_indices, coords = coords)
                    # This will generate a RayBundle with a single ray for the
                    # camera at cameras[0,0,0] at image coordinates (1,1), so out_rays.shape == ()
                >>> out_rays.shape
                    ()

                >>> camera_indices = torch.tensor([[0,0,0]])
                >>> camera_indices.shape
                    (1, 3)
                >>> coords = torch.tensor([[1,1]])
                >>> coords.shape
                    (1, 2)
                >>> out_rays = cameras.generate_rays(camera_indices=camera_indices, coords = coords)
                    # This will generate a RayBundle with a single ray for the
                    # camera at cameras[0,0,0] at point (1,1), so out_rays.shape == (1,)
                    # since we added an extra dimension in front of camera_indices
                >>> out_rays.shape
                    (1,)

            If you want more examples, check tests/cameras/test_cameras and the function check_generate_rays_shape

            The bottom line is that for camera_indices: (num_rays:..., num_cameras_batch_dims), num_rays is the
            output shape and if you index into the output RayBundle with some indices [i:...], if you index into
            camera_indices with camera_indices[i:...] as well, you will get a 1D tensor containing the batch
            indices into the original cameras object corresponding to that ray (ie: you will get the camera
            from our batched cameras corresponding to the ray at RayBundle[i:...]).

        coords: Coordinates of the pixels to generate rays for. If None, the full image will be rendered, meaning
            height and width get prepended to the num_rays dimensions. Indexing into coords with [i:...] will
            get you the image coordinates [x, y] of that specific ray located at output RayBundle[i:...].

        camera_opt_to_camera: Optional transform for the camera to world matrices.
            In terms of shape, it follows the same rules as coords, but indexing into it with [i:...] gets you
            the 2D camera to world transform matrix for the camera optimization at RayBundle[i:...].

        distortion_params_delta: Optional delta for the distortion parameters.
            In terms of shape, it follows the same rules as coords, but indexing into it with [i:...] gets you
            the 1D tensor with the 6 distortion parameters for the camera optimization at RayBundle[i:...].

        disable_distortion: If True, disables distortion.

    Returns:
        Rays for the given camera indices and coords. RayBundle.shape == num_rays
    """
    # Make sure we're on the right devices
    camera_indices = camera_indices.to(self.device)
    coords = coords.to(self.device)

    # Checking to make sure everything is of the right shape and type
    num_rays_shape = camera_indices.shape[:-1]
    assert camera_indices.shape == num_rays_shape + (self.ndim,)
    assert coords.shape == num_rays_shape + (2,)
    assert coords.shape[-1] == 2
    assert camera_opt_to_camera is None or camera_opt_to_camera.shape == num_rays_shape + (3, 4)
    assert distortion_params_delta is None or distortion_params_delta.shape == num_rays_shape + (6,)

    # Here, we've broken our indices down along the num_cameras_batch_dims dimension allowing us to index by all
    # of our output rays at each dimension of our cameras object
    true_indices = [camera_indices[..., i] for i in range(camera_indices.shape[-1])]

    # Get all our focal lengths, principal points and make sure they are the right shapes
    # Note: coords is stored as (y, x) — see get_image_coords — hence the index order below.
    y = coords[..., 0]  # (num_rays,) get rid of the last dimension
    x = coords[..., 1]  # (num_rays,) get rid of the last dimension
    fx, fy = self.fx[true_indices].squeeze(-1), self.fy[true_indices].squeeze(-1)  # (num_rays,)
    cx, cy = self.cx[true_indices].squeeze(-1), self.cy[true_indices].squeeze(-1)  # (num_rays,)
    assert (
        y.shape == num_rays_shape
        and x.shape == num_rays_shape
        and fx.shape == num_rays_shape
        and fy.shape == num_rays_shape
        and cx.shape == num_rays_shape
        and cy.shape == num_rays_shape
    ), (
        str(num_rays_shape)
        + str(y.shape)
        + str(x.shape)
        + str(fx.shape)
        + str(fy.shape)
        + str(cx.shape)
        + str(cy.shape)
    )

    # Get our image coordinates and image coordinates offset by 1 (offsets used for dx, dy calculations)
    # Also make sure the shapes are correct
    # The y axis is negated to convert from image (y-down) to camera (y-up) conventions.
    coord = torch.stack([(x - cx) / fx, -(y - cy) / fy], -1)  # (num_rays, 2)
    coord_x_offset = torch.stack([(x - cx + 1) / fx, -(y - cy) / fy], -1)  # (num_rays, 2)
    coord_y_offset = torch.stack([(x - cx) / fx, -(y - cy + 1) / fy], -1)  # (num_rays, 2)
    assert (
        coord.shape == num_rays_shape + (2,)
        and coord_x_offset.shape == num_rays_shape + (2,)
        and coord_y_offset.shape == num_rays_shape + (2,)
    )

    # Stack image coordinates and image coordinates offset by 1, check shapes too
    coord_stack = torch.stack([coord, coord_x_offset, coord_y_offset], dim=0)  # (3, num_rays, 2)
    assert coord_stack.shape == (3,) + num_rays_shape + (2,)

    # Undistorts our images according to our distortion parameters
    if not disable_distortion:
        distortion_params = None
        if self.distortion_params is not None:
            distortion_params = self.distortion_params[true_indices]
            if distortion_params_delta is not None:
                distortion_params = distortion_params + distortion_params_delta
        elif distortion_params_delta is not None:
            distortion_params = distortion_params_delta

        # Do not apply distortion for equirectangular images
        if distortion_params is not None:
            mask = (self.camera_type[true_indices] != CameraType.EQUIRECTANGULAR.value).squeeze(-1)  # (num_rays)
            coord_mask = torch.stack([mask, mask, mask], dim=0)
            if mask.any():
                coord_stack[coord_mask, :] = camera_utils.radial_and_tangential_undistort(
                    coord_stack[coord_mask, :].reshape(3, -1, 2),
                    distortion_params[mask, :],
                ).reshape(-1, 2)

    # Make sure after we have undistorted our images, the shapes are still correct
    assert coord_stack.shape == (3,) + num_rays_shape + (2,)

    # Gets our directions for all our rays in camera coordinates and checks shapes at the end
    # Here, directions_stack is of shape (3, num_rays, 3)
    # directions_stack[0] is the direction for ray in camera coordinates
    # directions_stack[1] is the direction for ray in camera coordinates offset by 1 in x
    # directions_stack[2] is the direction for ray in camera coordinates offset by 1 in y
    cam_types = torch.unique(self.camera_type, sorted=False)
    directions_stack = torch.empty((3,) + num_rays_shape + (3,), device=self.device)
    if CameraType.PERSPECTIVE.value in cam_types:
        mask = (self.camera_type[true_indices] == CameraType.PERSPECTIVE.value).squeeze(-1)  # (num_rays)
        mask = torch.stack([mask, mask, mask], dim=0)
        directions_stack[..., 0][mask] = torch.masked_select(coord_stack[..., 0], mask).float()
        directions_stack[..., 1][mask] = torch.masked_select(coord_stack[..., 1], mask).float()
        # Cameras look down -z in camera space.
        directions_stack[..., 2][mask] = -1.0

    if CameraType.FISHEYE.value in cam_types:
        mask = (self.camera_type[true_indices] == CameraType.FISHEYE.value).squeeze(-1)  # (num_rays)
        mask = torch.stack([mask, mask, mask], dim=0)

        theta = torch.sqrt(torch.sum(coord_stack**2, dim=-1))
        theta = torch.clip(theta, 0.0, math.pi)

        # NOTE(review): sin(theta)/theta is NaN at theta == 0 (exact image center) — confirm
        # upstream handling if center pixels can occur.
        sin_theta = torch.sin(theta)

        directions_stack[..., 0][mask] = torch.masked_select(coord_stack[..., 0] * sin_theta / theta, mask).float()
        directions_stack[..., 1][mask] = torch.masked_select(coord_stack[..., 1] * sin_theta / theta, mask).float()
        directions_stack[..., 2][mask] = -torch.masked_select(torch.cos(theta), mask)

    if CameraType.EQUIRECTANGULAR.value in cam_types:
        mask = (self.camera_type[true_indices] == CameraType.EQUIRECTANGULAR.value).squeeze(-1)  # (num_rays)
        mask = torch.stack([mask, mask, mask], dim=0)

        # For equirect, fx = fy = height = width/2
        # Then coord[..., 0] goes from -1 to 1 and coord[..., 1] goes from -1/2 to 1/2
        theta = -torch.pi * coord_stack[..., 0]  # minus sign for right-handed
        phi = torch.pi * (0.5 - coord_stack[..., 1])
        # use spherical in local camera coordinates (+y up, x=0 and z<0 is theta=0)
        directions_stack[..., 0][mask] = torch.masked_select(-torch.sin(theta) * torch.sin(phi), mask).float()
        directions_stack[..., 1][mask] = torch.masked_select(torch.cos(phi), mask).float()
        directions_stack[..., 2][mask] = torch.masked_select(-torch.cos(theta) * torch.sin(phi), mask).float()

    for value in cam_types:
        if value not in [CameraType.PERSPECTIVE.value, CameraType.FISHEYE.value, CameraType.EQUIRECTANGULAR.value]:
            raise ValueError(f"Camera type {value} not supported.")

    assert directions_stack.shape == (3,) + num_rays_shape + (3,)

    c2w = self.camera_to_worlds[true_indices]
    assert c2w.shape == num_rays_shape + (3, 4)

    if camera_opt_to_camera is not None:
        c2w = pose_utils.multiply(c2w, camera_opt_to_camera)
    rotation = c2w[..., :3, :3]  # (..., 3, 3)
    assert rotation.shape == num_rays_shape + (3, 3)

    # Rotate camera-space directions into world space (row-vector x rotation).
    directions_stack = torch.sum(
        directions_stack[..., None, :] * rotation, dim=-1
    )  # (..., 1, 3) * (..., 3, 3) -> (..., 3)
    directions_norm = torch.norm(directions_stack, dim=-1, keepdim=True)
    # Keep only the norm of the un-offset ray (index 0 of the 3-way stack).
    directions_norm = directions_norm[0]
    directions_stack = normalize(directions_stack, dim=-1)
    assert directions_stack.shape == (3,) + num_rays_shape + (3,)

    origins = c2w[..., :3, 3]  # (..., 3)
    assert origins.shape == num_rays_shape + (3,)

    directions = directions_stack[0]
    assert directions.shape == num_rays_shape + (3,)

    # norms of the vector going between adjacent coords, giving us dx and dy per output ray
    dx = torch.sqrt(torch.sum((directions - directions_stack[1]) ** 2, dim=-1))  # ("num_rays":...,)
    dy = torch.sqrt(torch.sum((directions - directions_stack[2]) ** 2, dim=-1))  # ("num_rays":...,)
    assert dx.shape == num_rays_shape and dy.shape == num_rays_shape

    pixel_area = (dx * dy)[..., None]  # ("num_rays":..., 1)
    assert pixel_area.shape == num_rays_shape + (1,)

    times = self.times[camera_indices, 0] if self.times is not None else None

    return RayBundle(
        origins=origins,
        directions=directions,
        pixel_area=pixel_area,
        camera_indices=camera_indices,
        directions_norm=directions_norm,
        times=times,
    )
def to_json(
    self, camera_idx: int, image: Optional[TensorType["height", "width", 2]] = None, max_size: Optional[int] = None
) -> Dict:
    """Convert a camera to a json dictionary.

    Args:
        camera_idx: Index of the camera to convert.
        image: An image in range [0, 1] that is encoded to a base64 string.
        max_size: Max size to resize the image to if present.

    Returns:
        A JSON representation of the camera
    """
    camera = self.flatten()[camera_idx]
    json_ = {
        "type": "PinholeCamera",
        "cx": camera.cx.item(),
        "cy": camera.cy.item(),
        "fx": camera.fx.item(),
        "fy": camera.fy.item(),
        "camera_to_world": self.camera_to_worlds[camera_idx].tolist(),
        "camera_index": camera_idx,
        "times": camera.times.item() if self.times is not None else None,
    }
    if image is None:
        return json_
    # Encode the image as a base64 JPEG data URI.
    image_uint8 = (image * 255).detach().type(torch.uint8)
    if max_size is not None:
        # torchvision resize wants channel-first layout.
        image_uint8 = image_uint8.permute(2, 0, 1)
        image_uint8 = torchvision.transforms.functional.resize(image_uint8, max_size)  # type: ignore
        image_uint8 = image_uint8.permute(1, 2, 0)
    encoded = cv2.imencode(".jpg", image_uint8.cpu().numpy())[1].tobytes()
    json_["image"] = str("data:image/jpeg;base64," + base64.b64encode(encoded).decode("ascii"))
    return json_
def get_intrinsics_matrices(self) -> TensorType["num_cameras":..., 3, 3]:
    """Returns the intrinsic matrices for each camera.

    Returns:
        Pinhole camera intrinsics matrices
    """
    intrinsics = torch.zeros((*self.shape, 3, 3), dtype=torch.float32)
    # Pinhole layout: focal lengths on the diagonal, principal point in the last column.
    for row, col, value in ((0, 0, self.fx), (1, 1, self.fy), (0, 2, self.cx), (1, 2, self.cy)):
        intrinsics[..., row, col] = value.squeeze(-1)
    intrinsics[..., 2, 2] = 1.0
    return intrinsics
def rescale_output_resolution(
    self, scaling_factor: Union[TensorType["num_cameras":...], TensorType["num_cameras":..., 1], float, int]
) -> None:
    """Rescale the output resolution of the cameras.

    Args:
        scaling_factor: Scaling factor to apply to the output resolution.
    """
    # Normalize the scaling factor to a tensor of shape (*self.shape, 1).
    if isinstance(scaling_factor, (float, int)):
        scaling_factor = torch.tensor([scaling_factor]).to(self.device).broadcast_to((self.cx.shape))
    elif isinstance(scaling_factor, torch.Tensor) and scaling_factor.shape == self.shape:
        scaling_factor = scaling_factor.unsqueeze(-1)
    elif not (isinstance(scaling_factor, torch.Tensor) and scaling_factor.shape == (*self.shape, 1)):
        raise ValueError(
            f"Scaling factor must be a float, int, or a tensor of shape {self.shape} or {(*self.shape, 1)}."
        )
    # Intrinsics scale linearly with resolution.
    for name in ("fx", "fy", "cx", "cy"):
        setattr(self, name, getattr(self, name) * scaling_factor)
    # Image sizes remain integral.
    self.height = (self.height * scaling_factor).to(torch.int64)
    self.width = (self.width * scaling_factor).to(torch.int64)
The imports above provide the dependencies needed to implement the `vis_camera_rays` function. Write a Python function `def vis_camera_rays(cameras: Cameras) -> go.Figure` that solves the following problem:
Visualize camera rays. Args: cameras: Cameras object whose rays are visualized. Returns: a Plotly figure of line segments.
Here is the function:
def vis_camera_rays(cameras: Cameras) -> go.Figure:  # type: ignore
    """Visualize camera rays.

    Args:
        cameras: Cameras whose rays are visualized (parameter is plural; all cameras share one image size).

    Returns:
        Plotly lines
    """
    # Normalized (y, x) image coordinates, padded with a 1s channel, double as per-ray RGB colors.
    coords = cameras.get_image_coords()
    coords[..., 0] /= cameras.image_height[0]  # All the cameras have the same image height for now
    coords[..., 1] /= cameras.image_width[0]  # All the cameras have the same image width for now
    coords = torch.cat([coords, torch.ones((*coords.shape[:-1], 1))], dim=-1)

    ray_bundle = cameras.generate_rays(camera_indices=0)
    origins = ray_bundle.origins.view(-1, 3)
    directions = ray_bundle.directions.view(-1, 3)
    coords = coords.view(-1, 3)

    # Interleave (origin, origin + direction) vertex pairs so each ray becomes one unit-length segment.
    segments = torch.empty((origins.shape[0] * 2, 3))
    segments[0::2] = origins
    segments[1::2] = origins + directions

    vertex_colors = torch.empty((coords.shape[0] * 2, 3))
    vertex_colors[0::2] = coords
    vertex_colors[1::2] = coords

    # Note the y/z swap: plot world y as the vertical axis.
    figure = go.Figure(
        data=go.Scatter3d(
            x=segments[:, 0],
            y=segments[:, 2],
            z=segments[:, 1],
            marker=dict(
                size=4,
                color=vertex_colors,
            ),
            line=dict(color="lightblue", width=1),
        )
    )
    figure.update_layout(
        scene=dict(
            xaxis=dict(title="x", showspikes=False),
            yaxis=dict(title="z", showspikes=False),
            zaxis=dict(title="y", showspikes=False),
        ),
        margin=dict(r=0, b=10, l=0, t=10),
        hovermode=False,
    )
    return figure
from typing import Any, List, Optional, Union
import numpy as np
import plotly.graph_objects as go
import torch
import torch.nn.functional as F
from plotly import express as ex
from torchtyping import TensorType
from nerfstudio.cameras.cameras import Cameras
from nerfstudio.cameras.rays import Frustums, RayBundle
from nerfstudio.utils.math import Gaussians
class Cameras(TensorDataclass):
"""Dataparser outputs for the image dataset and the ray generator.
Note: currently only supports cameras with the same principal points and types. The reason we type
the focal lengths, principal points, and image sizes as tensors is to allow for batched cameras
down the line in cases where your batches of camera data don't come from the same cameras.
If a single value is provided, it is broadcasted to all cameras.
Args:
camera_to_worlds: Camera to world matrices. Tensor of per-image c2w matrices, in [R | t] format
fx: Focal length x
fy: Focal length y
cx: Principal point x
cy: Principal point y
width: Image width
height: Image height
distortion_params: OpenCV 6 radial distortion coefficients
camera_type: Type of camera model. This will be an int corresponding to the CameraType enum.
times: Timestamps for each camera
"""
camera_to_worlds: TensorType["num_cameras":..., 3, 4]
fx: TensorType["num_cameras":..., 1]
fy: TensorType["num_cameras":..., 1]
cx: TensorType["num_cameras":..., 1]
cy: TensorType["num_cameras":..., 1]
width: TensorType["num_cameras":..., 1]
height: TensorType["num_cameras":..., 1]
distortion_params: Optional[TensorType["num_cameras":..., 6]]
camera_type: TensorType["num_cameras":..., 1]
times: Optional[TensorType["num_cameras", 1]]
def __init__(
    self,
    camera_to_worlds: TensorType["batch_c2ws":..., 3, 4],
    fx: Union[TensorType["batch_fxs":..., 1], float],
    fy: Union[TensorType["batch_fys":..., 1], float],
    cx: Union[TensorType["batch_cxs":..., 1], float],
    cy: Union[TensorType["batch_cys":..., 1], float],
    width: Optional[Union[TensorType["batch_ws":..., 1], int]] = None,
    height: Optional[Union[TensorType["batch_hs":..., 1], int]] = None,
    distortion_params: Optional[TensorType["batch_dist_params":..., 6]] = None,
    camera_type: Optional[
        Union[
            TensorType["batch_cam_types":..., 1],
            int,
            List[CameraType],
            CameraType,
        ]
    ] = CameraType.PERSPECTIVE,
    times: Optional[TensorType["num_cameras"]] = None,
):
    """Initializes the Cameras object.

    Note on Input Tensor Dimensions: All of these tensors have items of dimensions TensorType[3, 4]
    (in the case of the c2w matrices), TensorType[6] (in the case of distortion params), or
    TensorType[1] (in the case of the rest of the elements). The dimensions before that are
    considered the batch dimension of that tensor (batch_c2ws, batch_fxs, etc.). We will broadcast
    all the tensors to be the same batch dimension. This means you can use any combination of the
    input types in the function signature and it won't break. Your batch size for all tensors
    must be broadcastable to the same size, and the resulting number of batch dimensions will be
    the batch dimension with the largest number of dimensions.
    """
    # This will notify the tensordataclass that we have a field with more than 1 dimension
    self._field_custom_dimensions = {"camera_to_worlds": 2}

    # camera_to_worlds must be set first: self.device (used by every helper below)
    # is derived from it.
    self.camera_to_worlds = camera_to_worlds

    # fx fy calculation
    self.fx = self._init_get_fc_xy(fx, "fx")  # @dataclass's post_init will take care of broadcasting
    self.fy = self._init_get_fc_xy(fy, "fy")  # @dataclass's post_init will take care of broadcasting

    # cx cy calculation
    self.cx = self._init_get_fc_xy(cx, "cx")  # @dataclass's post_init will take care of broadcasting
    self.cy = self._init_get_fc_xy(cy, "cy")  # @dataclass's post_init will take care of broadcasting

    # Distortion Params Calculation:
    self.distortion_params = distortion_params  # @dataclass's post_init will take care of broadcasting

    # @dataclass's post_init will take care of broadcasting
    # height/width default to 2*cy / 2*cx when omitted (assumes a centered principal point),
    # so cy/cx must already be set above.
    self.height = self._init_get_height_width(height, self.cy)
    self.width = self._init_get_height_width(width, self.cx)
    self.camera_type = self._init_get_camera_type(camera_type)
    self.times = self._init_get_times(times)

    self.__post_init__()  # This will do the dataclass post_init and broadcast all the tensors
def _init_get_fc_xy(self, fc_xy, name):
    """Parse a focal length / principal point component into a tensor of the right shape.

    A scalar is wrapped in a 1-element float tensor (broadcasting to the camera batch
    happens later in the __post_init__ function); a tensor gets a trailing singleton
    dimension added if it is missing one.

    Args:
        fc_xy: The focal length / principal point x or y. A float, int, or tensor.
        name: The name of the variable. Used for error messages.

    Returns:
        Tensor with a trailing dimension of 1, on ``self.device``.

    Raises:
        ValueError: If ``fc_xy`` is neither a scalar nor a tensor.
    """
    if isinstance(fc_xy, (float, int)):
        # Fix: the legacy ``torch.Tensor([x], device=...)`` constructor rejects non-CPU
        # devices; ``torch.tensor`` accepts any device. Ints are also accepted now
        # (backward-compatible generalization) and are cast to float32.
        fc_xy = torch.tensor([float(fc_xy)], dtype=torch.float32, device=self.device)
    elif isinstance(fc_xy, torch.Tensor):
        if fc_xy.ndim == 0 or fc_xy.shape[-1] != 1:
            fc_xy = fc_xy.unsqueeze(-1)
        fc_xy = fc_xy.to(self.device)
    else:
        raise ValueError(f"{name} must be a float or tensor, got {type(fc_xy)}")
    return fc_xy
def _init_get_camera_type(
    self,
    camera_type: Union[
        TensorType["batch_cam_types":..., 1], TensorType["batch_cam_types":...], int, List[CameraType], CameraType
    ],
) -> TensorType["num_cameras":..., 1]:
    """
    Parses the __init__() argument camera_type

    Camera Type Calculation:
        If CameraType, convert to int and then to tensor, then broadcast to all cameras
        If List of CameraTypes, convert to ints and then to tensor, then broadcast to all cameras
        If int, first go to tensor and then broadcast to all cameras
        If tensor, broadcast to all cameras

    Args:
        camera_type: camera_type argument from __init__()
    """
    if isinstance(camera_type, CameraType):
        camera_type = torch.tensor([camera_type.value], device=self.device)
    elif isinstance(camera_type, List) and isinstance(camera_type[0], CameraType):
        # One [value] row per camera; trailing singleton dim already present.
        camera_type = torch.tensor([[c.value] for c in camera_type], device=self.device)
    elif isinstance(camera_type, int):
        camera_type = torch.tensor([camera_type], device=self.device)
    elif isinstance(camera_type, torch.Tensor):
        assert not torch.is_floating_point(
            camera_type
        ), f"camera_type tensor must be of type int, not: {camera_type.dtype}"
        camera_type = camera_type.to(self.device)
        if camera_type.ndim == 0 or camera_type.shape[-1] != 1:
            camera_type = camera_type.unsqueeze(-1)
        # assert torch.all(
        #     camera_type.view(-1)[0] == camera_type
        # ), "Batched cameras of different camera_types will be allowed in the future."
    else:
        raise ValueError(
            'Invalid camera_type. Must be CameraType, List[CameraType], int, or torch.Tensor["num_cameras"]. \
                Received: '
            + str(type(camera_type))
        )
    return camera_type
def _init_get_height_width(
    self,
    h_w: Union[TensorType["batch_hws":..., 1], TensorType["batch_hws":...], int, None],
    c_x_y: TensorType["batch_cxys":...],
) -> TensorType["num_cameras":..., 1]:
    """
    Parses the __init__() argument for height or width

    Height/Width Calculation:
        If int, first go to tensor and then broadcast to all cameras
        If tensor, broadcast to all cameras
        If none, use cx or cy * 2 (i.e. assume a centered principal point)
        Else raise error

    Args:
        h_w: height or width argument from __init__()
        c_x_y: cx or cy for when h_w == None

    Returns:
        int64 tensor with a trailing dimension of 1, on ``self.device``.

    Raises:
        ValueError: If ``h_w`` is not an int, tensor, or None.
    """
    if isinstance(h_w, int):
        h_w = torch.Tensor([h_w]).to(torch.int64).to(self.device)
    elif isinstance(h_w, torch.Tensor):
        assert not torch.is_floating_point(h_w), f"height and width tensor must be of type int, not: {h_w.dtype}"
        h_w = h_w.to(torch.int64).to(self.device)
        if h_w.ndim == 0 or h_w.shape[-1] != 1:
            h_w = h_w.unsqueeze(-1)
        # assert torch.all(h_w == h_w.view(-1)[0]), "Batched cameras of different h, w will be allowed in the future."
    elif h_w is None:
        # Fix: the previous ``torch.Tensor(...)`` re-wrap produced a float32 tensor,
        # breaking this field's int64 contract; keep the int64 tensor as-is.
        h_w = (c_x_y * 2).to(torch.int64).to(self.device)
    else:
        raise ValueError("Height must be an int, tensor, or None, received: " + str(type(h_w)))
    return h_w
def _init_get_times(self, times):
    """Parses the __init__() argument for per-camera timestamps.

    Args:
        times: ``None`` or a tensor of timestamps.

    Returns:
        ``None``, or the timestamps with a trailing singleton dimension, on ``self.device``.

    Raises:
        ValueError: If ``times`` is neither None nor a tensor.
    """
    if times is None:
        return None
    if isinstance(times, torch.Tensor):
        if times.ndim == 0 or times.shape[-1] != 1:
            times = times.unsqueeze(-1)
        # Fix: previously the device move happened only when unsqueezing, so a tensor
        # that already had a trailing dim of 1 was never moved to self.device.
        return times.to(self.device)
    raise ValueError(f"times must be None or a tensor, got {type(times)}")
def device(self):
    """Returns the device that the camera is on."""
    # NOTE(review): this is accessed as an attribute (``self.device``) elsewhere in this
    # file, so it is presumably a @property whose decorator was lost in extraction — confirm.
    return self.camera_to_worlds.device
def image_height(self) -> TensorType["num_cameras":..., 1]:
    """Returns the height of the images."""
    # NOTE(review): accessed as ``self.image_height[...]`` elsewhere in this file, so
    # presumably a @property whose decorator was lost in extraction — confirm.
    return self.height
def image_width(self) -> TensorType["num_cameras":..., 1]:
    """Returns the width of the images."""
    # NOTE(review): accessed as ``self.image_width[...]`` elsewhere in this file, so
    # presumably a @property whose decorator was lost in extraction — confirm.
    # (Docstring previously said "height" — copy-paste typo; it returns self.width.)
    return self.width
def is_jagged(self):
    """
    Returns whether or not the cameras are "jagged" (i.e. the height and widths are different, meaning that
    you cannot concatenate the image coordinate maps together)
    """
    # Compare every entry against the first flattened entry; any mismatch makes the batch jagged.
    first_height = self.height.view(-1)[0]
    first_width = self.width.view(-1)[0]
    heights_differ = not torch.all(self.height == first_height)
    widths_differ = not torch.all(self.width == first_width)
    return heights_differ or widths_differ
def get_image_coords(
    self, pixel_offset: float = 0.5, index: Optional[Tuple] = None
) -> TensorType["height", "width", 2]:
    """This gets the image coordinates of one of the cameras in this object.

    If no index is specified, it will return the maximum possible sized height / width image coordinate map,
    by looking at the maximum height and width of all the cameras in this object.

    Args:
        pixel_offset: Offset for each pixel. Defaults to center of pixel (0.5)
        index: Tuple of indices into the batch dimensions of the camera. Defaults to None, which returns the
            maximum-size coordinate map over all cameras.

    Returns:
        Grid of image coordinates, stored as (y, x).
    """
    # Deduplicated: both branches previously repeated the identical meshgrid construction;
    # only the height/width selection differs.
    if index is None:
        image_height = torch.max(self.image_height.view(-1))
        image_width = torch.max(self.image_width.view(-1))
    else:
        image_height = self.image_height[index].item()
        image_width = self.image_width[index].item()
    image_coords = torch.meshgrid(torch.arange(image_height), torch.arange(image_width), indexing="ij")
    image_coords = torch.stack(image_coords, dim=-1) + pixel_offset  # stored as (y, x) coordinates
    return image_coords
def generate_rays(  # pylint: disable=too-many-statements
    self,
    camera_indices: Union[TensorType["num_rays":..., "num_cameras_batch_dims"], int],
    coords: Optional[TensorType["num_rays":..., 2]] = None,
    camera_opt_to_camera: Optional[TensorType["num_rays":..., 3, 4]] = None,
    distortion_params_delta: Optional[TensorType["num_rays":..., 6]] = None,
    keep_shape: Optional[bool] = None,
    disable_distortion: bool = False,
) -> RayBundle:
    """Generates rays for the given camera indices.

    This function will standardize the input arguments and then call the _generate_rays_from_coords function
    to generate the rays. Our goal is to parse the arguments and then get them into the right shape:

        - camera_indices: (num_rays:..., num_cameras_batch_dims)
        - coords: (num_rays:..., 2)
        - camera_opt_to_camera: (num_rays:..., 3, 4) or None
        - distortion_params_delta: (num_rays:..., 6) or None

    Read the docstring for _generate_rays_from_coords for more information on how we generate the rays
    after we have standardized the arguments.

    We are only concerned about different combinations of camera_indices and coords matrices, and the following
    are the 4 cases we have to deal with:

        1. isinstance(camera_indices, int) and coords == None
            - In this case we broadcast our camera_indices / coords shape (h, w, 1 / 2 respectively)
        2. isinstance(camera_indices, int) and coords != None
            - In this case, we broadcast camera_indices to the same batch dim as coords
        3. not isinstance(camera_indices, int) and coords == None
            - In this case, we will need to set coords so that it is of shape (h, w, num_rays, 2), and broadcast
                all our other args to match the new definition of num_rays := (h, w) + num_rays
        4. not isinstance(camera_indices, int) and coords != None
            - In this case, we have nothing to do, only check that the arguments are of the correct shape

    There is one more edge case we need to be careful with: when we have "jagged cameras" (ie: different heights
    and widths for each camera). This isn't problematic when we specify coords, since coords is already a tensor.
    When coords == None (ie: when we render out the whole image associated with this camera), we run into problems
    since there's no way to stack each coordinate map as all coordinate maps are all different shapes. In this case,
    we will need to flatten each individual coordinate map and concatenate them, giving us only one batch dimension,
    regaurdless of the number of prepended extra batch dimensions in the camera_indices tensor.

    Args:
        camera_indices: Camera indices of the flattened cameras object to generate rays for.
        coords: Coordinates of the pixels to generate rays for. If None, the full image will be rendered.
        camera_opt_to_camera: Optional transform for the camera to world matrices.
        distortion_params_delta: Optional delta for the distortion parameters.
        keep_shape: If None, then we default to the regular behavior of flattening if cameras is jagged, otherwise
            keeping dimensions. If False, we flatten at the end. If True, then we keep the shape of the
            camera_indices and coords tensors (if we can).
        disable_distortion: If True, disables distortion.

    Returns:
        Rays for the given camera indices and coords.
    """
    # Check the argument types to make sure they're valid and all shaped correctly
    assert isinstance(camera_indices, (torch.Tensor, int)), "camera_indices must be a tensor or int"
    assert coords is None or isinstance(coords, torch.Tensor), "coords must be a tensor or None"
    assert camera_opt_to_camera is None or isinstance(camera_opt_to_camera, torch.Tensor)
    assert distortion_params_delta is None or isinstance(distortion_params_delta, torch.Tensor)
    if isinstance(camera_indices, torch.Tensor) and isinstance(coords, torch.Tensor):
        num_rays_shape = camera_indices.shape[:-1]
        errormsg = "Batch dims of inputs must match when inputs are all tensors"
        assert coords.shape[:-1] == num_rays_shape, errormsg
        assert camera_opt_to_camera is None or camera_opt_to_camera.shape[:-2] == num_rays_shape, errormsg
        assert distortion_params_delta is None or distortion_params_delta.shape[:-1] == num_rays_shape, errormsg

    # If zero dimensional, we need to unsqueeze to get a batch dimension and then squeeze later
    if not self.shape:
        cameras = self.reshape((1,))
        assert torch.all(
            torch.tensor(camera_indices == 0) if isinstance(camera_indices, int) else camera_indices == 0
        ), "Can only index into single camera with no batch dimensions if index is zero"
    else:
        cameras = self

    # If the camera indices are an int, then we need to make sure that the camera batch is 1D
    if isinstance(camera_indices, int):
        assert (
            len(cameras.shape) == 1
        ), "camera_indices must be a tensor if cameras are batched with more than 1 batch dimension"
        camera_indices = torch.tensor([camera_indices], device=cameras.device)

    assert camera_indices.shape[-1] == len(
        cameras.shape
    ), "camera_indices must have shape (num_rays:..., num_cameras_batch_dims)"

    # If keep_shape is True, then we need to make sure that the camera indices in question
    # are all the same height and width and can actually be batched while maintaining the image
    # shape
    if keep_shape is True:
        assert torch.all(cameras.height[camera_indices] == cameras.height[camera_indices[0]]) and torch.all(
            cameras.width[camera_indices] == cameras.width[camera_indices[0]]
        ), "Can only keep shape if all cameras have the same height and width"

    # If the cameras don't all have same height / width, if coords is not none, we will need to generate
    # a flat list of coords for each camera and then concatenate otherwise our rays will be jagged.
    # Camera indices, camera_opt, and distortion will also need to be broadcasted accordingly which is non-trivial
    if cameras.is_jagged and coords is None and (keep_shape is None or keep_shape is False):
        index_dim = camera_indices.shape[-1]
        camera_indices = camera_indices.reshape(-1, index_dim)
        # One flattened (h_i * w_i, 2) coordinate map per indexed camera, since sizes differ.
        _coords = [cameras.get_image_coords(index=tuple(index)).reshape(-1, 2) for index in camera_indices]
        # Repeat each camera's index once per pixel of that camera so indices and coords stay aligned.
        camera_indices = torch.cat(
            [index.unsqueeze(0).repeat(coords.shape[0], 1) for index, coords in zip(camera_indices, _coords)],
        )
        coords = torch.cat(_coords, dim=0)
        assert coords.shape[0] == camera_indices.shape[0]
        # Need to get the coords of each indexed camera and flatten all coordinate maps and concatenate them

    # The case where we aren't jagged && keep_shape (since otherwise coords is already set) and coords
    # is None. In this case we append (h, w) to the num_rays dimensions for all tensors. In this case,
    # each image in camera_indices has to have the same shape since otherwise we would have error'd when
    # we checked keep_shape is valid or we aren't jagged.
    if coords is None:
        index_dim = camera_indices.shape[-1]
        index = camera_indices.reshape(-1, index_dim)[0]
        coords: torch.Tensor = cameras.get_image_coords(index=tuple(index))  # (h, w, 2)
        coords = coords.reshape(coords.shape[:2] + (1,) * len(camera_indices.shape[:-1]) + (2,))  # (h, w, 1..., 2)
        coords = coords.expand(coords.shape[:2] + camera_indices.shape[:-1] + (2,))  # (h, w, num_rays, 2)
        camera_opt_to_camera = (  # (h, w, num_rays, 3, 4) or None
            camera_opt_to_camera.broadcast_to(coords.shape[:-1] + (3, 4))
            if camera_opt_to_camera is not None
            else None
        )
        distortion_params_delta = (  # (h, w, num_rays, 6) or None
            distortion_params_delta.broadcast_to(coords.shape[:-1] + (6,))
            if distortion_params_delta is not None
            else None
        )

    # If camera indices was an int or coords was none, we need to broadcast our indices along batch dims
    camera_indices = camera_indices.broadcast_to(coords.shape[:-1] + (len(cameras.shape),)).to(torch.long)

    # Checking our tensors have been standardized
    assert isinstance(coords, torch.Tensor) and isinstance(camera_indices, torch.Tensor)
    assert camera_indices.shape[-1] == len(cameras.shape)
    assert camera_opt_to_camera is None or camera_opt_to_camera.shape[:-2] == coords.shape[:-1]
    assert distortion_params_delta is None or distortion_params_delta.shape[:-1] == coords.shape[:-1]

    # This will do the actual work of generating the rays now that we have standardized the inputs
    # raybundle.shape == (num_rays) when done
    # pylint: disable=protected-access
    raybundle = cameras._generate_rays_from_coords(
        camera_indices, coords, camera_opt_to_camera, distortion_params_delta, disable_distortion=disable_distortion
    )

    # If we have mandated that we don't keep the shape, then we flatten
    if keep_shape is False:
        raybundle = raybundle.flatten()

    # TODO: We should have to squeeze the last dimension here if we started with zero batch dims, but never have to,
    # so there might be a rogue squeeze happening somewhere, and this may cause some unintended behaviour
    # that we haven't caught yet with tests
    return raybundle
# pylint: disable=too-many-statements
def _generate_rays_from_coords(
    self,
    camera_indices: TensorType["num_rays":..., "num_cameras_batch_dims"],
    coords: TensorType["num_rays":..., 2],
    camera_opt_to_camera: Optional[TensorType["num_rays":..., 3, 4]] = None,
    distortion_params_delta: Optional[TensorType["num_rays":..., 6]] = None,
    disable_distortion: bool = False,
) -> RayBundle:
    """Generates rays for the given camera indices and coords where self isn't jagged

    This is a fairly complex function, so let's break this down slowly.

    Shapes involved:
        - num_rays: This is your output raybundle shape. It dictates the number and shape of the rays generated
        - num_cameras_batch_dims: This is the number of dimensions of our camera

    Args:
        camera_indices: Camera indices of the flattened cameras object to generate rays for.
            The shape of this is such that indexing into camera_indices["num_rays":...] will return the
            index into each batch dimension of the camera in order to get the correct camera specified by
            "num_rays".

            Example:
                >>> cameras = Cameras(...)
                >>> cameras.shape
                (2, 3, 4)

                >>> camera_indices = torch.tensor([0, 0, 0]) # We need an axis of length 3 since cameras.ndim == 3
                >>> camera_indices.shape
                (3,)
                >>> coords = torch.tensor([1,1])
                >>> coords.shape
                (2,)
                >>> out_rays = cameras.generate_rays(camera_indices=camera_indices, coords = coords)
                # This will generate a RayBundle with a single ray for the
                # camera at cameras[0,0,0] at image coordinates (1,1), so out_rays.shape == ()
                >>> out_rays.shape
                ()

                >>> camera_indices = torch.tensor([[0,0,0]])
                >>> camera_indices.shape
                (1, 3)
                >>> coords = torch.tensor([[1,1]])
                >>> coords.shape
                (1, 2)
                >>> out_rays = cameras.generate_rays(camera_indices=camera_indices, coords = coords)
                # This will generate a RayBundle with a single ray for the
                # camera at cameras[0,0,0] at point (1,1), so out_rays.shape == (1,)
                # since we added an extra dimension in front of camera_indices
                >>> out_rays.shape
                (1,)

            If you want more examples, check tests/cameras/test_cameras and the function check_generate_rays_shape

            The bottom line is that for camera_indices: (num_rays:..., num_cameras_batch_dims), num_rays is the
            output shape and if you index into the output RayBundle with some indices [i:...], if you index into
            camera_indices with camera_indices[i:...] as well, you will get a 1D tensor containing the batch
            indices into the original cameras object corresponding to that ray (ie: you will get the camera
            from our batched cameras corresponding to the ray at RayBundle[i:...]).

        coords: Coordinates of the pixels to generate rays for. If None, the full image will be rendered, meaning
            height and width get prepended to the num_rays dimensions. Indexing into coords with [i:...] will
            get you the image coordinates [x, y] of that specific ray located at output RayBundle[i:...].

        camera_opt_to_camera: Optional transform for the camera to world matrices.
            In terms of shape, it follows the same rules as coords, but indexing into it with [i:...] gets you
            the 2D camera to world transform matrix for the camera optimization at RayBundle[i:...].

        distortion_params_delta: Optional delta for the distortion parameters.
            In terms of shape, it follows the same rules as coords, but indexing into it with [i:...] gets you
            the 1D tensor with the 6 distortion parameters for the camera optimization at RayBundle[i:...].

        disable_distortion: If True, disables distortion.

    Returns:
        Rays for the given camera indices and coords. RayBundle.shape == num_rays
    """
    # Make sure we're on the right devices
    camera_indices = camera_indices.to(self.device)
    coords = coords.to(self.device)

    # Checking to make sure everything is of the right shape and type
    num_rays_shape = camera_indices.shape[:-1]
    assert camera_indices.shape == num_rays_shape + (self.ndim,)
    assert coords.shape == num_rays_shape + (2,)
    assert coords.shape[-1] == 2
    assert camera_opt_to_camera is None or camera_opt_to_camera.shape == num_rays_shape + (3, 4)
    assert distortion_params_delta is None or distortion_params_delta.shape == num_rays_shape + (6,)

    # Here, we've broken our indices down along the num_cameras_batch_dims dimension allowing us to index by all
    # of our output rays at each dimension of our cameras object
    true_indices = [camera_indices[..., i] for i in range(camera_indices.shape[-1])]

    # Get all our focal lengths, principal points and make sure they are the right shapes
    y = coords[..., 0]  # (num_rays,) get rid of the last dimension
    x = coords[..., 1]  # (num_rays,) get rid of the last dimension
    fx, fy = self.fx[true_indices].squeeze(-1), self.fy[true_indices].squeeze(-1)  # (num_rays,)
    cx, cy = self.cx[true_indices].squeeze(-1), self.cy[true_indices].squeeze(-1)  # (num_rays,)
    assert (
        y.shape == num_rays_shape
        and x.shape == num_rays_shape
        and fx.shape == num_rays_shape
        and fy.shape == num_rays_shape
        and cx.shape == num_rays_shape
        and cy.shape == num_rays_shape
    ), (
        str(num_rays_shape)
        + str(y.shape)
        + str(x.shape)
        + str(fx.shape)
        + str(fy.shape)
        + str(cx.shape)
        + str(cy.shape)
    )

    # Get our image coordinates and image coordinates offset by 1 (offsets used for dx, dy calculations)
    # Also make sure the shapes are correct.
    # The y term is negated so that +y points up in camera space (the perspective branch below uses z = -1).
    coord = torch.stack([(x - cx) / fx, -(y - cy) / fy], -1)  # (num_rays, 2)
    coord_x_offset = torch.stack([(x - cx + 1) / fx, -(y - cy) / fy], -1)  # (num_rays, 2)
    coord_y_offset = torch.stack([(x - cx) / fx, -(y - cy + 1) / fy], -1)  # (num_rays, 2)
    assert (
        coord.shape == num_rays_shape + (2,)
        and coord_x_offset.shape == num_rays_shape + (2,)
        and coord_y_offset.shape == num_rays_shape + (2,)
    )

    # Stack image coordinates and image coordinates offset by 1, check shapes too
    coord_stack = torch.stack([coord, coord_x_offset, coord_y_offset], dim=0)  # (3, num_rays, 2)
    assert coord_stack.shape == (3,) + num_rays_shape + (2,)

    # Undistorts our images according to our distortion parameters
    if not disable_distortion:
        distortion_params = None
        if self.distortion_params is not None:
            distortion_params = self.distortion_params[true_indices]
            if distortion_params_delta is not None:
                distortion_params = distortion_params + distortion_params_delta
        elif distortion_params_delta is not None:
            distortion_params = distortion_params_delta

        # Do not apply distortion for equirectangular images
        if distortion_params is not None:
            mask = (self.camera_type[true_indices] != CameraType.EQUIRECTANGULAR.value).squeeze(-1)  # (num_rays)
            coord_mask = torch.stack([mask, mask, mask], dim=0)
            if mask.any():
                coord_stack[coord_mask, :] = camera_utils.radial_and_tangential_undistort(
                    coord_stack[coord_mask, :].reshape(3, -1, 2),
                    distortion_params[mask, :],
                ).reshape(-1, 2)

    # Make sure after we have undistorted our images, the shapes are still correct
    assert coord_stack.shape == (3,) + num_rays_shape + (2,)

    # Gets our directions for all our rays in camera coordinates and checks shapes at the end
    # Here, directions_stack is of shape (3, num_rays, 3)
    # directions_stack[0] is the direction for ray in camera coordinates
    # directions_stack[1] is the direction for ray in camera coordinates offset by 1 in x
    # directions_stack[2] is the direction for ray in camera coordinates offset by 1 in y
    cam_types = torch.unique(self.camera_type, sorted=False)
    directions_stack = torch.empty((3,) + num_rays_shape + (3,), device=self.device)
    if CameraType.PERSPECTIVE.value in cam_types:
        mask = (self.camera_type[true_indices] == CameraType.PERSPECTIVE.value).squeeze(-1)  # (num_rays)
        mask = torch.stack([mask, mask, mask], dim=0)
        directions_stack[..., 0][mask] = torch.masked_select(coord_stack[..., 0], mask).float()
        directions_stack[..., 1][mask] = torch.masked_select(coord_stack[..., 1], mask).float()
        directions_stack[..., 2][mask] = -1.0  # camera looks down -z

    if CameraType.FISHEYE.value in cam_types:
        mask = (self.camera_type[true_indices] == CameraType.FISHEYE.value).squeeze(-1)  # (num_rays)
        mask = torch.stack([mask, mask, mask], dim=0)

        # theta is the angular distance of the pixel from the principal point
        theta = torch.sqrt(torch.sum(coord_stack**2, dim=-1))
        theta = torch.clip(theta, 0.0, math.pi)
        sin_theta = torch.sin(theta)
        directions_stack[..., 0][mask] = torch.masked_select(coord_stack[..., 0] * sin_theta / theta, mask).float()
        directions_stack[..., 1][mask] = torch.masked_select(coord_stack[..., 1] * sin_theta / theta, mask).float()
        # NOTE(review): unlike the other branches, there is no .float() cast here -- confirm dtype consistency.
        directions_stack[..., 2][mask] = -torch.masked_select(torch.cos(theta), mask)

    if CameraType.EQUIRECTANGULAR.value in cam_types:
        mask = (self.camera_type[true_indices] == CameraType.EQUIRECTANGULAR.value).squeeze(-1)  # (num_rays)
        mask = torch.stack([mask, mask, mask], dim=0)

        # For equirect, fx = fy = height = width/2
        # Then coord[..., 0] goes from -1 to 1 and coord[..., 1] goes from -1/2 to 1/2
        theta = -torch.pi * coord_stack[..., 0]  # minus sign for right-handed
        phi = torch.pi * (0.5 - coord_stack[..., 1])
        # use spherical in local camera coordinates (+y up, x=0 and z<0 is theta=0)
        directions_stack[..., 0][mask] = torch.masked_select(-torch.sin(theta) * torch.sin(phi), mask).float()
        directions_stack[..., 1][mask] = torch.masked_select(torch.cos(phi), mask).float()
        directions_stack[..., 2][mask] = torch.masked_select(-torch.cos(theta) * torch.sin(phi), mask).float()

    for value in cam_types:
        if value not in [CameraType.PERSPECTIVE.value, CameraType.FISHEYE.value, CameraType.EQUIRECTANGULAR.value]:
            raise ValueError(f"Camera type {value} not supported.")

    assert directions_stack.shape == (3,) + num_rays_shape + (3,)

    c2w = self.camera_to_worlds[true_indices]
    assert c2w.shape == num_rays_shape + (3, 4)

    # Optionally compose the camera-optimization correction into the camera-to-world transform
    if camera_opt_to_camera is not None:
        c2w = pose_utils.multiply(c2w, camera_opt_to_camera)
    rotation = c2w[..., :3, :3]  # (..., 3, 3)
    assert rotation.shape == num_rays_shape + (3, 3)

    # Rotate the camera-space directions into world space
    directions_stack = torch.sum(
        directions_stack[..., None, :] * rotation, dim=-1
    )  # (..., 1, 3) * (..., 3, 3) -> (..., 3)

    directions_norm = torch.norm(directions_stack, dim=-1, keepdim=True)
    directions_norm = directions_norm[0]  # keep only the norms of the central (non-offset) directions

    directions_stack = normalize(directions_stack, dim=-1)
    assert directions_stack.shape == (3,) + num_rays_shape + (3,)

    origins = c2w[..., :3, 3]  # (..., 3)
    assert origins.shape == num_rays_shape + (3,)

    directions = directions_stack[0]
    assert directions.shape == num_rays_shape + (3,)

    # norms of the vector going between adjacent coords, giving us dx and dy per output ray
    dx = torch.sqrt(torch.sum((directions - directions_stack[1]) ** 2, dim=-1))  # ("num_rays":...,)
    dy = torch.sqrt(torch.sum((directions - directions_stack[2]) ** 2, dim=-1))  # ("num_rays":...,)
    assert dx.shape == num_rays_shape and dy.shape == num_rays_shape

    pixel_area = (dx * dy)[..., None]  # ("num_rays":..., 1)
    assert pixel_area.shape == num_rays_shape + (1,)

    times = self.times[camera_indices, 0] if self.times is not None else None

    return RayBundle(
        origins=origins,
        directions=directions,
        pixel_area=pixel_area,
        camera_indices=camera_indices,
        directions_norm=directions_norm,
        times=times,
    )
def to_json(
    self, camera_idx: int, image: Optional[TensorType["height", "width", 2]] = None, max_size: Optional[int] = None
) -> Dict:
    """Convert a camera to a json dictionary.

    Args:
        camera_idx: Index of the camera to convert.
        image: An image in range [0, 1] that is encoded to a base64 string.
        max_size: Max size to resize the image to if present.

    Returns:
        A JSON representation of the camera
    """
    flat = self.flatten()
    cam = flat[camera_idx]
    json_ = {
        "type": "PinholeCamera",
        "cx": cam.cx.item(),
        "cy": cam.cy.item(),
        "fx": cam.fx.item(),
        "fy": cam.fy.item(),
        "camera_to_world": self.camera_to_worlds[camera_idx].tolist(),
        "camera_index": camera_idx,
        "times": cam.times.item() if self.times is not None else None,
    }
    if image is not None:
        # Quantize [0, 1] floats to uint8 before JPEG-encoding.
        img_u8 = (image * 255).detach().type(torch.uint8)
        if max_size is not None:
            # torchvision resize expects channels-first layout.
            img_u8 = img_u8.permute(2, 0, 1)
            img_u8 = torchvision.transforms.functional.resize(img_u8, max_size)  # type: ignore
            img_u8 = img_u8.permute(1, 2, 0)
        encoded = cv2.imencode(".jpg", img_u8.cpu().numpy())[1].tobytes()
        json_["image"] = str("data:image/jpeg;base64," + base64.b64encode(encoded).decode("ascii"))
    return json_
def get_intrinsics_matrices(self) -> TensorType["num_cameras":..., 3, 3]:
    """Returns the intrinsic matrices for each camera.

    Returns:
        Pinhole camera intrinsics matrices
    """
    intrinsics = torch.zeros((*self.shape, 3, 3), dtype=torch.float32)
    # Standard pinhole K: focal lengths on the diagonal, principal point in the last column.
    intrinsics[..., 2, 2] = 1.0
    intrinsics[..., 0, 0] = self.fx.squeeze(-1)
    intrinsics[..., 1, 1] = self.fy.squeeze(-1)
    intrinsics[..., 0, 2] = self.cx.squeeze(-1)
    intrinsics[..., 1, 2] = self.cy.squeeze(-1)
    return intrinsics
def rescale_output_resolution(
    self, scaling_factor: Union[TensorType["num_cameras":...], TensorType["num_cameras":..., 1], float, int]
) -> None:
    """Rescale the output resolution of the cameras.

    Args:
        scaling_factor: Scaling factor to apply to the output resolution.
    """
    # Normalize `scaling_factor` so it broadcasts against the per-camera intrinsics.
    if isinstance(scaling_factor, (float, int)):
        scaling_factor = torch.tensor([scaling_factor]).to(self.device).broadcast_to((self.cx.shape))
    elif isinstance(scaling_factor, torch.Tensor) and scaling_factor.shape == self.shape:
        scaling_factor = scaling_factor.unsqueeze(-1)
    elif isinstance(scaling_factor, torch.Tensor) and scaling_factor.shape == (*self.shape, 1):
        pass
    else:
        raise ValueError(
            f"Scaling factor must be a float, int, or a tensor of shape {self.shape} or {(*self.shape, 1)}."
        )

    # Intrinsics scale linearly with resolution.
    for attr in ("fx", "fy", "cx", "cy"):
        setattr(self, attr, getattr(self, attr) * scaling_factor)
    # Image dimensions are kept integral after scaling.
    self.height = (self.height * scaling_factor).to(torch.int64)
    self.width = (self.width * scaling_factor).to(torch.int64)
The provided code snippet includes necessary dependencies for implementing the `get_camera_frustums` function. Write a Python function `def get_camera_frustums(cameras: Cameras)` to solve the following problem:
Returns the camera frustums for the cameras that we are using. Args: cameras: The cameras that we want to plot. Returns: A plotly scatter that can be plotted.
Here is the function:
def get_camera_frustums(cameras: Cameras):
    """Returns the camera frustums for the cameras that we are using.

    Args:
        cameras: The cameras that we want to plot.

    Returns:
        A plotly scatter that can be plotted.
    """
    # Placeholder implementation: dump each camera's JSON representation and bail out.
    for idx in range(cameras.size):
        print(cameras.to_json(camera_idx=idx))
    raise NotImplementedError
162,362 | from typing import Callable, List
from nerfstudio.utils import comms
The provided code snippet includes necessary dependencies for implementing the `check_profiler_enabled` function. Write a Python function `def check_profiler_enabled(func: Callable) -> Callable` to solve the following problem:
Decorator: check if profiler is enabled
Here is the function:
def check_profiler_enabled(func: Callable) -> Callable:
    """Decorator: check if profiler is enabled"""

    def wrapper(self, *args, **kwargs):
        # Skip the wrapped call (yielding None) unless profiling is turned on.
        if not self.config.enable_profiler:
            return None
        return func(self, *args, **kwargs)

    return wrapper
162,363 | from typing import Callable, List
from nerfstudio.utils import comms
The provided code snippet includes necessary dependencies for implementing the `check_viewer_enabled` function. Write a Python function `def check_viewer_enabled(func: Callable) -> Callable` to solve the following problem:
Decorator: check if viewer is enabled and only run on main process
Here is the function:
def check_viewer_enabled(func: Callable) -> Callable:
    """Decorator: check if viewer is enabled and only run on main process"""

    def wrapper(self, *args, **kwargs):
        # Run only when the viewer is on AND we are the main (rank-0) process.
        if self.config.is_viewer_enabled() and comms.is_main_process():
            return func(self, *args, **kwargs)
        return None

    return wrapper
162,364 | from typing import Callable, List
from nerfstudio.utils import comms
The provided code snippet includes necessary dependencies for implementing the `check_eval_enabled` function. Write a Python function `def check_eval_enabled(func: Callable) -> Callable` to solve the following problem:
Decorator: check if evaluation step is enabled
Here is the function:
def check_eval_enabled(func: Callable) -> Callable:
    """Decorator: check if evaluation step is enabled"""

    def wrapper(self, *args, **kwargs):
        # Evaluation only runs when some experiment logger (wandb or tensorboard) is active.
        config = self.config
        if config.is_wandb_enabled() or config.is_tensorboard_enabled():
            return func(self, *args, **kwargs)
        return None

    return wrapper
162,366 | from __future__ import annotations
import os
import sys
from pathlib import Path
from typing import Optional, Tuple
import torch
import yaml
from rich.console import Console
from typing_extensions import Literal
from nerfstudio.configs import base_config as cfg
from nerfstudio.pipelines.base_pipeline import Pipeline
def eval_load_checkpoint(config: cfg.TrainerConfig, pipeline: Pipeline) -> Path:
    ## TODO: ideally eventually want to get this to be the same as whatever is used to load train checkpoint too
    """Helper function to load a checkpointed pipeline.

    Args:
        config (DictConfig): Configuration of pipeline to load
        pipeline (Pipeline): Pipeline instance of which to load weights

    Returns:
        Path of the checkpoint file that was loaded.
    """
    assert config.load_dir is not None
    if config.load_step is not None:
        load_step = config.load_step
    else:
        CONSOLE.print("Loading latest checkpoint from load_dir")
        # NOTE: this relies on the "step-XXXXXXXXX.ckpt" checkpoint name format
        if not os.path.exists(config.load_dir):
            CONSOLE.rule("Error", style="red")
            CONSOLE.print(f"No checkpoint directory found at {config.load_dir}, ", justify="center")
            CONSOLE.print(
                "Please make sure the checkpoint exists, they should be generated periodically during training",
                justify="center",
            )
            sys.exit(1)
        # Pick the checkpoint with the largest step number.
        steps = sorted(int(name[name.find("-") + 1 : name.find(".")]) for name in os.listdir(config.load_dir))
        load_step = steps[-1]
    load_path = config.load_dir / f"step-{load_step:09d}.ckpt"
    assert load_path.exists(), f"Checkpoint {load_path} does not exist"
    # Load on CPU first; the pipeline decides where the weights live.
    loaded_state = torch.load(load_path, map_location="cpu")
    pipeline.load_pipeline(loaded_state["pipeline"])
    CONSOLE.print(f":white_check_mark: Done loading checkpoint from {load_path}")
    return load_path
class Pipeline(nn.Module):
    """The intent of this class is to provide a higher level interface for the Model
    that will be easy to use for our Trainer class.

    This class will contain high level functions for the model like getting the loss
    dictionaries and visualization code. It should have ways to get the next iterations
    training loss, evaluation loss, and generate whole images for visualization. Each model
    class should be 1:1 with a pipeline that can act as a standardized interface and hide
    differences in how each model takes in and outputs data.

    This class's function is to hide the data manager and model classes from the trainer,
    worrying about:
    1) Fetching data with the data manager
    2) Feeding the model the data and fetching the loss
    Hopefully this provides a higher level interface for the trainer to use, and
    simplifying the model classes, which each may have different forward() methods
    and so on.

    Args:
        config: configuration to instantiate pipeline
        device: location to place model and data
        test_mode:
            'train': loads train/eval datasets into memory
            'test': loads train/test dataset into memory
            'inference': does not load any dataset into memory
        world_size: total number of machines available
        local_rank: rank of current machine

    Attributes:
        datamanager: The data manager that will be used
        model: The model that will be used
    """

    # pylint: disable=abstract-method
    datamanager: DataManager  # supplies ray bundles / batches for train and eval
    _model: Model  # possibly DDP-wrapped; access through `model` to get the unwrapped module

    def model(self):
        """Returns the unwrapped model if in ddp"""
        # NOTE(review): upstream nerfstudio declares `model` and `device` as @property;
        # decorators appear to have been stripped in this excerpt -- confirm before use.
        return module_wrapper(self._model)

    def device(self):
        """Returns the device that the model is on."""
        return self.model.device

    def get_train_loss_dict(self, step: int):
        """This function gets your training loss dict. This will be responsible for
        getting the next batch of data from the DataManager and interfacing with the
        Model class, feeding the data to the model's forward function.

        Args:
            step: current iteration step to update sampler if using DDP (distributed)
        """
        # Under DDP, keep the distributed sampler in sync with the current step.
        if self.world_size > 1 and step:
            assert self.datamanager.train_sampler is not None
            self.datamanager.train_sampler.set_epoch(step)
        ray_bundle, batch = self.datamanager.next_train(step)
        model_outputs = self.model(ray_bundle, batch)
        metrics_dict = self.model.get_metrics_dict(model_outputs, batch)
        loss_dict = self.model.get_loss_dict(model_outputs, batch, metrics_dict)
        return model_outputs, loss_dict, metrics_dict

    def get_eval_loss_dict(self, step: int):
        """This function gets your evaluation loss dict. It needs to get the data
        from the DataManager and feed it to the model's forward function

        Args:
            step: current iteration step
        """
        # Temporarily switch to eval mode (disables dropout/batchnorm updates), restore after.
        self.eval()
        if self.world_size > 1:
            assert self.datamanager.eval_sampler is not None
            self.datamanager.eval_sampler.set_epoch(step)
        ray_bundle, batch = self.datamanager.next_eval(step)
        model_outputs = self.model(ray_bundle, batch)
        metrics_dict = self.model.get_metrics_dict(model_outputs, batch)
        loss_dict = self.model.get_loss_dict(model_outputs, batch, metrics_dict)
        self.train()
        return model_outputs, loss_dict, metrics_dict

    # NOTE(review): the following methods have docstring-only bodies here; upstream they
    # are abstract/overridden in subclasses -- confirm against the full source file.
    def get_eval_image_metrics_and_images(self, step: int):
        """This function gets your evaluation loss dict. It needs to get the data
        from the DataManager and feed it to the model's forward function

        Args:
            step: current iteration step
        """

    def get_average_eval_image_metrics(self, step: Optional[int] = None):
        """Iterate over all the images in the eval dataset and get the average."""

    def load_pipeline(self, loaded_state: Dict[str, Any]) -> None:
        """Load the checkpoint from the given path

        Args:
            loaded_state: pre-trained model state dict
        """

    def get_training_callbacks(
        self, training_callback_attributes: TrainingCallbackAttributes
    ) -> List[TrainingCallback]:
        """Returns the training callbacks from both the Dataloader and the Model."""

    def get_param_groups(self) -> Dict[str, List[Parameter]]:
        """Get the param groups for the pipeline.

        Returns:
            A list of dictionaries containing the pipeline's param groups.
        """
The provided code snippet includes necessary dependencies for implementing the `eval_setup` function. Write a Python function `def eval_setup( config_path: Path, eval_num_rays_per_chunk: Optional[int] = None, test_mode: Literal["test", "val", "inference"] = "test", ) -> Tuple[cfg.Config, Pipeline, Path]` to solve the following problem:
Shared setup for loading a saved pipeline for evaluation. Args: config_path: Path to config YAML file. eval_num_rays_per_chunk: Number of rays per forward pass test_mode: 'val': loads train/val datasets into memory 'test': loads train/test dataset into memory 'inference': does not load any dataset into memory Returns: Loaded config, pipeline module, and corresponding checkpoint.
Here is the function:
def eval_setup(
    config_path: Path,
    eval_num_rays_per_chunk: Optional[int] = None,
    test_mode: Literal["test", "val", "inference"] = "test",
) -> Tuple[cfg.Config, Pipeline, Path]:
    """Shared setup for loading a saved pipeline for evaluation.

    Args:
        config_path: Path to config YAML file.
        eval_num_rays_per_chunk: Number of rays per forward pass
        test_mode:
            'val': loads train/val datasets into memory
            'test': loads train/test dataset into memory
            'inference': does not load any dataset into memory

    Returns:
        Loaded config, pipeline module, and corresponding checkpoint.
    """
    # Load the saved config. NOTE: yaml.Loader can instantiate arbitrary Python
    # objects; the file is produced by training and assumed trusted.
    config = yaml.load(config_path.read_text(), Loader=yaml.Loader)
    assert isinstance(config, cfg.Config)

    if eval_num_rays_per_chunk:
        config.pipeline.model.eval_num_rays_per_chunk = eval_num_rays_per_chunk

    # Point the trainer at the checkpoints produced during training.
    # TODO: expose the ability to choose an arbitrary checkpoint
    config.trainer.load_dir = config.get_checkpoint_dir()
    config.pipeline.datamanager.eval_image_indices = None

    # Build the pipeline (which includes the DataManager) on the best available device.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    pipeline = config.pipeline.setup(device=device, test_mode=test_mode)
    assert isinstance(pipeline, Pipeline)
    pipeline.eval()

    # Restore the trained weights into the pipeline.
    checkpoint_path = eval_load_checkpoint(config.trainer, pipeline)

    return config, pipeline, checkpoint_path
162,367 | from typing import Any, Callable, Dict, List, Optional, Union
import torch
The provided code snippet includes necessary dependencies for implementing the `get_dict_to_torch` function. Write a Python function `def get_dict_to_torch(stuff: Any, device: Union[torch.device, str] = "cpu", exclude: Optional[List[str]] = None)` to solve the following problem:
Set everything in the dict to the specified torch device. Args: stuff: things to convert to torch device: machine to put the "stuff" on exclude: list of keys to skip over transferring to device
Here is the function:
def get_dict_to_torch(stuff: Any, device: Union[torch.device, str] = "cpu", exclude: Optional[List[str]] = None):
    """Set everything in the dict to the specified torch device.

    Args:
        stuff: things to convert to torch; dicts are traversed recursively,
            tensors are moved to ``device``, everything else is returned unchanged.
        device: machine to put the "stuff" on
        exclude: list of keys to skip over transferring to device
            (applies at every nesting level)

    Returns:
        The same structure (dicts are mutated in place) with tensors on ``device``.
    """
    if isinstance(stuff, dict):
        for k, v in stuff.items():
            if exclude and k in exclude:
                # Leave excluded entries untouched.
                stuff[k] = v
            else:
                # Bug fix: the recursive call previously dropped `exclude`, so keys
                # listed in `exclude` were still transferred inside nested dicts.
                stuff[k] = get_dict_to_torch(v, device, exclude)
        return stuff
    if isinstance(stuff, torch.Tensor):
        return stuff.to(device)
    return stuff
162,368 | from typing import Any, Callable, Dict, List, Optional, Union
import torch
The provided code snippet includes necessary dependencies for implementing the `get_dict_to_cpu` function. Write a Python function `def get_dict_to_cpu(stuff: Any)` to solve the following problem:
Set everything in the dict to CPU. Args: stuff: things to place onto cpu
Here is the function:
def get_dict_to_cpu(stuff: Any):
    """Set everything in the dict to CPU.

    Args:
        stuff: things to place onto cpu
    """
    # Dicts are traversed recursively and mutated in place.
    if isinstance(stuff, dict):
        for key in stuff:
            stuff[key] = get_dict_to_cpu(stuff[key])
        return stuff
    # Tensors are detached from the graph and moved to host memory.
    if isinstance(stuff, torch.Tensor):
        return stuff.detach().cpu()
    return stuff
162,369 | from typing import Any, Callable, Dict, List, Optional, Union
import torch
The provided code snippet includes necessary dependencies for implementing the `get_masked_dict` function. Write a Python function `def get_masked_dict(d, mask)` to solve the following problem:
Return a masked dictionary. TODO(ethan): add more asserts/checks so this doesn't have unpredictable behavior. Args: d: dict to process mask: mask to apply to values in dictionary
Here is the function:
def get_masked_dict(d, mask):
    """Return a new dictionary whose values are ``value[mask]`` for each entry.

    TODO(ethan): add more asserts/checks so this doesn't have unpredictable behavior.

    Args:
        d: dict to process
        mask: mask to apply to values in dictionary
    """
    return {key: value[mask] for key, value in d.items()}
162,370 | from typing import Any, Callable, Dict, List, Optional, Union
import torch
The provided code snippet includes necessary dependencies for implementing the `scale_dict` function. Write a Python function `def scale_dict(dictionary: Dict[Any, Any], coefficients: Dict[str, float]) -> Dict[Any, Any]` to solve the following problem:
Scale a dictionary in-place given a coefficients dictionary. Args: dictionary: input dict to be scaled. coefficients: scalar dict config for holding coefficients. Returns: Input dict scaled by coefficients.
Here is the function:
def scale_dict(dictionary: Dict[Any, Any], coefficients: Dict[str, float]) -> Dict[Any, Any]:
    """Scale a dictionary in-place given a coefficients dictionary.

    Args:
        dictionary: input dict to be scaled.
        coefficients: scalar dict config for holding coefficients.

    Returns:
        Input dict scaled by coefficients.
    """
    # Only keys present in both dicts are scaled; everything else is untouched.
    for key, factor in coefficients.items():
        if key in dictionary:
            dictionary[key] *= factor
    return dictionary
162,371 | from typing import Any, Callable, Dict, List, Optional, Union
import torch
The provided code snippet includes necessary dependencies for implementing the `step_check` function. Write a Python function `def step_check(step, step_size, run_at_zero=False) -> bool` to solve the following problem:
Returns true based on current step and step interval.
Here is the function:
def step_check(step, step_size, run_at_zero=False) -> bool:
    """Returns true based on current step and step interval."""
    # A zero interval means "never".
    if step_size == 0:
        return False
    # Only fire on multiples of the interval.
    if step % step_size != 0:
        return False
    # Step 0 only counts when explicitly requested.
    return run_at_zero or step != 0
162,372 | from typing import Any, Callable, Dict, List, Optional, Union
import torch
The provided code snippet includes necessary dependencies for implementing the `update_avg` function. Write a Python function `def update_avg(prev_avg: float, new_val: float, step: int) -> float` to solve the following problem:
helper to calculate the running average Args: prev_avg (float): previous average value new_val (float): new value to update the average with step (int): current step number Returns: float: new updated average
Here is the function:
def update_avg(prev_avg: float, new_val: float, step: int) -> float:
    """helper to calculate the running average

    Args:
        prev_avg (float): previous average value
        new_val (float): new value to update the average with
        step (int): current step number

    Returns:
        float: new updated average
    """
    # Incremental mean: the previous average represents `step` samples.
    total = prev_avg * step + new_val
    return total / (step + 1)
162,373 | from pathlib import Path
import numpy as np
import pymeshlab
import torch
import trimesh
from skimage import measure
# Factor-2 3D average-pooling / nearest-neighbour upsampling modules used by the
# coarse-to-fine SDF point-pyramid evaluation in `get_surface_sliding` below.
avg_pool_3d = torch.nn.AvgPool3d(2, stride=2)
upsample = torch.nn.Upsample(scale_factor=2, mode="nearest")
def get_surface_sliding(
    sdf,
    resolution=512,
    bounding_box_min=(-1.0, -1.0, -1.0),
    bounding_box_max=(1.0, 1.0, 1.0),
    return_mesh=False,
    level=0,
    coarse_mask=None,
    output_path: Path = Path("test.ply"),
    simplify_mesh=True,
):
    """Extract a triangle mesh from an SDF by marching cubes, one 512^3 crop at a time.

    The bounding box is tiled into (resolution // 512)^3 sub-volumes so that high
    resolutions fit in GPU memory. Inside each crop the SDF is evaluated
    coarse-to-fine over a point pyramid: points whose coarse-level |sdf| exceeds a
    shrinking threshold are skipped at finer levels.

    Args:
        sdf: callable mapping an (N, 3) point tensor to N signed-distance values.
        resolution: total grid resolution; must be a multiple of 512.
        bounding_box_min: minimum (x, y, z) corner of the extraction volume.
        bounding_box_max: maximum (x, y, z) corner of the extraction volume.
        return_mesh: if True, return the combined trimesh instead of writing files.
        level: requested iso-level. NOTE(review): overwritten to 0 below — confirm
            whether non-zero levels were ever intended to work.
        coarse_mask: optional coarse occupancy grid; crops/points sampling to 0 in it
            are skipped entirely.
        output_path: .ply path written when ``return_mesh`` is False.
        simplify_mesh: if True, additionally write a quadric-decimated "-simplify.ply".
    """
    assert resolution % 512 == 0
    if coarse_mask is not None:
        # we need to permute here as pytorch's grid_sample use (z, y, x)
        coarse_mask = coarse_mask.permute(2, 1, 0)[None, None].cuda().float()
    resN = resolution
    cropN = 512
    level = 0  # NOTE(review): clobbers the `level` argument.
    N = resN // cropN  # number of crops along each axis
    grid_min = bounding_box_min
    grid_max = bounding_box_max
    # Crop boundaries (N + 1 fenceposts) along each axis.
    xs = np.linspace(grid_min[0], grid_max[0], N + 1)
    ys = np.linspace(grid_min[1], grid_max[1], N + 1)
    zs = np.linspace(grid_min[2], grid_max[2], N + 1)
    meshes = []
    for i in range(N):
        for j in range(N):
            for k in range(N):
                # Dense cropN^3 sample grid for this sub-volume.
                x_min, x_max = xs[i], xs[i + 1]
                y_min, y_max = ys[j], ys[j + 1]
                z_min, z_max = zs[k], zs[k + 1]
                x = np.linspace(x_min, x_max, cropN)
                y = np.linspace(y_min, y_max, cropN)
                z = np.linspace(z_min, z_max, cropN)
                xx, yy, zz = np.meshgrid(x, y, z, indexing="ij")
                points = torch.tensor(np.vstack([xx.ravel(), yy.ravel(), zz.ravel()]).T, dtype=torch.float).cuda()
                def evaluate(points):
                    """Chunked SDF evaluation to bound peak GPU memory."""
                    z = []
                    for _, pnts in enumerate(torch.split(points, 100000, dim=0)):
                        z.append(sdf(pnts))
                    z = torch.cat(z, axis=0)
                    return z
                # construct point pyramids
                points = points.reshape(cropN, cropN, cropN, 3).permute(3, 0, 1, 2)
                if coarse_mask is not None:
                    # Sample the coarse occupancy at full resolution for this crop.
                    points_tmp = points.permute(1, 2, 3, 0)[None].cuda()
                    current_mask = torch.nn.functional.grid_sample(coarse_mask, points_tmp)
                    current_mask = (current_mask > 0.0).cpu().numpy()[0, 0]
                else:
                    current_mask = None
                # Three rounds of 2x average pooling -> 4-level pyramid, coarsest first.
                points_pyramid = [points]
                for _ in range(3):
                    points = avg_pool_3d(points[None])[0]
                    points_pyramid.append(points)
                points_pyramid = points_pyramid[::-1]
                # evaluate pyramid with mask
                mask = None
                # Initial band half-width; halved at every pyramid level.
                threshold = 2 * (x_max - x_min) / cropN * 8
                for pid, pts in enumerate(points_pyramid):
                    coarse_N = pts.shape[-1]
                    pts = pts.reshape(3, -1).permute(1, 0).contiguous()
                    if mask is None:
                        # Coarsest level: only evaluate (optionally restricted by coarse_mask).
                        if coarse_mask is not None:
                            pts_sdf = torch.ones_like(pts[:, 1])
                            valid_mask = (
                                torch.nn.functional.grid_sample(coarse_mask, pts[None, None, None])[0, 0, 0, 0] > 0
                            )
                            if valid_mask.any():
                                pts_sdf[valid_mask] = evaluate(pts[valid_mask].contiguous())
                        else:
                            pts_sdf = evaluate(pts)
                    else:
                        # Finer levels: re-evaluate only near-surface points; the rest
                        # keep their upsampled coarse values.
                        mask = mask.reshape(-1)
                        pts_to_eval = pts[mask]
                        if pts_to_eval.shape[0] > 0:
                            pts_sdf_eval = evaluate(pts_to_eval.contiguous())
                            pts_sdf[mask] = pts_sdf_eval
                    if pid < 3:
                        # Build the next level's near-surface mask and upsample values.
                        mask = torch.abs(pts_sdf) < threshold
                        mask = mask.reshape(coarse_N, coarse_N, coarse_N)[None, None]
                        mask = upsample(mask.float()).bool()
                        pts_sdf = pts_sdf.reshape(coarse_N, coarse_N, coarse_N)[None, None]
                        pts_sdf = upsample(pts_sdf)
                        pts_sdf = pts_sdf.reshape(-1)
                    threshold /= 2.0
                z = pts_sdf.detach().cpu().numpy()
                # skip if no surface found
                if current_mask is not None:
                    valid_z = z.reshape(cropN, cropN, cropN)[current_mask]
                    if valid_z.shape[0] <= 0 or (np.min(valid_z) > level or np.max(valid_z) < level):
                        continue
                if not (np.min(z) > level or np.max(z) < level):
                    z = z.astype(np.float32)
                    verts, faces, normals, _ = measure.marching_cubes(
                        volume=z.reshape(cropN, cropN, cropN),
                        level=level,
                        spacing=(
                            (x_max - x_min) / (cropN - 1),
                            (y_max - y_min) / (cropN - 1),
                            (z_max - z_min) / (cropN - 1),
                        ),
                        mask=current_mask,
                    )
                    # Shift crop-local coordinates back into world space.
                    verts = verts + np.array([x_min, y_min, z_min])
                    meshcrop = trimesh.Trimesh(verts, faces, normals)
                    meshes.append(meshcrop)
    combined = trimesh.util.concatenate(meshes)
    if return_mesh:
        return combined
    else:
        filename = str(output_path)
        filename_simplify = str(output_path).replace(".ply", "-simplify.ply")
        # Weld duplicated vertices along crop seams before export.
        combined.merge_vertices(digits_vertex=6)
        combined.export(filename)
        if simplify_mesh:
            ms = pymeshlab.MeshSet()
            ms.load_new_mesh(filename)
            print("simply mesh")
            ms.meshing_decimation_quadric_edge_collapse(targetfacenum=2000000)
            ms.save_current_mesh(filename_simplify, save_face_color=False)
162,374 | from pathlib import Path
import numpy as np
import pymeshlab
import torch
import trimesh
from skimage import measure
def get_surface_occupancy(
    occupancy_fn,
    resolution=512,
    bounding_box_min=(-1.0, -1.0, -1.0),
    bounding_box_max=(1.0, 1.0, 1.0),
    return_mesh=False,
    level=0.5,
    device=None,
    output_path: Path = Path("test.ply"),
):
    """Run marching cubes on a dense occupancy grid and export the result as .ply.

    Args:
        occupancy_fn: callable mapping an (N, 3) point tensor to N occupancy values.
        resolution: grid resolution along each axis.
        bounding_box_min: minimum (x, y, z) corner of the sampled volume.
        bounding_box_max: maximum (x, y, z) corner of the sampled volume.
        return_mesh: NOTE(review): unused — the mesh is always written to disk,
            never returned. Confirm whether a return path was intended.
        level: iso-level at which to extract the surface (0.5 for occupancy).
        device: torch device the sample points are created on.
        output_path: .ply file to write.
    """
    grid_min = bounding_box_min
    grid_max = bounding_box_max
    N = resolution
    xs = np.linspace(grid_min[0], grid_max[0], N)
    ys = np.linspace(grid_min[1], grid_max[1], N)
    zs = np.linspace(grid_min[2], grid_max[2], N)
    xx, yy, zz = np.meshgrid(xs, ys, zs, indexing="ij")
    points = torch.tensor(np.vstack([xx.ravel(), yy.ravel(), zz.ravel()]).T, dtype=torch.float).to(device=device)
    def evaluate(points):
        """Chunked evaluation to bound peak memory."""
        z = []
        for _, pnts in enumerate(torch.split(points, 100000, dim=0)):
            z.append(occupancy_fn(pnts.contiguous()).contiguous())
        z = torch.cat(z, axis=0)
        return z
    z = evaluate(points).detach().cpu().numpy()
    # Only run marching cubes if the iso-level is actually crossed somewhere.
    if not (np.min(z) > level or np.max(z) < level):
        verts, faces, normals, _ = measure.marching_cubes(
            volume=z.reshape(resolution, resolution, resolution),
            level=level,
            spacing=(
                (grid_max[0] - grid_min[0]) / (N - 1),
                (grid_max[1] - grid_min[1]) / (N - 1),
                (grid_max[2] - grid_min[2]) / (N - 1),
            ),
        )
        # Shift grid-local coordinates into world space.
        verts = verts + np.array(grid_min)
        output_path.parent.mkdir(parents=True, exist_ok=True)
        # NOTE(review): `normals` (vertex normals from marching_cubes) is passed as the
        # third positional argument, which trimesh treats as face normals — verify.
        meshexport = trimesh.Trimesh(verts, faces, normals)
        meshexport.export(str(output_path))
    else:
        print("=================================================no surface skip")
162,375 | from pathlib import Path
import numpy as np
import pymeshlab
import torch
import trimesh
from skimage import measure
max_pool_3d = torch.nn.MaxPool3d(3, stride=1, padding=1)
def get_surface_sliding_with_contraction(
    sdf,
    resolution=512,
    bounding_box_min=(-1.0, -1.0, -1.0),
    bounding_box_max=(1.0, 1.0, 1.0),
    return_mesh=False,
    level=0,
    coarse_mask=None,
    output_path: Path = Path("test.ply"),
    simplify_mesh=True,
    inv_contraction=None,
    max_range=32.0,
):
    """Extract a mesh from an SDF defined in contracted space, crop by crop.

    Like ``get_surface_sliding`` but evaluates only where ``coarse_mask`` is
    occupied, smooths mask boundaries with a min-pool to suppress marching-cube
    artefacts, and finally maps vertices back to world space via
    ``inv_contraction``.

    Args:
        sdf: callable mapping an (N, 3) point tensor to N signed-distance values.
        resolution: total grid resolution; must be a multiple of 512.
        bounding_box_min: minimum corner of the (contracted-space) volume.
        bounding_box_max: maximum corner of the (contracted-space) volume.
        return_mesh: if True, return the combined trimesh instead of writing files.
        level: requested iso-level. NOTE(review): overwritten to 0 below.
        coarse_mask: required coarse occupancy grid; assumed to live on a [-1, 1]
            grid_sample domain (points are scaled by 0.5 before sampling, i.e. the
            crop coordinates are assumed to span [-2, 2] — confirm).
        output_path: .ply path written when ``return_mesh`` is False.
        simplify_mesh: if True, additionally write a quadric-decimated copy.
        inv_contraction: optional callable undoing the scene contraction on vertices.
        max_range: vertices are clipped to [-max_range, max_range] after un-contracting.
    """
    assert resolution % 512 == 0
    resN = resolution
    cropN = 512
    level = 0  # NOTE(review): clobbers the `level` argument.
    N = resN // cropN  # number of crops along each axis
    grid_min = bounding_box_min
    grid_max = bounding_box_max
    # Crop boundaries (N + 1 fenceposts) along each axis.
    xs = np.linspace(grid_min[0], grid_max[0], N + 1)
    ys = np.linspace(grid_min[1], grid_max[1], N + 1)
    zs = np.linspace(grid_min[2], grid_max[2], N + 1)
    meshes = []
    for i in range(N):
        for j in range(N):
            for k in range(N):
                print(i, j, k)
                x_min, x_max = xs[i], xs[i + 1]
                y_min, y_max = ys[j], ys[j + 1]
                z_min, z_max = zs[k], zs[k + 1]
                x = np.linspace(x_min, x_max, cropN)
                y = np.linspace(y_min, y_max, cropN)
                z = np.linspace(z_min, z_max, cropN)
                xx, yy, zz = np.meshgrid(x, y, z, indexing="ij")
                points = torch.tensor(np.vstack([xx.ravel(), yy.ravel(), zz.ravel()]).T, dtype=torch.float).cuda()
                @torch.no_grad()
                def evaluate(points):
                    """Chunked SDF evaluation to bound peak GPU memory."""
                    z = []
                    for _, pnts in enumerate(torch.split(points, 100000, dim=0)):
                        z.append(sdf(pnts))
                    z = torch.cat(z, axis=0)
                    return z
                # construct point pyramids
                points = points.reshape(cropN, cropN, cropN, 3)
                # query coarse grids
                points_tmp = points[None].cuda() * 0.5  # normalize from [-2, 2] to [-1, 1]
                current_mask = torch.nn.functional.grid_sample(coarse_mask, points_tmp)
                points = points.reshape(-1, 3)
                valid_mask = current_mask.reshape(-1) > 0
                pts_to_eval = points[valid_mask]
                print(current_mask.float().mean())
                # Unoccupied points get a large positive SDF (far outside the surface).
                pts_sdf = torch.ones_like(points[..., 0]) * 100.0
                print(pts_sdf.shape, pts_to_eval.shape, points.shape)
                if pts_to_eval.shape[0] > 0:
                    pts_sdf_eval = evaluate(pts_to_eval.contiguous())
                    pts_sdf[valid_mask.reshape(-1)] = pts_sdf_eval
                # use min_pooling to remove masked marching cube artefacts
                # (min-pool implemented as negated max-pool).
                min_sdf = max_pool_3d(pts_sdf.reshape(1, 1, cropN, cropN, cropN) * -1.0) * -1.0
                min_mask = (current_mask > 0.0).float()
                # Inside the mask keep evaluated values; outside, use the pooled ones.
                pts_sdf = pts_sdf.reshape(1, 1, cropN, cropN, cropN) * min_mask + min_sdf * (1.0 - min_mask)
                z = pts_sdf.detach().cpu().numpy()
                current_mask = (current_mask > 0.0).cpu().numpy()[0, 0]
                # skip if no surface found
                if current_mask is not None:
                    valid_z = z.reshape(cropN, cropN, cropN)[current_mask]
                    if valid_z.shape[0] <= 0 or (np.min(valid_z) > level or np.max(valid_z) < level):
                        continue
                if not (np.min(z) > level or np.max(z) < level):
                    z = z.astype(np.float32)
                    verts, faces, normals, _ = measure.marching_cubes(
                        volume=z.reshape(cropN, cropN, cropN),
                        level=level,
                        spacing=(
                            (x_max - x_min) / (cropN - 1),
                            (y_max - y_min) / (cropN - 1),
                            (z_max - z_min) / (cropN - 1),
                        ),
                        mask=current_mask,
                    )
                    # Shift crop-local coordinates back into (contracted) world space.
                    verts = verts + np.array([x_min, y_min, z_min])
                    meshcrop = trimesh.Trimesh(verts, faces, normals)
                    meshes.append(meshcrop)
    combined = trimesh.util.concatenate(meshes)
    # Weld duplicated vertices along crop seams.
    combined.merge_vertices(digits_vertex=6)
    # inverse contraction and clipping the points range
    if inv_contraction is not None:
        combined.vertices = inv_contraction(torch.from_numpy(combined.vertices)).numpy()
        combined.vertices = np.clip(combined.vertices, -max_range, max_range)
    if return_mesh:
        return combined
    else:
        filename = str(output_path)
        filename_simplify = str(output_path).replace(".ply", "-simplify.ply")
        combined.export(filename)
        if simplify_mesh:
            ms = pymeshlab.MeshSet()
            ms.load_new_mesh(filename)
            print("simply mesh")
            ms.meshing_decimation_quadric_edge_collapse(targetfacenum=2000000)
            ms.save_current_mesh(filename_simplify, save_face_color=False)
162,377 | from math import floor, log
from rich.console import Console
CONSOLE = Console(width=120)
The provided code snippet includes necessary dependencies for implementing the `print_tcnn_speed_warning` function. Write a Python function `def print_tcnn_speed_warning(method_name: str)` to solve the following problem:
Prints a warning about the speed of the TCNN.
Here is the function:
def print_tcnn_speed_warning(method_name: str):
    """Warn on the console that a slow (non-tcnn) fallback implementation is in use."""
    messages = [
        f"[bold yellow]WARNING: Using a slow implementation of {method_name}. ",
        (
            "[bold yellow]:person_running: :person_running: "
            "Install tcnn for speedups :person_running: :person_running:"
        ),
        "[yellow]pip install git+https://github.com/NVlabs/tiny-cuda-nn/#subdirectory=bindings/torch",
    ]
    CONSOLE.line()
    for message in messages:
        CONSOLE.print(message)
    CONSOLE.line()
162,378 | from math import floor, log
from rich.console import Console
The provided code snippet includes necessary dependencies for implementing the `human_format` function. Write a Python function `def human_format(num)` to solve the following problem:
Format a number in a more human readable way Args: num: number to format
Here is the function:
def human_format(num):
    """Format a number in a more human readable way.

    Renders ``num`` with two decimal places and an SI-style suffix
    (K, M, B, T, P) chosen by thousands of magnitude.

    Args:
        num: number to format (non-negative).

    Returns:
        str: formatted string, e.g. ``"1.23 K"``.
    """
    units = ["", "K", "M", "B", "T", "P"]
    k = 1000.0
    if num == 0:
        # log(0) is undefined; zero needs no suffix.
        return "0.00 "
    magnitude = int(floor(log(abs(num), k)))
    # Clamp so values < 1 get no suffix (previously indexed units[-1] -> "P")
    # and values >= 1000^6 use the largest suffix instead of raising IndexError.
    magnitude = max(0, min(magnitude, len(units) - 1))
    return f"{(num / k**magnitude):.2f} {units[magnitude]}"
162,379 | from typing import Union
import torch
from torchtyping import TensorType
COLORS_DICT = {
"white": WHITE,
"black": BLACK,
"red": RED,
"green": GREEN,
"blue": BLUE,
}
The provided code snippet includes necessary dependencies for implementing the `get_color` function. Write a Python function `def get_color(color: Union[str, list]) -> TensorType[3]` to solve the following problem:
Args: color (Union[str, list]): Color as a string or a rgb list Returns: TensorType[3]: Parsed color
Here is the function:
def get_color(color: Union[str, list]) -> TensorType[3]:
    """Resolve a color given either a preset name or an explicit RGB triple.

    Args:
        color (Union[str, list]): Color as a string or a rgb list
    Returns:
        TensorType[3]: Parsed color
    """
    if isinstance(color, list):
        # Explicit RGB triple: validate length and wrap in a tensor.
        if len(color) != 3:
            raise ValueError(f"Color should be 3 values (RGB) instead got {color}")
        return torch.tensor(color)
    if isinstance(color, str):
        # Preset name: case-insensitive lookup.
        color = color.lower()
        if color not in COLORS_DICT:
            raise ValueError(f"{color} is not a valid preset color")
        return COLORS_DICT[color]
    raise ValueError(f"Color should be an RGB list or string, instead got {type(color)}")
162,381 | from __future__ import annotations
import time
from typing import Callable
from rich.console import Console
from nerfstudio.configs import base_config as cfg
from nerfstudio.utils import comms
from nerfstudio.utils.decorators import (
check_main_thread,
check_profiler_enabled,
decorate_all,
)
PROFILER = []
The provided code snippet includes necessary dependencies for implementing the `time_function` function. Write a Python function `def time_function(func: Callable) -> Callable` to solve the following problem:
Decorator: time a function call
Here is the function:
def time_function(func: Callable) -> Callable:
    """Decorator: time a function call"""

    def wrapper(*args, **kwargs):
        started_at = time.time()
        result = func(*args, **kwargs)
        # Only record when a profiler has been registered for this process.
        if PROFILER:
            PROFILER[0].update_time(func.__qualname__, started_at, time.time())
        return result

    return wrapper
162,382 | from __future__ import annotations
import time
from typing import Callable
from rich.console import Console
from nerfstudio.configs import base_config as cfg
from nerfstudio.utils import comms
from nerfstudio.utils.decorators import (
check_main_thread,
check_profiler_enabled,
decorate_all,
)
PROFILER = []
class Profiler:
    """Accumulates per-function running-average call durations."""

    def __init__(self, config: cfg.LoggingConfig):
        self.config = config
        # Maps function name -> {"val": running mean duration, "step": call count}.
        self.profiler_dict = {}

    def update_time(self, func_name: str, start_time: float, end_time: float):
        """update the profiler dictionary with running averages of durations

        Args:
            func_name: the function name that is being profiled
            start_time: the start time when function is called
            end_time: the end time when function terminated
        """
        duration = end_time - start_time
        entry = self.profiler_dict.get(func_name, {"val": 0, "step": 0})
        avg = entry["val"]
        count = entry["step"]
        self.profiler_dict[func_name] = {"val": (avg * count + duration) / (count + 1), "step": count + 1}

    def print_profile(self):
        """helper to print out the profiler stats"""
        CONSOLE.print("Printing profiling stats, from longest to shortest duration in seconds")
        ranked = sorted(self.profiler_dict.items(), key=lambda item: item[1]["val"], reverse=True)
        for name, entry in ranked:
            formatted = f"{entry['val']:0.4f}"
            CONSOLE.print(f"{name:<20}: {formatted:<20}")
The provided code snippet includes necessary dependencies for implementing the `setup_profiler` function. Write a Python function `def setup_profiler(config: cfg.LoggingConfig)` to solve the following problem:
Initialization of profilers
Here is the function:
def setup_profiler(config: cfg.LoggingConfig):
    """Initialization of profilers"""
    # Only the main process keeps a profiler; worker ranks record nothing.
    if not comms.is_main_process():
        return
    PROFILER.append(Profiler(config))
162,383 | from typing import Optional
import torch
from matplotlib import cm
from torchtyping import TensorType
from nerfstudio.utils import colors
def apply_colormap(image: TensorType["bs":..., 1], cmap="viridis") -> TensorType["bs":..., "rgb":3]:
    """Convert single channel to a color image.

    Args:
        image: Single channel image, values in [0, 1].
        cmap: Colormap for image.
    Returns:
        TensorType: Colored image
    """
    # Build a 256-entry lookup table from the matplotlib colormap.
    lut = torch.tensor(cm.get_cmap(cmap).colors).to(image.device)  # type: ignore
    indices = (image * 255).long()
    lo = torch.min(indices)
    hi = torch.max(indices)
    assert lo >= 0, f"the min value is {lo}"
    assert hi <= 255, f"the max value is {hi}"
    return lut[indices[..., 0]]
The provided code snippet includes necessary dependencies for implementing the `apply_depth_colormap` function. Write a Python function `def apply_depth_colormap( depth: TensorType["bs":..., 1], accumulation: Optional[TensorType["bs":..., 1]] = None, near_plane: Optional[float] = None, far_plane: Optional[float] = None, cmap="turbo", ) -> TensorType["bs":..., "rgb":3]` to solve the following problem:
Converts a depth image to color for easier analysis. Args: depth: Depth image. accumulation: Ray accumulation used for masking vis. near_plane: Closest depth to consider. If None, use min image value. far_plane: Furthest depth to consider. If None, use max image value. cmap: Colormap to apply. Returns: Colored depth image
Here is the function:
def apply_depth_colormap(
    depth: TensorType["bs":..., 1],
    accumulation: Optional[TensorType["bs":..., 1]] = None,
    near_plane: Optional[float] = None,
    far_plane: Optional[float] = None,
    cmap="turbo",
) -> TensorType["bs":..., "rgb":3]:
    """Converts a depth image to color for easier analysis.

    Args:
        depth: Depth image.
        accumulation: Ray accumulation used for masking vis.
        near_plane: Closest depth to consider. If None, use min image value.
        far_plane: Furthest depth to consider. If None, use max image value.
        cmap: Colormap to apply.

    Returns:
        Colored depth image
    """
    # Explicit None checks: the previous `near_plane or ...` silently discarded
    # a caller-supplied near plane of 0.0 (a perfectly valid value).
    if near_plane is None:
        near_plane = float(torch.min(depth))
    if far_plane is None:
        far_plane = float(torch.max(depth))
    # Normalize to [0, 1]; the epsilon guards against a constant depth image.
    depth = (depth - near_plane) / (far_plane - near_plane + 1e-10)
    depth = torch.clip(depth, 0, 1)
    colored_image = apply_colormap(depth, cmap=cmap)
    if accumulation is not None:
        # Blend toward white where accumulation is low (background pixels).
        colored_image = colored_image * accumulation + (1 - accumulation)
    return colored_image
162,384 | from typing import Optional
import torch
from matplotlib import cm
from torchtyping import TensorType
from nerfstudio.utils import colors
The provided code snippet includes necessary dependencies for implementing the `apply_boolean_colormap` function. Write a Python function `def apply_boolean_colormap( image: TensorType["bs":..., 1, bool], true_color: TensorType["bs":..., "rgb":3] = colors.WHITE, false_color: TensorType["bs":..., "rgb":3] = colors.BLACK, ) -> TensorType["bs":..., "rgb":3]` to solve the following problem:
Converts a boolean image to color for easier analysis. Args: image: Boolean image. true_color: Color to use for True. false_color: Color to use for False. Returns: Colored boolean image
Here is the function:
def apply_boolean_colormap(
    image: TensorType["bs":..., 1, bool],
    true_color: TensorType["bs":..., "rgb":3] = colors.WHITE,
    false_color: TensorType["bs":..., "rgb":3] = colors.BLACK,
) -> TensorType["bs":..., "rgb":3]:
    """Color a boolean mask image for easier analysis.

    Args:
        image: Boolean image.
        true_color: Color to use for True.
        false_color: Color to use for False.
    Returns:
        Colored boolean image
    """
    mask = image[..., 0]
    colored = torch.ones(image.shape[:-1] + (3,))
    colored[mask, :] = true_color
    colored[~mask, :] = false_color
    return colored
162,385 | from __future__ import annotations
import random
import socket
import traceback
from datetime import timedelta
from typing import Any, Callable, Optional
import numpy as np
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import tyro
import yaml
from rich.console import Console
from nerfstudio.configs import base_config as cfg
from nerfstudio.configs.config_utils import convert_markup_to_ansi
from nerfstudio.configs.method_configs import AnnotatedBaseConfigUnion
from nerfstudio.engine.trainer import Trainer
from nerfstudio.utils import comms, profiler
torch.backends.cudnn.benchmark = True
torch.set_float32_matmul_precision("high")
def _set_random_seed(seed) -> None:
    """Set randomness seed in torch and numpy"""
    # Seed every RNG source the training stack draws from, for reproducibility.
    for seed_fn in (random.seed, np.random.seed, torch.manual_seed):
        seed_fn(seed)
class Trainer:
    """Trainer class

    Orchestrates the full training run: builds the pipeline and optimizers,
    runs the train/eval loops, drives the viewer, and saves/loads checkpoints.

    Args:
        config: The configuration object.
        local_rank: Local rank of the process.
        world_size: World size of the process.
    Attributes:
        config: The configuration object.
        local_rank: Local rank of the process.
        world_size: World size of the process.
        device: The device to run the training on.
        pipeline: The pipeline object.
        optimizers: The optimizers object.
        callbacks: The callbacks object.
    """
    pipeline: VanillaPipeline
    optimizers: Optimizers
    callbacks: List[TrainingCallback]
    def __init__(self, config: cfg.Config, local_rank: int = 0, world_size: int = 1):
        self.config = config
        self.local_rank = local_rank
        self.world_size = world_size
        # world_size == 0 signals CPU-only training.
        self.device = "cpu" if world_size == 0 else f"cuda:{local_rank}"
        self.mixed_precision = self.config.trainer.mixed_precision
        if self.device == "cpu":
            self.mixed_precision = False
            CONSOLE.print("Mixed precision is disabled for CPU training.")
        self._start_step = 0
        # optimizers
        self.grad_scaler = GradScaler(enabled=self.mixed_precision)
        self.base_dir = config.get_base_dir()
        # directory to save checkpoints
        self.checkpoint_dir = config.get_checkpoint_dir()
        CONSOLE.log(f"Saving checkpoints to: {self.checkpoint_dir}")
        # set up viewer if enabled (only on the rank-0 process)
        viewer_log_path = self.base_dir / config.viewer.relative_log_filename
        self.viewer_state, banner_messages = None, None
        if self.config.is_viewer_enabled() and local_rank == 0:
            self.viewer_state, banner_messages = viewer_utils.setup_viewer(config.viewer, log_filename=viewer_log_path)
            self._check_viewer_warnings()
        # set up writers/profilers if enabled
        writer_log_path = self.base_dir / config.logging.relative_log_dir
        writer.setup_event_writer(config, log_dir=writer_log_path)
        writer.setup_local_writer(
            config.logging, max_iter=config.trainer.max_num_iterations, banner_messages=banner_messages
        )
        writer.put_config(name="config", config_dict=dataclasses.asdict(config), step=0)
        profiler.setup_profiler(config.logging)
    def setup(self, test_mode: Literal["test", "val", "inference"] = "val"):
        """Setup the Trainer by calling other setup functions.

        Args:
            test_mode:
                'val': loads train/val datasets into memory
                'test': loads train/test dataset into memory
                'inference': does not load any dataset into memory
        """
        self.pipeline = self.config.pipeline.setup(
            device=self.device, test_mode=test_mode, world_size=self.world_size, local_rank=self.local_rank
        )
        self.optimizers = setup_optimizers(self.config, self.pipeline.get_param_groups())
        # Must happen after pipeline/optimizer construction so state can be restored.
        self._load_checkpoint()
        self.callbacks = self.pipeline.get_training_callbacks(
            TrainingCallbackAttributes(
                optimizers=self.optimizers,  # type: ignore
                grad_scaler=self.grad_scaler,  # type: ignore
                pipeline=self.pipeline,  # type: ignore
                config=self.config.trainer,  # type: ignore
            )
        )
    def train(self) -> None:
        """Train the model."""
        assert self.pipeline.datamanager.train_dataset is not None, "Missing DatsetInputs"
        self._init_viewer_state()
        with TimeWriter(writer, EventName.TOTAL_TRAIN_TIME):
            num_iterations = self.config.trainer.max_num_iterations
            step = 0
            for step in range(self._start_step, self._start_step + num_iterations):
                with TimeWriter(writer, EventName.ITER_TRAIN_TIME, step=step) as train_t:
                    self.pipeline.train()
                    # training callbacks before the training iteration
                    for callback in self.callbacks:
                        callback.run_callback_at_location(
                            step, location=TrainingCallbackLocation.BEFORE_TRAIN_ITERATION
                        )
                    # time the forward pass
                    loss, loss_dict, metrics_dict = self.train_iteration(step)
                    # training callbacks after the training iteration
                    for callback in self.callbacks:
                        callback.run_callback_at_location(step, location=TrainingCallbackLocation.AFTER_TRAIN_ITERATION)
                # Skip the first two steps to avoid skewed timings that break the viewer rendering speed estimate.
                if step > 1:
                    writer.put_time(
                        name=EventName.TRAIN_RAYS_PER_SEC,
                        duration=self.config.pipeline.datamanager.train_num_rays_per_batch / train_t.duration,
                        step=step,
                        avg_over_steps=True,
                    )
                self._update_viewer_state(step)
                # a batch of train rays
                if step_check(step, self.config.logging.steps_per_log, run_at_zero=True):
                    writer.put_scalar(name="Train Loss", scalar=loss, step=step)
                    writer.put_dict(name="Train Loss Dict", scalar_dict=loss_dict, step=step)
                    writer.put_dict(name="Train Metrics Dict", scalar_dict=metrics_dict, step=step)
                self.eval_iteration(step)
                if step_check(step, self.config.trainer.steps_per_save):
                    self.save_checkpoint(step)
                writer.write_out_storage()
        # save checkpoint at the end of training
        self.save_checkpoint(step)
        CONSOLE.rule()
        CONSOLE.print("[bold green]:tada: :tada: :tada: Training Finished :tada: :tada: :tada:", justify="center")
        if not self.config.viewer.quit_on_train_completion:
            CONSOLE.print("Use ctrl+c to quit", justify="center")
            self._always_render(step)
    def _always_render(self, step):
        # Blocks forever serving viewer renders; only exits when the process is killed.
        if self.config.is_viewer_enabled():
            while True:
                self.viewer_state.vis["renderingState/isTraining"].write(False)
                self._update_viewer_state(step)
    def _check_viewer_warnings(self) -> None:
        """Helper to print out any warnings regarding the way the viewer/loggers are enabled"""
        if self.config.is_viewer_enabled():
            string = (
                "[NOTE] Not running eval iterations since only viewer is enabled."
                " Use [yellow]--vis wandb[/yellow] or [yellow]--vis tensorboard[/yellow] to run with eval instead."
            )
            CONSOLE.print(f"{string}")
    def _init_viewer_state(self) -> None:
        """Initializes viewer scene with given train dataset"""
        assert self.viewer_state and self.pipeline.datamanager.train_dataset
        self.viewer_state.init_scene(
            dataset=self.pipeline.datamanager.train_dataset,
            start_train=self.config.viewer.start_train,
        )
        if not self.config.viewer.start_train:
            # Render-only mode: never returns (see _always_render).
            self._always_render(self._start_step)
    def _update_viewer_state(self, step: int):
        """Updates the viewer state by rendering out scene with current pipeline
        Returns the time taken to render scene.

        Args:
            step: current train step
        """
        assert self.viewer_state is not None
        with TimeWriter(writer, EventName.ITER_VIS_TIME, step=step) as _:
            num_rays_per_batch = self.config.pipeline.datamanager.train_num_rays_per_batch
            try:
                self.viewer_state.update_scene(self, step, self.pipeline.model, num_rays_per_batch)
            except RuntimeError:
                # Assumed to be a GPU OOM from rendering; surface it in the viewer UI.
                time.sleep(0.03)  # sleep to allow buffer to reset
                assert self.viewer_state.vis is not None
                self.viewer_state.vis["renderingState/log_errors"].write(
                    "Error: GPU out of memory. Reduce resolution to prevent viewer from crashing."
                )
    def _update_viewer_rays_per_sec(self, train_t: TimeWriter, vis_t: TimeWriter, step: int):
        """Performs update on rays/sec calculation for training

        Args:
            train_t: timer object carrying time to execute total training iteration
            vis_t: timer object carrying time to execute visualization step
            step: current step
        """
        train_num_rays_per_batch = self.config.pipeline.datamanager.train_num_rays_per_batch
        writer.put_time(
            name=EventName.TRAIN_RAYS_PER_SEC,
            duration=train_num_rays_per_batch / (train_t.duration - vis_t.duration),
            step=step,
            avg_over_steps=True,
        )
    def _load_checkpoint(self) -> None:
        """Helper function to load pipeline and optimizer from prespecified checkpoint"""
        load_dir = self.config.trainer.load_dir
        if load_dir is not None:
            load_step = self.config.trainer.load_step
            if load_step is None:
                print("Loading latest checkpoint from load_dir")
                # NOTE: this is specific to the checkpoint name format
                load_step = sorted(int(x[x.find("-") + 1 : x.find(".")]) for x in os.listdir(load_dir))[-1]
            load_path = load_dir / f"step-{load_step:09d}.ckpt"
            assert load_path.exists(), f"Checkpoint {load_path} does not exist"
            loaded_state = torch.load(load_path, map_location="cpu")
            # Resume one step past the checkpointed step.
            self._start_step = loaded_state["step"] + 1
            # load the checkpoints for pipeline, optimizers, and gradient scalar
            self.pipeline.load_pipeline(loaded_state["pipeline"])
            self.optimizers.load_optimizers(loaded_state["optimizers"])
            if "schedulers" in loaded_state and self.config.trainer.load_scheduler:
                self.optimizers.load_schedulers(loaded_state["schedulers"])
            self.grad_scaler.load_state_dict(loaded_state["scalers"])
            CONSOLE.print(f"done loading checkpoint from {load_path}")
        else:
            CONSOLE.print("No checkpoints to load, training from scratch")
    def save_checkpoint(self, step: int) -> None:
        """Save the model and optimizers

        Args:
            step: number of steps in training for given checkpoint
        """
        # possibly make the checkpoint directory
        if not self.checkpoint_dir.exists():
            self.checkpoint_dir.mkdir(parents=True, exist_ok=True)
        # save the checkpoint
        ckpt_path = self.checkpoint_dir / f"step-{step:09d}.ckpt"
        torch.save(
            {
                "step": step,
                # Unwrap DDP ("module") when present so keys match on reload.
                "pipeline": self.pipeline.module.state_dict()  # type: ignore
                if hasattr(self.pipeline, "module")
                else self.pipeline.state_dict(),
                "optimizers": {k: v.state_dict() for (k, v) in self.optimizers.optimizers.items()},
                "schedulers": {k: v.state_dict() for (k, v) in self.optimizers.schedulers.items()},
                "scalers": self.grad_scaler.state_dict(),
            },
            ckpt_path,
        )
        # possibly delete old checkpoints
        if self.config.trainer.save_only_latest_checkpoint:
            # delete everything else in the checkpoint folder
            for f in self.checkpoint_dir.glob("*"):
                if f != ckpt_path:
                    f.unlink()
    def train_iteration(self, step: int) -> Tuple[torch.Tensor, Dict[str, torch.Tensor], Dict[str, torch.Tensor]]:
        """Run one iteration with a batch of inputs. Returns dictionary of model losses.

        Args:
            step: Current training step.
        """
        self.optimizers.zero_grad_all()
        cpu_or_cuda_str = self.device.split(":")[0]
        # NOTE(review): accumulated losses are summed (not averaged) across
        # accumulate_grad_steps — confirm this is the intended scaling.
        for _ in range(self.config.trainer.accumulate_grad_steps):
            with torch.autocast(device_type=cpu_or_cuda_str, enabled=self.mixed_precision):
                _, loss_dict, metrics_dict = self.pipeline.get_train_loss_dict(step=step)
                loss = functools.reduce(torch.add, loss_dict.values())
            self.grad_scaler.scale(loss).backward()  # type: ignore
        self.optimizers.optimizer_scaler_step_all(self.grad_scaler)
        self.grad_scaler.update()
        self.optimizers.scheduler_step_all(step)
        # only return the last accumulate_grad_step's loss and metric for logging
        # Merging loss and metrics dict into a single output.
        return loss, loss_dict, metrics_dict
    def eval_iteration(self, step):
        """Run one iteration with different batch/image/all image evaluations depending on step size.

        Args:
            step: Current training step.
        """
        # a batch of eval rays
        if step_check(step, self.config.trainer.steps_per_eval_batch, run_at_zero=self.config.trainer.sanity_check):
            _, eval_loss_dict, eval_metrics_dict = self.pipeline.get_eval_loss_dict(step=step)
            eval_loss = functools.reduce(torch.add, eval_loss_dict.values())
            writer.put_scalar(name="Eval Loss", scalar=eval_loss, step=step)
            writer.put_dict(name="Eval Loss Dict", scalar_dict=eval_loss_dict, step=step)
            writer.put_dict(name="Eval Metrics Dict", scalar_dict=eval_metrics_dict, step=step)
        # one eval image
        if step_check(step, self.config.trainer.steps_per_eval_image, run_at_zero=self.config.trainer.sanity_check):
            with TimeWriter(writer, EventName.TEST_RAYS_PER_SEC, write=False) as test_t:
                metrics_dict, images_dict = self.pipeline.get_eval_image_metrics_and_images(step=step)
            writer.put_time(
                name=EventName.TEST_RAYS_PER_SEC,
                duration=metrics_dict["num_rays"] / test_t.duration,
                step=step,
                avg_over_steps=True,
            )
            writer.put_dict(name="Eval Images Metrics", scalar_dict=metrics_dict, step=step)
            group = "Eval Images"
            for image_name, image in images_dict.items():
                writer.put_image(name=group + "/" + image_name, image=image, step=step)
        # all eval images
        if step_check(step, self.config.trainer.steps_per_eval_all_images):
            metrics_dict, _ = self.pipeline.get_average_eval_image_metrics(step=step)
            writer.put_dict(name="Eval Images Metrics Dict (all images)", scalar_dict=metrics_dict, step=step)
The provided code snippet includes necessary dependencies for implementing the `train_loop` function. Write a Python function `def train_loop(local_rank: int, world_size: int, config: cfg.Config, global_rank: int = 0)` to solve the following problem:
Main training function that sets up and runs the trainer per process Args: local_rank: current rank of process world_size: total number of gpus available config: config file specifying training regimen
Here is the function:
def train_loop(local_rank: int, world_size: int, config: cfg.Config, global_rank: int = 0):
    """Main training function that sets up and runs the trainer per process

    Args:
        local_rank: current rank of process
        world_size: total number of gpus available
        config: config file specifying training regimen
        global_rank: rank across all machines; used to offset the RNG seed
    """
    # Offset the seed by global rank so each process draws a distinct random stream.
    _set_random_seed(config.machine.seed + global_rank)
    torch.cuda.set_device(local_rank)
    trainer = Trainer(config, local_rank, world_size)
    trainer.setup()
    trainer.train()
162,386 | from __future__ import annotations
import random
import socket
import traceback
from datetime import timedelta
from typing import Any, Callable, Optional
import numpy as np
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import tyro
import yaml
from rich.console import Console
from nerfstudio.configs import base_config as cfg
from nerfstudio.configs.config_utils import convert_markup_to_ansi
from nerfstudio.configs.method_configs import AnnotatedBaseConfigUnion
from nerfstudio.engine.trainer import Trainer
from nerfstudio.utils import comms, profiler
CONSOLE = Console(width=120)
DEFAULT_TIMEOUT = timedelta(minutes=30)
def _find_free_port() -> str:
"""Finds a free port."""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("", 0))
port = sock.getsockname()[1]
sock.close()
return port
def _distributed_worker(
    local_rank: int,
    main_func: Callable,
    world_size: int,
    num_gpus_per_machine: int,
    machine_rank: int,
    dist_url: str,
    config: cfg.Config,
    timeout: timedelta = DEFAULT_TIMEOUT,
) -> Any:
    """Per-process entry point for distributed training.

    Initializes the global NCCL process group, creates one local (per-machine)
    subgroup, runs ``main_func``, then synchronizes and tears the group down.

    Args:
        local_rank: rank of this process on its machine.
        main_func: function that will be called by the distributed workers.
        world_size: total number of GPUs across all machines.
        num_gpus_per_machine: number of GPUs per machine.
        machine_rank: rank of this machine.
        dist_url: URL used to rendezvous the distributed job, including protocol,
            e.g. "tcp://127.0.0.1:8686". May be "auto" to pick a free local port.
        config: config specifying the training regimen.
        timeout: timeout for the distributed workers.

    Returns:
        Whatever ``main_func`` returns.
    """
    assert torch.cuda.is_available(), "cuda is not available. Please check your installation."
    proc_rank = machine_rank * num_gpus_per_machine + local_rank
    dist.init_process_group(
        backend="nccl",
        init_method=dist_url,
        world_size=world_size,
        rank=proc_rank,
        timeout=timeout,
    )
    assert comms.LOCAL_PROCESS_GROUP is None
    # Every rank must participate in creating every per-machine subgroup,
    # but each rank only stores the subgroup covering its own machine.
    machine_count = world_size // num_gpus_per_machine
    for machine_idx in range(machine_count):
        member_ranks = list(range(machine_idx * num_gpus_per_machine, (machine_idx + 1) * num_gpus_per_machine))
        subgroup = dist.new_group(member_ranks)
        if machine_idx == machine_rank:
            comms.LOCAL_PROCESS_GROUP = subgroup
    assert num_gpus_per_machine <= torch.cuda.device_count()
    result = main_func(local_rank, world_size, config, proc_rank)
    comms.synchronize()
    dist.destroy_process_group()
    return result
The provided code snippet includes necessary dependencies for implementing the `launch` function. Write a Python function `def launch( main_func: Callable, num_gpus_per_machine: int, num_machines: int = 1, machine_rank: int = 0, dist_url: str = "auto", config: Optional[cfg.Config] = None, timeout: timedelta = DEFAULT_TIMEOUT, ) -> None` to solve the following problem:
Function that spawns multiple processes to call on main_func Args: main_func (Callable): function that will be called by the distributed workers num_gpus_per_machine (int): number of GPUs per machine num_machines (int, optional): total number of machines machine_rank (int, optional): rank of this machine. dist_url (str, optional): url to connect to for distributed jobs. config (Config, optional): config file specifying training regimen. timeout (timedelta, optional): timeout of the distributed workers.
Here is the function:
def launch(
    main_func: Callable,
    num_gpus_per_machine: int,
    num_machines: int = 1,
    machine_rank: int = 0,
    dist_url: str = "auto",
    config: Optional[cfg.Config] = None,
    timeout: timedelta = DEFAULT_TIMEOUT,
) -> None:
    """Spawn multiple processes that each call ``main_func``.

    With a world size of 0 or 1 the function is called directly in the current
    process; otherwise one worker process per local GPU is spawned via
    ``torch.multiprocessing``.

    Args:
        main_func (Callable): function that will be called by the distributed workers
        num_gpus_per_machine (int): number of GPUs per machine
        num_machines (int, optional): total number of machines
        machine_rank (int, optional): rank of this machine.
        dist_url (str, optional): url to connect to for distributed jobs
            ("auto" picks a free localhost port; single-machine jobs only).
        config (Config, optional): config file specifying training regimen
            (required in practice; asserted non-None below).
        timeout (timedelta, optional): timeout of the distributed workers.
    """
    assert config is not None
    world_size = num_machines * num_gpus_per_machine
    if world_size <= 1:
        # world_size=0 uses one CPU in one process.
        # world_size=1 uses one GPU in one process.
        try:
            main_func(local_rank=0, world_size=world_size, config=config)
        except KeyboardInterrupt:
            # print the stack trace
            CONSOLE.print(traceback.format_exc())
        finally:
            profiler.flush_profiler(config.logging)
    elif world_size > 1:
        # Using multiple gpus with multiple processes.
        if dist_url == "auto":
            assert num_machines == 1, "dist_url=auto is not supported for multi-machine jobs."
            port = _find_free_port()
            dist_url = f"tcp://127.0.0.1:{port}"
        if num_machines > 1 and dist_url.startswith("file://"):
            CONSOLE.log("file:// is not a reliable init_method in multi-machine jobs. Prefer tcp://")
        # join=False so KeyboardInterrupt can be handled here and any
        # still-running workers terminated explicitly below.
        process_context = mp.spawn(
            _distributed_worker,
            nprocs=num_gpus_per_machine,
            join=False,
            args=(
                main_func,
                world_size,
                num_gpus_per_machine,
                machine_rank,
                dist_url,
                config,
                timeout,
            ),
        )
        # process_context won't be None because join=False, so it's okay to assert this
        # for Pylance reasons
        assert process_context is not None
        try:
            process_context.join()
        except KeyboardInterrupt:
            # On Ctrl-C, terminate workers that are still alive, then join each.
            for i, process in enumerate(process_context.processes):
                if process.is_alive():
                    CONSOLE.log(f"Terminating process {i}...")
                    process.terminate()
                process.join()
                CONSOLE.log(f"Process {i} finished.")
        finally:
            profiler.flush_profiler(config.logging) | Function that spawns muliple processes to call on main_func Args: main_func (Callable): function that will be called by the distributed workers num_gpus_per_machine (int): number of GPUs per machine num_machines (int, optional): total number of machines machine_rank (int, optional): rank of this machine. dist_url (str, optional): url to connect to for distributed jobs. config (Config, optional): config file specifying training regimen. timeout (timedelta, optional): timeout of the distributed workers. |
162,387 | from __future__ import annotations
import random
import socket
import traceback
from datetime import timedelta
from typing import Any, Callable, Optional
import numpy as np
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import tyro
import yaml
from rich.console import Console
from nerfstudio.configs import base_config as cfg
from nerfstudio.configs.config_utils import convert_markup_to_ansi
from nerfstudio.configs.method_configs import AnnotatedBaseConfigUnion
from nerfstudio.engine.trainer import Trainer
from nerfstudio.utils import comms, profiler
def main(config: cfg.Config) -> None:
    """Finalize the parsed config, then launch training.

    Args:
        config: config parsed from the command line.
    """
    config.set_timestamp()
    if config.data:
        # Convenience alias: --data maps onto the dataparser's data path.
        CONSOLE.log("Using --data alias for --data.pipeline.datamanager.dataparser.data")
        config.pipeline.datamanager.dataparser.data = config.data
    if config.trainer.load_config:
        # NOTE(review): this replaces the whole CLI-parsed config with the one
        # loaded from disk, so other CLI overrides are discarded — confirm intended.
        CONSOLE.log(f"Loading pre-set config from: {config.trainer.load_config}")
        # yaml.Loader can construct arbitrary Python objects; only acceptable
        # because this config file is produced by this codebase, not untrusted input.
        config = yaml.load(config.trainer.load_config.read_text(), Loader=yaml.Loader)
    # print and save config
    config.print_to_terminal()
    config.save_config()
    launch(
        main_func=train_loop,
        num_gpus_per_machine=config.machine.num_gpus,
        num_machines=config.machine.num_machines,
        machine_rank=config.machine.machine_rank,
        dist_url=config.machine.dist_url,
        config=config,
    )
def convert_markup_to_ansi(markup_string: str) -> str:
    """Render rich-style markup into text containing ANSI escape sequences.

    Args:
        markup_string: Text with rich-style markup.

    Returns:
        The same text with the markup rendered as ANSI sequences.
    """
    # Capture what rich would print so it can be returned as a plain string.
    with CONSOLE.capture() as capture:
        CONSOLE.print(markup_string, soft_wrap=True)
    return capture.get()
# Union of all registered method configs, annotated for tyro so each method
# name becomes a CLI subcommand seeded with that config's defaults.
AnnotatedBaseConfigUnion = tyro.conf.SuppressFixed[  # Don't show unparseable (fixed) arguments in helptext.
    tyro.conf.FlagConversionOff[
        tyro.extras.subcommand_type_from_defaults(defaults=method_configs, descriptions=descriptions)
    ]
]
The provided code snippet includes necessary dependencies for implementing the `entrypoint` function. Write a Python function `def entrypoint()` to solve the following problem:
Entrypoint for use with pyproject scripts.
Here is the function:
def entrypoint():
    """Entrypoint for use with pyproject scripts."""
    # Choose a base configuration via the CLI, then hand it to main().
    tyro.extras.set_accent_color("bright_yellow")
    parsed_config = tyro.cli(AnnotatedBaseConfigUnion, description=convert_markup_to_ansi(__doc__))
    main(parsed_config)
162,388 | import json
import sys
import zipfile
from dataclasses import dataclass
from pathlib import Path
from typing import Union
import numpy as np
import tyro
from rich.console import Console
from typing_extensions import Annotated, Literal
from nerfstudio.process_data import (
colmap_utils,
hloc_utils,
insta360_utils,
metashape_utils,
polycam_utils,
process_data_utils,
record3d_utils,
)
from nerfstudio.process_data.process_data_utils import CAMERA_MODELS
from nerfstudio.utils import install_checks
# Union of all data-processing subcommands; tyro reads each member's `name=`
# annotation as the CLI subcommand name (e.g. `images`, `video`, `polycam`).
Commands = Union[
    Annotated[ProcessImages, tyro.conf.subcommand(name="images")],
    Annotated[ProcessVideo, tyro.conf.subcommand(name="video")],
    Annotated[ProcessPolycam, tyro.conf.subcommand(name="polycam")],
    Annotated[ProcessMetashape, tyro.conf.subcommand(name="metashape")],
    Annotated[ProcessInsta360, tyro.conf.subcommand(name="insta360")],
    Annotated[ProcessRecord3D, tyro.conf.subcommand(name="record3d")],
]
The provided code snippet includes necessary dependencies for implementing the `entrypoint` function. Write a Python function `def entrypoint()` to solve the following problem:
Entrypoint for use with pyproject scripts.
Here is the function:
def entrypoint():
    """Entrypoint for use with pyproject scripts."""
    tyro.extras.set_accent_color("bright_yellow")
    # Parse the chosen subcommand into its dataclass, then run it.
    selected = tyro.cli(Commands)
    selected.main()
# | Entrypoint for use with pyproject scripts. |
162,389 | from __future__ import annotations
import os
import shutil
import tarfile
import zipfile
from dataclasses import dataclass
from pathlib import Path
from typing import Union
import gdown
import tyro
from rich.console import Console
from typing_extensions import Annotated
from nerfstudio.configs.base_config import PrintableConfig
The provided code snippet includes necessary dependencies for implementing the `grab_file_id` function. Write a Python function `def grab_file_id(zip_url: str) -> str` to solve the following problem:
Get the file id from the google drive zip url.
Here is the function:
def grab_file_id(zip_url: str) -> str:
    """Extract the file id from a Google Drive zip url.

    Expects a share url of the form ``.../d/<file_id>/...`` and returns
    ``<file_id>``.
    """
    after_d = zip_url.split("/d/")[1]
    # Only the first path segment after "/d/" is the id.
    return after_d.split("/", 1)[0]
# | Get the file id from the google drive zip url. |
162,390 | from __future__ import annotations
import os
import shutil
import tarfile
import zipfile
from dataclasses import dataclass
from pathlib import Path
from typing import Union
import gdown
import tyro
from rich.console import Console
from typing_extensions import Annotated
from nerfstudio.configs.base_config import PrintableConfig
The provided code snippet includes necessary dependencies for implementing the `download_capture_name` function. Write a Python function `def download_capture_name(save_dir: Path, dataset_name: str, capture_name: str, capture_name_to_file_id: dict)` to solve the following problem:
Download specific captures a given dataset and capture name.
Here is the function:
def download_capture_name(save_dir: Path, dataset_name: str, capture_name: str, capture_name_to_file_id: dict):
    """Download and unpack one named capture of a dataset from Google Drive.

    Downloads the zip identified by ``capture_name_to_file_id[capture_name]``,
    extracts it into a scratch directory, and moves the single inner folder to
    ``save_dir/<dataset_name>/<capture_name>``, replacing any existing copy.

    Args:
        save_dir: root directory that datasets are saved under.
        dataset_name: name of the dataset, used as a subdirectory.
        capture_name: capture to download; must be a key of ``capture_name_to_file_id``.
        capture_name_to_file_id: mapping from capture name to Google Drive file id.
    """
    url = f"https://drive.google.com/uc?id={capture_name_to_file_id[capture_name]}"
    target_path = str(save_dir / f"{dataset_name}/{capture_name}")
    os.makedirs(target_path, exist_ok=True)
    download_path = Path(f"{target_path}.zip")
    # Extract into a scratch dir first so a partial extraction never corrupts target_path.
    tmp_path = str(save_dir / ".temp")
    shutil.rmtree(tmp_path, ignore_errors=True)
    os.makedirs(tmp_path, exist_ok=True)
    gdown.download(url, output=str(download_path))
    with zipfile.ZipFile(download_path, "r") as zip_ref:
        zip_ref.extractall(tmp_path)
    # The archive is expected to contain exactly one top-level folder.
    inner_folders = os.listdir(tmp_path)
    assert len(inner_folders) == 1, "There is more than one folder inside this zip file."
    folder = os.path.join(tmp_path, inner_folders[0])
    shutil.rmtree(target_path)
    shutil.move(folder, target_path)
    # Clean up the scratch dir and the downloaded archive.
    shutil.rmtree(tmp_path)
    os.remove(download_path) | Download specific captures a given dataset and capture name. |
162,391 | from __future__ import annotations
import os
import shutil
import tarfile
import zipfile
from dataclasses import dataclass
from pathlib import Path
from typing import Union
import gdown
import tyro
from rich.console import Console
from typing_extensions import Annotated
from nerfstudio.configs.base_config import PrintableConfig
# Union of all dataset-download subcommands; tyro reads each member's `name=`
# annotation as the CLI subcommand name (e.g. `blender`, `nerfstudio`).
Commands = Union[
    Annotated[BlenderDownload, tyro.conf.subcommand(name="blender")],
    Annotated[FriendsDownload, tyro.conf.subcommand(name="friends")],
    Annotated[NerfstudioDownload, tyro.conf.subcommand(name="nerfstudio")],
    Annotated[Record3dDownload, tyro.conf.subcommand(name="record3d")],
    Annotated[DNerfDownload, tyro.conf.subcommand(name="dnerf")],
    Annotated[PhototourismDownload, tyro.conf.subcommand(name="phototourism")],
    Annotated[SDFstudioDemoDownload, tyro.conf.subcommand(name="sdfstudio")],
    Annotated[Mipnerf360Download, tyro.conf.subcommand(name="mipnerf360")],
]
def main(
    dataset: DatasetDownload,
):
    """Download an existing dataset into ``dataset.save_dir``.

    Supported datasets include nerfstudio (use ``capture_name`` to pick a
    capture), blender, friends, record3d, dnerf, and phototourism.

    Args:
        dataset: The dataset to download (from).
    """
    target_dir = dataset.save_dir
    target_dir.mkdir(parents=True, exist_ok=True)
    dataset.download(target_dir)
The provided code snippet includes necessary dependencies for implementing the `entrypoint` function. Write a Python function `def entrypoint()` to solve the following problem:
Entrypoint for use with pyproject scripts.
Here is the function:
def entrypoint():
    """Entrypoint for use with pyproject scripts."""
    tyro.extras.set_accent_color("bright_yellow")
    # Parse the chosen download subcommand, then run it through main().
    chosen = tyro.cli(Commands)
    main(chosen)
# | Entrypoint for use with pyproject scripts. |
162,392 | import subprocess
import sys
import tyro
from rich.console import Console
from rich.style import Style
CONSOLE = Console(width=120)
The provided code snippet includes necessary dependencies for implementing the `run_command` function. Write a Python function `def run_command(command: str) -> None` to solve the following problem:
Run a command; kill actions if it fails. Args: command: command to run
Here is the function:
def run_command(command: str) -> None:
    """Run a shell command; exit the whole program if it fails.

    Args:
        command: shell command line to execute.
    """
    exit_code = subprocess.call(command, shell=True)
    if exit_code == 0:
        return
    CONSOLE.print(f"[bold red]Error: `{command}` failed. Exiting...")
    sys.exit(1)
# | Run a command kill actions if it fails Args: command: command to run |
162,393 | from __future__ import annotations
import json
from dataclasses import dataclass, field
from pathlib import Path
from typing import List
import cv2
import mediapy as media
import numpy as np
import open3d as o3d
import torch
import tyro
from rich.console import Console
from scipy.interpolate import interp1d
from scipy.spatial.transform import Rotation, Slerp
from typing_extensions import Literal, assert_never
from nerfstudio.cameras.camera_paths import (
generate_ellipse_path,
get_path_from_json,
get_spiral_path,
)
from nerfstudio.cameras.cameras import Cameras
from nerfstudio.configs.base_config import Config
from nerfstudio.data.datamanagers.base_datamanager import AnnotatedDataParserUnion
from nerfstudio.data.dataparsers.sdfstudio_dataparser import SDFStudioDataParserConfig
from nerfstudio.utils import install_checks
class Cameras(TensorDataclass):
"""Dataparser outputs for the image dataset and the ray generator.
Note: currently only supports cameras with the same principal points and types. The reason we type
the focal lengths, principal points, and image sizes as tensors is to allow for batched cameras
down the line in cases where your batches of camera data don't come from the same cameras.
If a single value is provided, it is broadcasted to all cameras.
Args:
camera_to_worlds: Camera to world matrices. Tensor of per-image c2w matrices, in [R | t] format
fx: Focal length x
fy: Focal length y
cx: Principal point x
cy: Principal point y
width: Image width
height: Image height
distortion_params: OpenCV 6 radial distortion coefficients
camera_type: Type of camera model. This will be an int corresponding to the CameraType enum.
times: Timestamps for each camera
"""
camera_to_worlds: TensorType["num_cameras":..., 3, 4]
fx: TensorType["num_cameras":..., 1]
fy: TensorType["num_cameras":..., 1]
cx: TensorType["num_cameras":..., 1]
cy: TensorType["num_cameras":..., 1]
width: TensorType["num_cameras":..., 1]
height: TensorType["num_cameras":..., 1]
distortion_params: Optional[TensorType["num_cameras":..., 6]]
camera_type: TensorType["num_cameras":..., 1]
times: Optional[TensorType["num_cameras", 1]]
def __init__(
self,
camera_to_worlds: TensorType["batch_c2ws":..., 3, 4],
fx: Union[TensorType["batch_fxs":..., 1], float],
fy: Union[TensorType["batch_fys":..., 1], float],
cx: Union[TensorType["batch_cxs":..., 1], float],
cy: Union[TensorType["batch_cys":..., 1], float],
width: Optional[Union[TensorType["batch_ws":..., 1], int]] = None,
height: Optional[Union[TensorType["batch_hs":..., 1], int]] = None,
distortion_params: Optional[TensorType["batch_dist_params":..., 6]] = None,
camera_type: Optional[
Union[
TensorType["batch_cam_types":..., 1],
int,
List[CameraType],
CameraType,
]
] = CameraType.PERSPECTIVE,
times: Optional[TensorType["num_cameras"]] = None,
):
"""Initializes the Cameras object.
Note on Input Tensor Dimensions: All of these tensors have items of dimensions TensorType[3, 4]
(in the case of the c2w matrices), TensorType[6] (in the case of distortion params), or
TensorType[1] (in the case of the rest of the elements). The dimensions before that are
considered the batch dimension of that tensor (batch_c2ws, batch_fxs, etc.). We will broadcast
all the tensors to be the same batch dimension. This means you can use any combination of the
input types in the function signature and it won't break. Your batch size for all tensors
must be broadcastable to the same size, and the resulting number of batch dimensions will be
the batch dimension with the largest number of dimensions.
"""
# This will notify the tensordataclass that we have a field with more than 1 dimension
self._field_custom_dimensions = {"camera_to_worlds": 2}
self.camera_to_worlds = camera_to_worlds
# fx fy calculation
self.fx = self._init_get_fc_xy(fx, "fx") # @dataclass's post_init will take care of broadcasting
self.fy = self._init_get_fc_xy(fy, "fy") # @dataclass's post_init will take care of broadcasting
# cx cy calculation
self.cx = self._init_get_fc_xy(cx, "cx") # @dataclass's post_init will take care of broadcasting
self.cy = self._init_get_fc_xy(cy, "cy") # @dataclass's post_init will take care of broadcasting
# Distortion Params Calculation:
self.distortion_params = distortion_params # @dataclass's post_init will take care of broadcasting
# @dataclass's post_init will take care of broadcasting
self.height = self._init_get_height_width(height, self.cy)
self.width = self._init_get_height_width(width, self.cx)
self.camera_type = self._init_get_camera_type(camera_type)
self.times = self._init_get_times(times)
self.__post_init__() # This will do the dataclass post_init and broadcast all the tensors
def _init_get_fc_xy(self, fc_xy, name):
"""
Parses the input focal length / principle point x or y and returns a tensor of the correct shape
Only needs to make sure that we a 1 in the last dimension if it is a tensor. If it is a float, we
just need to make it into a tensor and it will be broadcasted later in the __post_init__ function.
Args:
fc_xy: The focal length / principle point x or y
name: The name of the variable. Used for error messages
"""
if isinstance(fc_xy, float):
fc_xy = torch.Tensor([fc_xy], device=self.device)
elif isinstance(fc_xy, torch.Tensor):
if fc_xy.ndim == 0 or fc_xy.shape[-1] != 1:
fc_xy = fc_xy.unsqueeze(-1)
fc_xy = fc_xy.to(self.device)
else:
raise ValueError(f"{name} must be a float or tensor, got {type(fc_xy)}")
return fc_xy
def _init_get_camera_type(
self,
camera_type: Union[
TensorType["batch_cam_types":..., 1], TensorType["batch_cam_types":...], int, List[CameraType], CameraType
],
) -> TensorType["num_cameras":..., 1]:
"""
Parses the __init__() argument camera_type
Camera Type Calculation:
If CameraType, convert to int and then to tensor, then broadcast to all cameras
If List of CameraTypes, convert to ints and then to tensor, then broadcast to all cameras
If int, first go to tensor and then broadcast to all cameras
If tensor, broadcast to all cameras
Args:
camera_type: camera_type argument from __init__()
"""
if isinstance(camera_type, CameraType):
camera_type = torch.tensor([camera_type.value], device=self.device)
elif isinstance(camera_type, List) and isinstance(camera_type[0], CameraType):
camera_type = torch.tensor([[c.value] for c in camera_type], device=self.device)
elif isinstance(camera_type, int):
camera_type = torch.tensor([camera_type], device=self.device)
elif isinstance(camera_type, torch.Tensor):
assert not torch.is_floating_point(
camera_type
), f"camera_type tensor must be of type int, not: {camera_type.dtype}"
camera_type = camera_type.to(self.device)
if camera_type.ndim == 0 or camera_type.shape[-1] != 1:
camera_type = camera_type.unsqueeze(-1)
# assert torch.all(
# camera_type.view(-1)[0] == camera_type
# ), "Batched cameras of different camera_types will be allowed in the future."
else:
raise ValueError(
'Invalid camera_type. Must be CameraType, List[CameraType], int, or torch.Tensor["num_cameras"]. \
Received: '
+ str(type(camera_type))
)
return camera_type
def _init_get_height_width(
self,
h_w: Union[TensorType["batch_hws":..., 1], TensorType["batch_hws":...], int, None],
c_x_y: TensorType["batch_cxys":...],
) -> TensorType["num_cameras":..., 1]:
"""
Parses the __init__() argument for height or width
Height/Width Calculation:
If int, first go to tensor and then broadcast to all cameras
If tensor, broadcast to all cameras
If none, use cx or cy * 2
Else raise error
Args:
h_w: height or width argument from __init__()
c_x_y: cx or cy for when h_w == None
"""
if isinstance(h_w, int):
h_w = torch.Tensor([h_w]).to(torch.int64).to(self.device)
elif isinstance(h_w, torch.Tensor):
assert not torch.is_floating_point(h_w), f"height and width tensor must be of type int, not: {h_w.dtype}"
h_w = h_w.to(torch.int64).to(self.device)
if h_w.ndim == 0 or h_w.shape[-1] != 1:
h_w = h_w.unsqueeze(-1)
# assert torch.all(h_w == h_w.view(-1)[0]), "Batched cameras of different h, w will be allowed in the future."
elif h_w is None:
h_w = torch.Tensor((c_x_y * 2).to(torch.int64).to(self.device))
else:
raise ValueError("Height must be an int, tensor, or None, received: " + str(type(h_w)))
return h_w
def _init_get_times(self, times):
if times is None:
times = None
elif isinstance(times, torch.Tensor):
if times.ndim == 0 or times.shape[-1] != 1:
times = times.unsqueeze(-1).to(self.device)
else:
raise ValueError(f"times must be None or a tensor, got {type(times)}")
return times
def device(self):
"""Returns the device that the camera is on."""
return self.camera_to_worlds.device
def image_height(self) -> TensorType["num_cameras":..., 1]:
"""Returns the height of the images."""
return self.height
def image_width(self) -> TensorType["num_cameras":..., 1]:
"""Returns the height of the images."""
return self.width
def is_jagged(self):
"""
Returns whether or not the cameras are "jagged" (i.e. the height and widths are different, meaning that
you cannot concatenate the image coordinate maps together)
"""
h_jagged = not torch.all(self.height == self.height.view(-1)[0])
w_jagged = not torch.all(self.width == self.width.view(-1)[0])
return h_jagged or w_jagged
def get_image_coords(
self, pixel_offset: float = 0.5, index: Optional[Tuple] = None
) -> TensorType["height", "width", 2]:
"""This gets the image coordinates of one of the cameras in this object.
If no index is specified, it will return the maximum possible sized height / width image coordinate map,
by looking at the maximum height and width of all the cameras in this object.
Args:
pixel_offset: Offset for each pixel. Defaults to center of pixel (0.5)
index: Tuple of indices into the batch dimensions of the camera. Defaults to None, which returns the 0th
flattened camera
Returns:
Grid of image coordinates.
"""
if index is None:
image_height = torch.max(self.image_height.view(-1))
image_width = torch.max(self.image_width.view(-1))
image_coords = torch.meshgrid(torch.arange(image_height), torch.arange(image_width), indexing="ij")
image_coords = torch.stack(image_coords, dim=-1) + pixel_offset # stored as (y, x) coordinates
else:
image_height = self.image_height[index].item()
image_width = self.image_width[index].item()
image_coords = torch.meshgrid(torch.arange(image_height), torch.arange(image_width), indexing="ij")
image_coords = torch.stack(image_coords, dim=-1) + pixel_offset # stored as (y, x) coordinates
return image_coords
def generate_rays( # pylint: disable=too-many-statements
self,
camera_indices: Union[TensorType["num_rays":..., "num_cameras_batch_dims"], int],
coords: Optional[TensorType["num_rays":..., 2]] = None,
camera_opt_to_camera: Optional[TensorType["num_rays":..., 3, 4]] = None,
distortion_params_delta: Optional[TensorType["num_rays":..., 6]] = None,
keep_shape: Optional[bool] = None,
disable_distortion: bool = False,
) -> RayBundle:
"""Generates rays for the given camera indices.
This function will standardize the input arguments and then call the _generate_rays_from_coords function
to generate the rays. Our goal is to parse the arguments and then get them into the right shape:
- camera_indices: (num_rays:..., num_cameras_batch_dims)
- coords: (num_rays:..., 2)
- camera_opt_to_camera: (num_rays:..., 3, 4) or None
- distortion_params_delta: (num_rays:..., 6) or None
Read the docstring for _generate_rays_from_coords for more information on how we generate the rays
after we have standardized the arguments.
We are only concerned about different combinations of camera_indices and coords matrices, and the following
are the 4 cases we have to deal with:
1. isinstance(camera_indices, int) and coords == None
- In this case we broadcast our camera_indices / coords shape (h, w, 1 / 2 respectively)
2. isinstance(camera_indices, int) and coords != None
- In this case, we broadcast camera_indices to the same batch dim as coords
3. not isinstance(camera_indices, int) and coords == None
- In this case, we will need to set coords so that it is of shape (h, w, num_rays, 2), and broadcast
all our other args to match the new definition of num_rays := (h, w) + num_rays
4. not isinstance(camera_indices, int) and coords != None
- In this case, we have nothing to do, only check that the arguments are of the correct shape
There is one more edge case we need to be careful with: when we have "jagged cameras" (ie: different heights
and widths for each camera). This isn't problematic when we specify coords, since coords is already a tensor.
When coords == None (ie: when we render out the whole image associated with this camera), we run into problems
since there's no way to stack each coordinate map as all coordinate maps are all different shapes. In this case,
we will need to flatten each individual coordinate map and concatenate them, giving us only one batch dimension,
regaurdless of the number of prepended extra batch dimensions in the camera_indices tensor.
Args:
camera_indices: Camera indices of the flattened cameras object to generate rays for.
coords: Coordinates of the pixels to generate rays for. If None, the full image will be rendered.
camera_opt_to_camera: Optional transform for the camera to world matrices.
distortion_params_delta: Optional delta for the distortion parameters.
keep_shape: If None, then we default to the regular behavior of flattening if cameras is jagged, otherwise
keeping dimensions. If False, we flatten at the end. If True, then we keep the shape of the
camera_indices and coords tensors (if we can).
disable_distortion: If True, disables distortion.
Returns:
Rays for the given camera indices and coords.
"""
# Check the argument types to make sure they're valid and all shaped correctly
assert isinstance(camera_indices, (torch.Tensor, int)), "camera_indices must be a tensor or int"
assert coords is None or isinstance(coords, torch.Tensor), "coords must be a tensor or None"
assert camera_opt_to_camera is None or isinstance(camera_opt_to_camera, torch.Tensor)
assert distortion_params_delta is None or isinstance(distortion_params_delta, torch.Tensor)
if isinstance(camera_indices, torch.Tensor) and isinstance(coords, torch.Tensor):
num_rays_shape = camera_indices.shape[:-1]
errormsg = "Batch dims of inputs must match when inputs are all tensors"
assert coords.shape[:-1] == num_rays_shape, errormsg
assert camera_opt_to_camera is None or camera_opt_to_camera.shape[:-2] == num_rays_shape, errormsg
assert distortion_params_delta is None or distortion_params_delta.shape[:-1] == num_rays_shape, errormsg
# If zero dimensional, we need to unsqueeze to get a batch dimension and then squeeze later
if not self.shape:
cameras = self.reshape((1,))
assert torch.all(
torch.tensor(camera_indices == 0) if isinstance(camera_indices, int) else camera_indices == 0
), "Can only index into single camera with no batch dimensions if index is zero"
else:
cameras = self
# If the camera indices are an int, then we need to make sure that the camera batch is 1D
if isinstance(camera_indices, int):
assert (
len(cameras.shape) == 1
), "camera_indices must be a tensor if cameras are batched with more than 1 batch dimension"
camera_indices = torch.tensor([camera_indices], device=cameras.device)
assert camera_indices.shape[-1] == len(
cameras.shape
), "camera_indices must have shape (num_rays:..., num_cameras_batch_dims)"
# If keep_shape is True, then we need to make sure that the camera indices in question
# are all the same height and width and can actually be batched while maintaining the image
# shape
if keep_shape is True:
assert torch.all(cameras.height[camera_indices] == cameras.height[camera_indices[0]]) and torch.all(
cameras.width[camera_indices] == cameras.width[camera_indices[0]]
), "Can only keep shape if all cameras have the same height and width"
# If the cameras don't all have same height / width, if coords is not none, we will need to generate
# a flat list of coords for each camera and then concatenate otherwise our rays will be jagged.
# Camera indices, camera_opt, and distortion will also need to be broadcasted accordingly which is non-trivial
if cameras.is_jagged and coords is None and (keep_shape is None or keep_shape is False):
index_dim = camera_indices.shape[-1]
camera_indices = camera_indices.reshape(-1, index_dim)
_coords = [cameras.get_image_coords(index=tuple(index)).reshape(-1, 2) for index in camera_indices]
camera_indices = torch.cat(
[index.unsqueeze(0).repeat(coords.shape[0], 1) for index, coords in zip(camera_indices, _coords)],
)
coords = torch.cat(_coords, dim=0)
assert coords.shape[0] == camera_indices.shape[0]
# Need to get the coords of each indexed camera and flatten all coordinate maps and concatenate them
# The case where we aren't jagged && keep_shape (since otherwise coords is already set) and coords
# is None. In this case we append (h, w) to the num_rays dimensions for all tensors. In this case,
# each image in camera_indices has to have the same shape since otherwise we would have error'd when
# we checked keep_shape is valid or we aren't jagged.
if coords is None:
index_dim = camera_indices.shape[-1]
index = camera_indices.reshape(-1, index_dim)[0]
coords: torch.Tensor = cameras.get_image_coords(index=tuple(index)) # (h, w, 2)
coords = coords.reshape(coords.shape[:2] + (1,) * len(camera_indices.shape[:-1]) + (2,)) # (h, w, 1..., 2)
coords = coords.expand(coords.shape[:2] + camera_indices.shape[:-1] + (2,)) # (h, w, num_rays, 2)
camera_opt_to_camera = ( # (h, w, num_rays, 3, 4) or None
camera_opt_to_camera.broadcast_to(coords.shape[:-1] + (3, 4))
if camera_opt_to_camera is not None
else None
)
distortion_params_delta = ( # (h, w, num_rays, 6) or None
distortion_params_delta.broadcast_to(coords.shape[:-1] + (6,))
if distortion_params_delta is not None
else None
)
# If camera indices was an int or coords was none, we need to broadcast our indices along batch dims
camera_indices = camera_indices.broadcast_to(coords.shape[:-1] + (len(cameras.shape),)).to(torch.long)
# Checking our tensors have been standardized
assert isinstance(coords, torch.Tensor) and isinstance(camera_indices, torch.Tensor)
assert camera_indices.shape[-1] == len(cameras.shape)
assert camera_opt_to_camera is None or camera_opt_to_camera.shape[:-2] == coords.shape[:-1]
assert distortion_params_delta is None or distortion_params_delta.shape[:-1] == coords.shape[:-1]
# This will do the actual work of generating the rays now that we have standardized the inputs
# raybundle.shape == (num_rays) when done
# pylint: disable=protected-access
raybundle = cameras._generate_rays_from_coords(
camera_indices, coords, camera_opt_to_camera, distortion_params_delta, disable_distortion=disable_distortion
)
# If we have mandated that we don't keep the shape, then we flatten
if keep_shape is False:
raybundle = raybundle.flatten()
# TODO: We should have to squeeze the last dimension here if we started with zero batch dims, but never have to,
# so there might be a rogue squeeze happening somewhere, and this may cause some unintended behaviour
# that we haven't caught yet with tests
return raybundle
# pylint: disable=too-many-statements
def _generate_rays_from_coords(
    self,
    camera_indices: TensorType["num_rays":..., "num_cameras_batch_dims"],
    coords: TensorType["num_rays":..., 2],
    camera_opt_to_camera: Optional[TensorType["num_rays":..., 3, 4]] = None,
    distortion_params_delta: Optional[TensorType["num_rays":..., 6]] = None,
    disable_distortion: bool = False,
) -> RayBundle:
    """Generates rays for the given camera indices and coords where self isn't jagged

    This is a fairly complex function, so let's break this down slowly.

    Shapes involved:
        - num_rays: This is your output raybundle shape. It dictates the number and shape of the rays generated
        - num_cameras_batch_dims: This is the number of dimensions of our camera

    Args:
        camera_indices: Camera indices of the flattened cameras object to generate rays for.
            The shape of this is such that indexing into camera_indices["num_rays":...] will return the
            index into each batch dimension of the camera in order to get the correct camera specified by
            "num_rays".

            Example:
                >>> cameras = Cameras(...)
                >>> cameras.shape
                (2, 3, 4)

                >>> camera_indices = torch.tensor([0, 0, 0]) # We need an axis of length 3 since cameras.ndim == 3
                >>> camera_indices.shape
                (3,)
                >>> coords = torch.tensor([1,1])
                >>> coords.shape
                (2,)
                >>> out_rays = cameras.generate_rays(camera_indices=camera_indices, coords = coords)
                # This will generate a RayBundle with a single ray for the
                # camera at cameras[0,0,0] at image coordinates (1,1), so out_rays.shape == ()
                >>> out_rays.shape
                ()

                >>> camera_indices = torch.tensor([[0,0,0]])
                >>> camera_indices.shape
                (1, 3)
                >>> coords = torch.tensor([[1,1]])
                >>> coords.shape
                (1, 2)
                >>> out_rays = cameras.generate_rays(camera_indices=camera_indices, coords = coords)
                # This will generate a RayBundle with a single ray for the
                # camera at cameras[0,0,0] at point (1,1), so out_rays.shape == (1,)
                # since we added an extra dimension in front of camera_indices
                >>> out_rays.shape
                (1,)

            If you want more examples, check tests/cameras/test_cameras and the function check_generate_rays_shape

            The bottom line is that for camera_indices: (num_rays:..., num_cameras_batch_dims), num_rays is the
            output shape and if you index into the output RayBundle with some indices [i:...], if you index into
            camera_indices with camera_indices[i:...] as well, you will get a 1D tensor containing the batch
            indices into the original cameras object corresponding to that ray (ie: you will get the camera
            from our batched cameras corresponding to the ray at RayBundle[i:...]).

        coords: Coordinates of the pixels to generate rays for. If None, the full image will be rendered, meaning
            height and width get prepended to the num_rays dimensions. Indexing into coords with [i:...] will
            get you the image coordinates [x, y] of that specific ray located at output RayBundle[i:...].

        camera_opt_to_camera: Optional transform for the camera to world matrices.
            In terms of shape, it follows the same rules as coords, but indexing into it with [i:...] gets you
            the 2D camera to world transform matrix for the camera optimization at RayBundle[i:...].

        distortion_params_delta: Optional delta for the distortion parameters.
            In terms of shape, it follows the same rules as coords, but indexing into it with [i:...] gets you
            the 1D tensor with the 6 distortion parameters for the camera optimization at RayBundle[i:...].

        disable_distortion: If True, disables distortion.

    Returns:
        Rays for the given camera indices and coords. RayBundle.shape == num_rays
    """
    # Make sure we're on the right devices
    camera_indices = camera_indices.to(self.device)
    coords = coords.to(self.device)

    # Checking to make sure everything is of the right shape and type
    num_rays_shape = camera_indices.shape[:-1]
    assert camera_indices.shape == num_rays_shape + (self.ndim,)
    assert coords.shape == num_rays_shape + (2,)
    assert coords.shape[-1] == 2
    assert camera_opt_to_camera is None or camera_opt_to_camera.shape == num_rays_shape + (3, 4)
    assert distortion_params_delta is None or distortion_params_delta.shape == num_rays_shape + (6,)

    # Here, we've broken our indices down along the num_cameras_batch_dims dimension allowing us to index by all
    # of our output rays at each dimension of our cameras object
    true_indices = [camera_indices[..., i] for i in range(camera_indices.shape[-1])]

    # Get all our focal lengths, principal points and make sure they are the right shapes
    y = coords[..., 0]  # (num_rays,) get rid of the last dimension
    x = coords[..., 1]  # (num_rays,) get rid of the last dimension
    fx, fy = self.fx[true_indices].squeeze(-1), self.fy[true_indices].squeeze(-1)  # (num_rays,)
    cx, cy = self.cx[true_indices].squeeze(-1), self.cy[true_indices].squeeze(-1)  # (num_rays,)
    assert (
        y.shape == num_rays_shape
        and x.shape == num_rays_shape
        and fx.shape == num_rays_shape
        and fy.shape == num_rays_shape
        and cx.shape == num_rays_shape
        and cy.shape == num_rays_shape
    ), (
        str(num_rays_shape)
        + str(y.shape)
        + str(x.shape)
        + str(fx.shape)
        + str(fy.shape)
        + str(cx.shape)
        + str(cy.shape)
    )

    # Get our image coordinates and image coordinates offset by 1 (offsets used for dx, dy calculations)
    # Also make sure the shapes are correct
    # Note the y axis is negated here so +y points up in camera space.
    coord = torch.stack([(x - cx) / fx, -(y - cy) / fy], -1)  # (num_rays, 2)
    coord_x_offset = torch.stack([(x - cx + 1) / fx, -(y - cy) / fy], -1)  # (num_rays, 2)
    coord_y_offset = torch.stack([(x - cx) / fx, -(y - cy + 1) / fy], -1)  # (num_rays, 2)
    assert (
        coord.shape == num_rays_shape + (2,)
        and coord_x_offset.shape == num_rays_shape + (2,)
        and coord_y_offset.shape == num_rays_shape + (2,)
    )

    # Stack image coordinates and image coordinates offset by 1, check shapes too
    coord_stack = torch.stack([coord, coord_x_offset, coord_y_offset], dim=0)  # (3, num_rays, 2)
    assert coord_stack.shape == (3,) + num_rays_shape + (2,)

    # Undistorts our images according to our distortion parameters
    if not disable_distortion:
        distortion_params = None
        if self.distortion_params is not None:
            distortion_params = self.distortion_params[true_indices]
            if distortion_params_delta is not None:
                distortion_params = distortion_params + distortion_params_delta
        elif distortion_params_delta is not None:
            distortion_params = distortion_params_delta

        # Do not apply distortion for equirectangular images
        if distortion_params is not None:
            mask = (self.camera_type[true_indices] != CameraType.EQUIRECTANGULAR.value).squeeze(-1)  # (num_rays)
            coord_mask = torch.stack([mask, mask, mask], dim=0)
            if mask.any():
                coord_stack[coord_mask, :] = camera_utils.radial_and_tangential_undistort(
                    coord_stack[coord_mask, :].reshape(3, -1, 2),
                    distortion_params[mask, :],
                ).reshape(-1, 2)

    # Make sure after we have undistorted our images, the shapes are still correct
    assert coord_stack.shape == (3,) + num_rays_shape + (2,)

    # Gets our directions for all our rays in camera coordinates and checks shapes at the end
    # Here, directions_stack is of shape (3, num_rays, 3)
    # directions_stack[0] is the direction for ray in camera coordinates
    # directions_stack[1] is the direction for ray in camera coordinates offset by 1 in x
    # directions_stack[2] is the direction for ray in camera coordinates offset by 1 in y
    cam_types = torch.unique(self.camera_type, sorted=False)
    directions_stack = torch.empty((3,) + num_rays_shape + (3,), device=self.device)
    if CameraType.PERSPECTIVE.value in cam_types:
        mask = (self.camera_type[true_indices] == CameraType.PERSPECTIVE.value).squeeze(-1)  # (num_rays)
        mask = torch.stack([mask, mask, mask], dim=0)
        directions_stack[..., 0][mask] = torch.masked_select(coord_stack[..., 0], mask).float()
        directions_stack[..., 1][mask] = torch.masked_select(coord_stack[..., 1], mask).float()
        directions_stack[..., 2][mask] = -1.0

    if CameraType.FISHEYE.value in cam_types:
        mask = (self.camera_type[true_indices] == CameraType.FISHEYE.value).squeeze(-1)  # (num_rays)
        mask = torch.stack([mask, mask, mask], dim=0)

        theta = torch.sqrt(torch.sum(coord_stack**2, dim=-1))
        theta = torch.clip(theta, 0.0, math.pi)
        sin_theta = torch.sin(theta)
        # NOTE(review): sin(theta) / theta is 0/0 (NaN) for a ray exactly through the
        # principal point (theta == 0) — confirm callers never produce that coordinate.
        directions_stack[..., 0][mask] = torch.masked_select(coord_stack[..., 0] * sin_theta / theta, mask).float()
        directions_stack[..., 1][mask] = torch.masked_select(coord_stack[..., 1] * sin_theta / theta, mask).float()
        directions_stack[..., 2][mask] = -torch.masked_select(torch.cos(theta), mask)

    if CameraType.EQUIRECTANGULAR.value in cam_types:
        mask = (self.camera_type[true_indices] == CameraType.EQUIRECTANGULAR.value).squeeze(-1)  # (num_rays)
        mask = torch.stack([mask, mask, mask], dim=0)

        # For equirect, fx = fy = height = width/2
        # Then coord[..., 0] goes from -1 to 1 and coord[..., 1] goes from -1/2 to 1/2
        theta = -torch.pi * coord_stack[..., 0]  # minus sign for right-handed
        phi = torch.pi * (0.5 - coord_stack[..., 1])
        # use spherical in local camera coordinates (+y up, x=0 and z<0 is theta=0)
        directions_stack[..., 0][mask] = torch.masked_select(-torch.sin(theta) * torch.sin(phi), mask).float()
        directions_stack[..., 1][mask] = torch.masked_select(torch.cos(phi), mask).float()
        directions_stack[..., 2][mask] = torch.masked_select(-torch.cos(theta) * torch.sin(phi), mask).float()

    # Any camera type other than the three handled above is rejected.
    for value in cam_types:
        if value not in [CameraType.PERSPECTIVE.value, CameraType.FISHEYE.value, CameraType.EQUIRECTANGULAR.value]:
            raise ValueError(f"Camera type {value} not supported.")

    assert directions_stack.shape == (3,) + num_rays_shape + (3,)

    c2w = self.camera_to_worlds[true_indices]
    assert c2w.shape == num_rays_shape + (3, 4)

    if camera_opt_to_camera is not None:
        c2w = pose_utils.multiply(c2w, camera_opt_to_camera)
    rotation = c2w[..., :3, :3]  # (..., 3, 3)
    assert rotation.shape == num_rays_shape + (3, 3)

    # Rotate the camera-space directions into world space.
    directions_stack = torch.sum(
        directions_stack[..., None, :] * rotation, dim=-1
    )  # (..., 1, 3) * (..., 3, 3) -> (..., 3)

    directions_norm = torch.norm(directions_stack, dim=-1, keepdim=True)
    # Keep only the norm of the central (un-offset) direction; the two offset copies
    # are only needed below for the dx/dy pixel-footprint estimate.
    directions_norm = directions_norm[0]
    directions_stack = normalize(directions_stack, dim=-1)
    assert directions_stack.shape == (3,) + num_rays_shape + (3,)

    origins = c2w[..., :3, 3]  # (..., 3)
    assert origins.shape == num_rays_shape + (3,)

    directions = directions_stack[0]
    assert directions.shape == num_rays_shape + (3,)

    # norms of the vector going between adjacent coords, giving us dx and dy per output ray
    dx = torch.sqrt(torch.sum((directions - directions_stack[1]) ** 2, dim=-1))  # ("num_rays":...,)
    dy = torch.sqrt(torch.sum((directions - directions_stack[2]) ** 2, dim=-1))  # ("num_rays":...,)
    assert dx.shape == num_rays_shape and dy.shape == num_rays_shape

    pixel_area = (dx * dy)[..., None]  # ("num_rays":..., 1)
    assert pixel_area.shape == num_rays_shape + (1,)

    times = self.times[camera_indices, 0] if self.times is not None else None

    return RayBundle(
        origins=origins,
        directions=directions,
        pixel_area=pixel_area,
        camera_indices=camera_indices,
        directions_norm=directions_norm,
        times=times,
    )
def to_json(
    # Annotation fixed: the encoded image is H x W x 3 (RGB), not 2 channels —
    # it is JPEG-encoded below, which requires a colour/grayscale channel count.
    self, camera_idx: int, image: Optional[TensorType["height", "width", 3]] = None, max_size: Optional[int] = None
) -> Dict:
    """Convert a camera to a json dictionary.

    Args:
        camera_idx: Index of the camera to convert.
        image: An image in range [0, 1] that is encoded to a base64 string.
        max_size: Max size to resize the image to if present.

    Returns:
        A JSON representation of the camera
    """
    # Flatten so a single integer index addresses one camera regardless of batch dims.
    flattened = self.flatten()
    json_ = {
        "type": "PinholeCamera",
        "cx": flattened[camera_idx].cx.item(),
        "cy": flattened[camera_idx].cy.item(),
        "fx": flattened[camera_idx].fx.item(),
        "fy": flattened[camera_idx].fy.item(),
        "camera_to_world": self.camera_to_worlds[camera_idx].tolist(),
        "camera_index": camera_idx,
        "times": flattened[camera_idx].times.item() if self.times is not None else None,
    }
    if image is not None:
        # [0, 1] float -> uint8; resize (if requested) in CHW layout, then back to HWC.
        image_uint8 = (image * 255).detach().type(torch.uint8)
        if max_size is not None:
            image_uint8 = image_uint8.permute(2, 0, 1)
            image_uint8 = torchvision.transforms.functional.resize(image_uint8, max_size)  # type: ignore
            image_uint8 = image_uint8.permute(1, 2, 0)
        image_uint8 = image_uint8.cpu().numpy()
        # JPEG-encode and embed as a base64 data URI.
        data = cv2.imencode(".jpg", image_uint8)[1].tobytes()
        json_["image"] = str("data:image/jpeg;base64," + base64.b64encode(data).decode("ascii"))
    return json_
def get_intrinsics_matrices(self) -> TensorType["num_cameras":..., 3, 3]:
    """Build the 3x3 pinhole intrinsics matrix for every camera in the batch.

    Returns:
        Pinhole camera intrinsics matrices
    """
    intrinsics = torch.zeros((*self.shape, 3, 3), dtype=torch.float32)
    # Homogeneous row.
    intrinsics[..., 2, 2] = 1.0
    # Focal lengths on the diagonal.
    intrinsics[..., 0, 0] = self.fx.squeeze(-1)
    intrinsics[..., 1, 1] = self.fy.squeeze(-1)
    # Principal point in the last column.
    intrinsics[..., 0, 2] = self.cx.squeeze(-1)
    intrinsics[..., 1, 2] = self.cy.squeeze(-1)
    return intrinsics
def rescale_output_resolution(
    self, scaling_factor: Union[TensorType["num_cameras":...], TensorType["num_cameras":..., 1], float, int]
) -> None:
    """Rescale the output resolution of the cameras in place.

    Scales the focal lengths, principal points, and integer image sizes by the
    given factor. A scalar applies the same factor to every camera; a tensor
    must match the camera batch shape (with or without a trailing 1).

    Args:
        scaling_factor: Scaling factor to apply to the output resolution.
    """
    # Normalize the factor to a tensor broadcastable against the (..., 1) intrinsics.
    if isinstance(scaling_factor, (float, int)):
        scaling_factor = torch.tensor([scaling_factor]).to(self.device).broadcast_to((self.cx.shape))
    elif isinstance(scaling_factor, torch.Tensor) and scaling_factor.shape == self.shape:
        scaling_factor = scaling_factor.unsqueeze(-1)
    elif not (isinstance(scaling_factor, torch.Tensor) and scaling_factor.shape == (*self.shape, 1)):
        raise ValueError(
            f"Scaling factor must be a float, int, or a tensor of shape {self.shape} or {(*self.shape, 1)}."
        )

    for attr in ("fx", "fy", "cx", "cy"):
        setattr(self, attr, getattr(self, attr) * scaling_factor)
    # Image sizes stay integral after scaling.
    self.height = (self.height * scaling_factor).to(torch.int64)
    self.width = (self.width * scaling_factor).to(torch.int64)
The helper below, `_interpolate_trajectory(cameras, num_views=300)`, computes a smooth interpolated
camera path through the given key-frame cameras (Slerp for rotations, linear interpolation for
translations), reusing the intrinsics of the first camera.
def _interpolate_trajectory(cameras: Cameras, num_views: int = 300):
    """Generate a smooth camera path interpolating the given key-frame cameras.

    Rotations are interpolated with spherical linear interpolation (Slerp) and
    translations with piecewise-linear interpolation. The intrinsics of the
    first camera are reused for every interpolated view.

    Args:
        cameras: Key-frame cameras to interpolate between (needs at least two poses
            for Slerp/interp1d to be well defined).
        num_views: Number of cameras in the returned path.

    Returns:
        A ``Cameras`` object containing ``num_views`` interpolated poses.
    """
    c2ws = np.stack(cameras.camera_to_worlds.cpu().numpy())

    key_rots = Rotation.from_matrix(c2ws[:, :3, :3])
    key_times = list(range(len(c2ws)))
    slerp = Slerp(key_times, key_rots)
    interp = interp1d(key_times, c2ws[:, :3, 3], axis=0)

    render_c2ws = []
    for i in range(num_views):
        # Parameter sweeps [0, len(c2ws) - 1); the final key pose is approached
        # but never exactly reached.
        time = float(i) / num_views * (len(c2ws) - 1)
        cam_location = interp(time)
        cam_rot = slerp(time).as_matrix()
        c2w = np.eye(4)
        c2w[:3, :3] = cam_rot
        c2w[:3, 3] = cam_location
        render_c2ws.append(c2w)
    render_c2ws = torch.from_numpy(np.stack(render_c2ws, axis=0))

    # use intrinsic of first camera
    camera_path = Cameras(
        fx=cameras[0].fx,
        fy=cameras[0].fy,
        cx=cameras[0].cx,
        cy=cameras[0].cy,
        height=cameras[0].height,
        width=cameras[0].width,
        camera_to_worlds=render_c2ws[:, :3, :4],
        camera_type=cameras[0].camera_type,
    )
    return camera_path
from __future__ import annotations
import json
from dataclasses import dataclass, field
from pathlib import Path
from typing import List
import cv2
import mediapy as media
import numpy as np
import open3d as o3d
import torch
import tyro
from rich.console import Console
from scipy.interpolate import interp1d
from scipy.spatial.transform import Rotation, Slerp
from typing_extensions import Literal, assert_never
from nerfstudio.cameras.camera_paths import (
generate_ellipse_path,
get_path_from_json,
get_spiral_path,
)
from nerfstudio.cameras.cameras import Cameras
from nerfstudio.configs.base_config import Config
from nerfstudio.data.datamanagers.base_datamanager import AnnotatedDataParserUnion
from nerfstudio.data.dataparsers.sdfstudio_dataparser import SDFStudioDataParserConfig
from nerfstudio.utils import install_checks
CONSOLE = Console(width=120)  # shared rich console for all CLI output in this script
class Cameras(TensorDataclass):
    """Dataparser outputs for the image dataset and the ray generator.

    Note: currently only supports cameras with the same principal points and types. The reason we type
    the focal lengths, principal points, and image sizes as tensors is to allow for batched cameras
    down the line in cases where your batches of camera data don't come from the same cameras.

    If a single value is provided, it is broadcasted to all cameras.

    Args:
        camera_to_worlds: Camera to world matrices. Tensor of per-image c2w matrices, in [R | t] format
        fx: Focal length x
        fy: Focal length y
        cx: Principal point x
        cy: Principal point y
        width: Image width
        height: Image height
        distortion_params: OpenCV 6 radial distortion coefficients
        camera_type: Type of camera model. This will be an int corresponding to the CameraType enum.
        times: Timestamps for each camera
    """

    # Per-camera extrinsics in [R | t] form (no bottom homogeneous row).
    camera_to_worlds: TensorType["num_cameras":..., 3, 4]
    # Intrinsics; each keeps a trailing singleton dim so they broadcast per camera.
    fx: TensorType["num_cameras":..., 1]
    fy: TensorType["num_cameras":..., 1]
    cx: TensorType["num_cameras":..., 1]
    cy: TensorType["num_cameras":..., 1]
    # Image sizes in pixels.
    width: TensorType["num_cameras":..., 1]
    height: TensorType["num_cameras":..., 1]
    # Optional OpenCV-style distortion coefficients (6 per camera).
    distortion_params: Optional[TensorType["num_cameras":..., 6]]
    # Integer CameraType enum value per camera.
    camera_type: TensorType["num_cameras":..., 1]
    # Optional per-camera timestamps.
    times: Optional[TensorType["num_cameras", 1]]
def __init__(
    self,
    camera_to_worlds: TensorType["batch_c2ws":..., 3, 4],
    fx: Union[TensorType["batch_fxs":..., 1], float],
    fy: Union[TensorType["batch_fys":..., 1], float],
    cx: Union[TensorType["batch_cxs":..., 1], float],
    cy: Union[TensorType["batch_cys":..., 1], float],
    width: Optional[Union[TensorType["batch_ws":..., 1], int]] = None,
    height: Optional[Union[TensorType["batch_hs":..., 1], int]] = None,
    distortion_params: Optional[TensorType["batch_dist_params":..., 6]] = None,
    camera_type: Optional[
        Union[
            TensorType["batch_cam_types":..., 1],
            int,
            List[CameraType],
            CameraType,
        ]
    ] = CameraType.PERSPECTIVE,
    times: Optional[TensorType["num_cameras"]] = None,
):
    """Initializes the Cameras object.

    Note on Input Tensor Dimensions: All of these tensors have items of dimensions TensorType[3, 4]
    (in the case of the c2w matrices), TensorType[6] (in the case of distortion params), or
    TensorType[1] (in the case of the rest of the elements). The dimensions before that are
    considered the batch dimension of that tensor (batch_c2ws, batch_fxs, etc.). We will broadcast
    all the tensors to be the same batch dimension. This means you can use any combination of the
    input types in the function signature and it won't break. Your batch size for all tensors
    must be broadcastable to the same size, and the resulting number of batch dimensions will be
    the batch dimension with the largest number of dimensions.
    """
    # This will notify the tensordataclass that we have a field with more than 1 dimension
    self._field_custom_dimensions = {"camera_to_worlds": 2}

    self.camera_to_worlds = camera_to_worlds

    # fx fy calculation
    self.fx = self._init_get_fc_xy(fx, "fx")  # @dataclass's post_init will take care of broadcasting
    self.fy = self._init_get_fc_xy(fy, "fy")  # @dataclass's post_init will take care of broadcasting

    # cx cy calculation
    self.cx = self._init_get_fc_xy(cx, "cx")  # @dataclass's post_init will take care of broadcasting
    self.cy = self._init_get_fc_xy(cy, "cy")  # @dataclass's post_init will take care of broadcasting

    # Distortion Params Calculation:
    self.distortion_params = distortion_params  # @dataclass's post_init will take care of broadcasting

    # @dataclass's post_init will take care of broadcasting
    # When height/width are omitted they fall back to 2 * principal point (see helper).
    self.height = self._init_get_height_width(height, self.cy)
    self.width = self._init_get_height_width(width, self.cx)
    self.camera_type = self._init_get_camera_type(camera_type)
    self.times = self._init_get_times(times)

    self.__post_init__()  # This will do the dataclass post_init and broadcast all the tensors
def _init_get_fc_xy(self, fc_xy, name):
"""
Parses the input focal length / principle point x or y and returns a tensor of the correct shape
Only needs to make sure that we a 1 in the last dimension if it is a tensor. If it is a float, we
just need to make it into a tensor and it will be broadcasted later in the __post_init__ function.
Args:
fc_xy: The focal length / principle point x or y
name: The name of the variable. Used for error messages
"""
if isinstance(fc_xy, float):
fc_xy = torch.Tensor([fc_xy], device=self.device)
elif isinstance(fc_xy, torch.Tensor):
if fc_xy.ndim == 0 or fc_xy.shape[-1] != 1:
fc_xy = fc_xy.unsqueeze(-1)
fc_xy = fc_xy.to(self.device)
else:
raise ValueError(f"{name} must be a float or tensor, got {type(fc_xy)}")
return fc_xy
def _init_get_camera_type(
    self,
    camera_type: Union[
        TensorType["batch_cam_types":..., 1], TensorType["batch_cam_types":...], int, List[CameraType], CameraType
    ],
) -> TensorType["num_cameras":..., 1]:
    """
    Parses the __init__() argument camera_type

    Camera Type Calculation:
        If CameraType, convert to int and then to tensor, then broadcast to all cameras
        If List of CameraTypes, convert to ints and then to tensor, then broadcast to all cameras
        If int, first go to tensor and then broadcast to all cameras
        If tensor, broadcast to all cameras

    Args:
        camera_type: camera_type argument from __init__()

    Raises:
        ValueError: If ``camera_type`` is none of the supported input forms.
    """
    if isinstance(camera_type, CameraType):
        camera_type = torch.tensor([camera_type.value], device=self.device)
    elif isinstance(camera_type, List) and isinstance(camera_type[0], CameraType):
        camera_type = torch.tensor([[c.value] for c in camera_type], device=self.device)
    elif isinstance(camera_type, int):
        camera_type = torch.tensor([camera_type], device=self.device)
    elif isinstance(camera_type, torch.Tensor):
        assert not torch.is_floating_point(
            camera_type
        ), f"camera_type tensor must be of type int, not: {camera_type.dtype}"
        camera_type = camera_type.to(self.device)
        if camera_type.ndim == 0 or camera_type.shape[-1] != 1:
            # Guarantee a trailing singleton dimension for later broadcasting.
            camera_type = camera_type.unsqueeze(-1)
        # Deliberately disabled: mixed camera types within one batch are allowed here.
        # assert torch.all(
        #     camera_type.view(-1)[0] == camera_type
        # ), "Batched cameras of different camera_types will be allowed in the future."
    else:
        raise ValueError(
            'Invalid camera_type. Must be CameraType, List[CameraType], int, or torch.Tensor["num_cameras"]. \
                Received: '
            + str(type(camera_type))
        )
    return camera_type
def _init_get_height_width(
    self,
    h_w: Union[TensorType["batch_hws":..., 1], TensorType["batch_hws":...], int, None],
    c_x_y: TensorType["batch_cxys":...],
) -> TensorType["num_cameras":..., 1]:
    """
    Parses the __init__() argument for height or width

    Height/Width Calculation:
        If int, first go to tensor and then broadcast to all cameras
        If tensor, broadcast to all cameras
        If none, use cx or cy * 2
        Else raise error

    Args:
        h_w: height or width argument from __init__()
        c_x_y: cx or cy for when h_w == None

    Raises:
        ValueError: If ``h_w`` is none of the supported input forms.
    """
    if isinstance(h_w, int):
        h_w = torch.Tensor([h_w]).to(torch.int64).to(self.device)
    elif isinstance(h_w, torch.Tensor):
        assert not torch.is_floating_point(h_w), f"height and width tensor must be of type int, not: {h_w.dtype}"
        h_w = h_w.to(torch.int64).to(self.device)
        if h_w.ndim == 0 or h_w.shape[-1] != 1:
            # Guarantee a trailing singleton dimension for later broadcasting.
            h_w = h_w.unsqueeze(-1)
        # assert torch.all(h_w == h_w.view(-1)[0]), "Batched cameras of different h, w will be allowed in the future."
    elif h_w is None:
        # NOTE(review): wrapping in torch.Tensor(...) re-casts the int64 result to the
        # default float dtype, unlike the int64 tensors produced by the other branches —
        # confirm whether downstream code relies on integer heights/widths here.
        h_w = torch.Tensor((c_x_y * 2).to(torch.int64).to(self.device))
    else:
        raise ValueError("Height must be an int, tensor, or None, received: " + str(type(h_w)))
    return h_w
def _init_get_times(self, times):
if times is None:
times = None
elif isinstance(times, torch.Tensor):
if times.ndim == 0 or times.shape[-1] != 1:
times = times.unsqueeze(-1).to(self.device)
else:
raise ValueError(f"times must be None or a tensor, got {type(times)}")
return times
def device(self):
    """Returns the device that the camera is on."""
    # NOTE(review): other methods in this class access this as an attribute
    # (e.g. `fc_xy.to(self.device)`), which suggests a @property decorator was
    # present and lost in extraction — confirm against the original file.
    return self.camera_to_worlds.device
def image_height(self) -> TensorType["num_cameras":..., 1]:
    """Returns the height of the images."""
    return self.height
def image_width(self) -> TensorType["num_cameras":..., 1]:
    """Returns the width of the images."""
    return self.width
def is_jagged(self):
    """
    Returns whether or not the cameras are "jagged" (i.e. the height and widths are different, meaning that
    you cannot concatenate the image coordinate maps together)
    """

    def _varies(values: torch.Tensor) -> bool:
        # True when not every entry equals the first entry.
        return not bool(torch.all(values == values.view(-1)[0]))

    return _varies(self.height) or _varies(self.width)
def get_image_coords(
    self, pixel_offset: float = 0.5, index: Optional[Tuple] = None
) -> TensorType["height", "width", 2]:
    """Get the image coordinate grid for one of the cameras in this object.

    If no index is specified, the grid is sized to the maximum height and width
    across all cameras in this object.

    Args:
        pixel_offset: Offset for each pixel. Defaults to center of pixel (0.5)
        index: Tuple of indices into the batch dimensions of the camera. Defaults to None, which returns the 0th
            flattened camera

    Returns:
        Grid of image coordinates.
    """
    if index is None:
        # Largest possible image across the whole batch.
        image_height = torch.max(self.image_height.view(-1))
        image_width = torch.max(self.image_width.view(-1))
    else:
        image_height = self.image_height[index].item()
        image_width = self.image_width[index].item()
    rows, cols = torch.meshgrid(torch.arange(image_height), torch.arange(image_width), indexing="ij")
    # Stored as (y, x) coordinates, offset to the pixel center.
    return torch.stack((rows, cols), dim=-1) + pixel_offset
def generate_rays(  # pylint: disable=too-many-statements
    self,
    camera_indices: Union[TensorType["num_rays":..., "num_cameras_batch_dims"], int],
    coords: Optional[TensorType["num_rays":..., 2]] = None,
    camera_opt_to_camera: Optional[TensorType["num_rays":..., 3, 4]] = None,
    distortion_params_delta: Optional[TensorType["num_rays":..., 6]] = None,
    keep_shape: Optional[bool] = None,
    disable_distortion: bool = False,
) -> RayBundle:
    """Generates rays for the given camera indices.

    This function will standardize the input arguments and then call the _generate_rays_from_coords function
    to generate the rays. Our goal is to parse the arguments and then get them into the right shape:
        - camera_indices: (num_rays:..., num_cameras_batch_dims)
        - coords: (num_rays:..., 2)
        - camera_opt_to_camera: (num_rays:..., 3, 4) or None
        - distortion_params_delta: (num_rays:..., 6) or None

    Read the docstring for _generate_rays_from_coords for more information on how we generate the rays
    after we have standardized the arguments.

    We are only concerned about different combinations of camera_indices and coords matrices, and the following
    are the 4 cases we have to deal with:
        1. isinstance(camera_indices, int) and coords == None
            - In this case we broadcast our camera_indices / coords shape (h, w, 1 / 2 respectively)
        2. isinstance(camera_indices, int) and coords != None
            - In this case, we broadcast camera_indices to the same batch dim as coords
        3. not isinstance(camera_indices, int) and coords == None
            - In this case, we will need to set coords so that it is of shape (h, w, num_rays, 2), and broadcast
                all our other args to match the new definition of num_rays := (h, w) + num_rays
        4. not isinstance(camera_indices, int) and coords != None
            - In this case, we have nothing to do, only check that the arguments are of the correct shape

    There is one more edge case we need to be careful with: when we have "jagged cameras" (ie: different heights
    and widths for each camera). This isn't problematic when we specify coords, since coords is already a tensor.
    When coords == None (ie: when we render out the whole image associated with this camera), we run into problems
    since there's no way to stack each coordinate map as all coordinate maps are all different shapes. In this case,
    we will need to flatten each individual coordinate map and concatenate them, giving us only one batch dimension,
    regaurdless of the number of prepended extra batch dimensions in the camera_indices tensor.

    Args:
        camera_indices: Camera indices of the flattened cameras object to generate rays for.
        coords: Coordinates of the pixels to generate rays for. If None, the full image will be rendered.
        camera_opt_to_camera: Optional transform for the camera to world matrices.
        distortion_params_delta: Optional delta for the distortion parameters.
        keep_shape: If None, then we default to the regular behavior of flattening if cameras is jagged, otherwise
            keeping dimensions. If False, we flatten at the end. If True, then we keep the shape of the
            camera_indices and coords tensors (if we can).
        disable_distortion: If True, disables distortion.

    Returns:
        Rays for the given camera indices and coords.
    """
    # Check the argument types to make sure they're valid and all shaped correctly
    assert isinstance(camera_indices, (torch.Tensor, int)), "camera_indices must be a tensor or int"
    assert coords is None or isinstance(coords, torch.Tensor), "coords must be a tensor or None"
    assert camera_opt_to_camera is None or isinstance(camera_opt_to_camera, torch.Tensor)
    assert distortion_params_delta is None or isinstance(distortion_params_delta, torch.Tensor)
    if isinstance(camera_indices, torch.Tensor) and isinstance(coords, torch.Tensor):
        num_rays_shape = camera_indices.shape[:-1]
        errormsg = "Batch dims of inputs must match when inputs are all tensors"
        assert coords.shape[:-1] == num_rays_shape, errormsg
        assert camera_opt_to_camera is None or camera_opt_to_camera.shape[:-2] == num_rays_shape, errormsg
        assert distortion_params_delta is None or distortion_params_delta.shape[:-1] == num_rays_shape, errormsg

    # If zero dimensional, we need to unsqueeze to get a batch dimension and then squeeze later
    if not self.shape:
        cameras = self.reshape((1,))
        assert torch.all(
            torch.tensor(camera_indices == 0) if isinstance(camera_indices, int) else camera_indices == 0
        ), "Can only index into single camera with no batch dimensions if index is zero"
    else:
        cameras = self

    # If the camera indices are an int, then we need to make sure that the camera batch is 1D
    if isinstance(camera_indices, int):
        assert (
            len(cameras.shape) == 1
        ), "camera_indices must be a tensor if cameras are batched with more than 1 batch dimension"
        camera_indices = torch.tensor([camera_indices], device=cameras.device)

    assert camera_indices.shape[-1] == len(
        cameras.shape
    ), "camera_indices must have shape (num_rays:..., num_cameras_batch_dims)"

    # If keep_shape is True, then we need to make sure that the camera indices in question
    # are all the same height and width and can actually be batched while maintaining the image
    # shape
    if keep_shape is True:
        assert torch.all(cameras.height[camera_indices] == cameras.height[camera_indices[0]]) and torch.all(
            cameras.width[camera_indices] == cameras.width[camera_indices[0]]
        ), "Can only keep shape if all cameras have the same height and width"

    # If the cameras don't all have same height / width, if coords is not none, we will need to generate
    # a flat list of coords for each camera and then concatenate otherwise our rays will be jagged.
    # Camera indices, camera_opt, and distortion will also need to be broadcasted accordingly which is non-trivial
    if cameras.is_jagged and coords is None and (keep_shape is None or keep_shape is False):
        index_dim = camera_indices.shape[-1]
        camera_indices = camera_indices.reshape(-1, index_dim)
        _coords = [cameras.get_image_coords(index=tuple(index)).reshape(-1, 2) for index in camera_indices]
        # Repeat each camera index once per pixel of its (per-camera sized) image.
        camera_indices = torch.cat(
            [index.unsqueeze(0).repeat(coords.shape[0], 1) for index, coords in zip(camera_indices, _coords)],
        )
        coords = torch.cat(_coords, dim=0)
        assert coords.shape[0] == camera_indices.shape[0]
        # Need to get the coords of each indexed camera and flatten all coordinate maps and concatenate them

    # The case where we aren't jagged && keep_shape (since otherwise coords is already set) and coords
    # is None. In this case we append (h, w) to the num_rays dimensions for all tensors. In this case,
    # each image in camera_indices has to have the same shape since otherwise we would have error'd when
    # we checked keep_shape is valid or we aren't jagged.
    if coords is None:
        index_dim = camera_indices.shape[-1]
        index = camera_indices.reshape(-1, index_dim)[0]
        coords: torch.Tensor = cameras.get_image_coords(index=tuple(index))  # (h, w, 2)
        coords = coords.reshape(coords.shape[:2] + (1,) * len(camera_indices.shape[:-1]) + (2,))  # (h, w, 1..., 2)
        coords = coords.expand(coords.shape[:2] + camera_indices.shape[:-1] + (2,))  # (h, w, num_rays, 2)
        camera_opt_to_camera = (  # (h, w, num_rays, 3, 4) or None
            camera_opt_to_camera.broadcast_to(coords.shape[:-1] + (3, 4))
            if camera_opt_to_camera is not None
            else None
        )
        distortion_params_delta = (  # (h, w, num_rays, 6) or None
            distortion_params_delta.broadcast_to(coords.shape[:-1] + (6,))
            if distortion_params_delta is not None
            else None
        )

    # If camera indices was an int or coords was none, we need to broadcast our indices along batch dims
    camera_indices = camera_indices.broadcast_to(coords.shape[:-1] + (len(cameras.shape),)).to(torch.long)

    # Checking our tensors have been standardized
    assert isinstance(coords, torch.Tensor) and isinstance(camera_indices, torch.Tensor)
    assert camera_indices.shape[-1] == len(cameras.shape)
    assert camera_opt_to_camera is None or camera_opt_to_camera.shape[:-2] == coords.shape[:-1]
    assert distortion_params_delta is None or distortion_params_delta.shape[:-1] == coords.shape[:-1]

    # This will do the actual work of generating the rays now that we have standardized the inputs
    # raybundle.shape == (num_rays) when done
    # pylint: disable=protected-access
    raybundle = cameras._generate_rays_from_coords(
        camera_indices, coords, camera_opt_to_camera, distortion_params_delta, disable_distortion=disable_distortion
    )

    # If we have mandated that we don't keep the shape, then we flatten
    if keep_shape is False:
        raybundle = raybundle.flatten()

    # TODO: We should have to squeeze the last dimension here if we started with zero batch dims, but never have to,
    # so there might be a rogue squeeze happening somewhere, and this may cause some unintended behaviour
    # that we haven't caught yet with tests
    return raybundle
# pylint: disable=too-many-statements
def _generate_rays_from_coords(
    self,
    camera_indices: TensorType["num_rays":..., "num_cameras_batch_dims"],
    coords: TensorType["num_rays":..., 2],
    camera_opt_to_camera: Optional[TensorType["num_rays":..., 3, 4]] = None,
    distortion_params_delta: Optional[TensorType["num_rays":..., 6]] = None,
    disable_distortion: bool = False,
) -> RayBundle:
    """Generates rays for the given camera indices and coords where self isn't jagged
    This is a fairly complex function, so let's break this down slowly.
    Shapes involved:
        - num_rays: This is your output raybundle shape. It dictates the number and shape of the rays generated
        - num_cameras_batch_dims: This is the number of dimensions of our camera
    Args:
        camera_indices: Camera indices of the flattened cameras object to generate rays for.
            The shape of this is such that indexing into camera_indices["num_rays":...] will return the
            index into each batch dimension of the camera in order to get the correct camera specified by
            "num_rays".
            Example:
                >>> cameras = Cameras(...)
                >>> cameras.shape
                (2, 3, 4)
                >>> camera_indices = torch.tensor([0, 0, 0]) # We need an axis of length 3 since cameras.ndim == 3
                >>> camera_indices.shape
                (3,)
                >>> coords = torch.tensor([1,1])
                >>> coords.shape
                (2,)
                >>> out_rays = cameras.generate_rays(camera_indices=camera_indices, coords = coords)
                # This will generate a RayBundle with a single ray for the
                # camera at cameras[0,0,0] at image coordinates (1,1), so out_rays.shape == ()
                >>> out_rays.shape
                ()
                >>> camera_indices = torch.tensor([[0,0,0]])
                >>> camera_indices.shape
                (1, 3)
                >>> coords = torch.tensor([[1,1]])
                >>> coords.shape
                (1, 2)
                >>> out_rays = cameras.generate_rays(camera_indices=camera_indices, coords = coords)
                # This will generate a RayBundle with a single ray for the
                # camera at cameras[0,0,0] at point (1,1), so out_rays.shape == (1,)
                # since we added an extra dimension in front of camera_indices
                >>> out_rays.shape
                (1,)
            If you want more examples, check tests/cameras/test_cameras and the function check_generate_rays_shape
            The bottom line is that for camera_indices: (num_rays:..., num_cameras_batch_dims), num_rays is the
            output shape and if you index into the output RayBundle with some indices [i:...], if you index into
            camera_indices with camera_indices[i:...] as well, you will get a 1D tensor containing the batch
            indices into the original cameras object corresponding to that ray (ie: you will get the camera
            from our batched cameras corresponding to the ray at RayBundle[i:...]).
        coords: Coordinates of the pixels to generate rays for. If None, the full image will be rendered, meaning
            height and width get prepended to the num_rays dimensions. Indexing into coords with [i:...] will
            get you the image coordinates [x, y] of that specific ray located at output RayBundle[i:...].
        camera_opt_to_camera: Optional transform for the camera to world matrices.
            In terms of shape, it follows the same rules as coords, but indexing into it with [i:...] gets you
            the 2D camera to world transform matrix for the camera optimization at RayBundle[i:...].
        distortion_params_delta: Optional delta for the distortion parameters.
            In terms of shape, it follows the same rules as coords, but indexing into it with [i:...] gets you
            the 1D tensor with the 6 distortion parameters for the camera optimization at RayBundle[i:...].
        disable_distortion: If True, disables distortion.
    Returns:
        Rays for the given camera indices and coords. RayBundle.shape == num_rays
    """
    # Make sure we're on the right devices
    camera_indices = camera_indices.to(self.device)
    coords = coords.to(self.device)
    # Checking to make sure everything is of the right shape and type
    num_rays_shape = camera_indices.shape[:-1]
    assert camera_indices.shape == num_rays_shape + (self.ndim,)
    assert coords.shape == num_rays_shape + (2,)
    assert coords.shape[-1] == 2
    assert camera_opt_to_camera is None or camera_opt_to_camera.shape == num_rays_shape + (3, 4)
    assert distortion_params_delta is None or distortion_params_delta.shape == num_rays_shape + (6,)
    # Here, we've broken our indices down along the num_cameras_batch_dims dimension allowing us to index by all
    # of our output rays at each dimension of our cameras object
    true_indices = [camera_indices[..., i] for i in range(camera_indices.shape[-1])]
    # Get all our focal lengths, principal points and make sure they are the right shapes
    y = coords[..., 0]  # (num_rays,) get rid of the last dimension
    x = coords[..., 1]  # (num_rays,) get rid of the last dimension
    fx, fy = self.fx[true_indices].squeeze(-1), self.fy[true_indices].squeeze(-1)  # (num_rays,)
    cx, cy = self.cx[true_indices].squeeze(-1), self.cy[true_indices].squeeze(-1)  # (num_rays,)
    assert (
        y.shape == num_rays_shape
        and x.shape == num_rays_shape
        and fx.shape == num_rays_shape
        and fy.shape == num_rays_shape
        and cx.shape == num_rays_shape
        and cy.shape == num_rays_shape
    ), (
        str(num_rays_shape)
        + str(y.shape)
        + str(x.shape)
        + str(fx.shape)
        + str(fy.shape)
        + str(cx.shape)
        + str(cy.shape)
    )
    # Get our image coordinates and image coordinates offset by 1 (offsets used for dx, dy calculations)
    # Also make sure the shapes are correct
    # Note the y flip: image row index grows downward while camera +y points up.
    coord = torch.stack([(x - cx) / fx, -(y - cy) / fy], -1)  # (num_rays, 2)
    coord_x_offset = torch.stack([(x - cx + 1) / fx, -(y - cy) / fy], -1)  # (num_rays, 2)
    coord_y_offset = torch.stack([(x - cx) / fx, -(y - cy + 1) / fy], -1)  # (num_rays, 2)
    assert (
        coord.shape == num_rays_shape + (2,)
        and coord_x_offset.shape == num_rays_shape + (2,)
        and coord_y_offset.shape == num_rays_shape + (2,)
    )
    # Stack image coordinates and image coordinates offset by 1, check shapes too
    coord_stack = torch.stack([coord, coord_x_offset, coord_y_offset], dim=0)  # (3, num_rays, 2)
    assert coord_stack.shape == (3,) + num_rays_shape + (2,)
    # Undistorts our images according to our distortion parameters
    if not disable_distortion:
        distortion_params = None
        if self.distortion_params is not None:
            distortion_params = self.distortion_params[true_indices]
            if distortion_params_delta is not None:
                distortion_params = distortion_params + distortion_params_delta
        elif distortion_params_delta is not None:
            distortion_params = distortion_params_delta
        # Do not apply distortion for equirectangular images
        if distortion_params is not None:
            mask = (self.camera_type[true_indices] != CameraType.EQUIRECTANGULAR.value).squeeze(-1)  # (num_rays)
            coord_mask = torch.stack([mask, mask, mask], dim=0)
            if mask.any():
                coord_stack[coord_mask, :] = camera_utils.radial_and_tangential_undistort(
                    coord_stack[coord_mask, :].reshape(3, -1, 2),
                    distortion_params[mask, :],
                ).reshape(-1, 2)
    # Make sure after we have undistorted our images, the shapes are still correct
    assert coord_stack.shape == (3,) + num_rays_shape + (2,)
    # Gets our directions for all our rays in camera coordinates and checks shapes at the end
    # Here, directions_stack is of shape (3, num_rays, 3)
    # directions_stack[0] is the direction for ray in camera coordinates
    # directions_stack[1] is the direction for ray in camera coordinates offset by 1 in x
    # directions_stack[2] is the direction for ray in camera coordinates offset by 1 in y
    cam_types = torch.unique(self.camera_type, sorted=False)
    directions_stack = torch.empty((3,) + num_rays_shape + (3,), device=self.device)
    if CameraType.PERSPECTIVE.value in cam_types:
        mask = (self.camera_type[true_indices] == CameraType.PERSPECTIVE.value).squeeze(-1)  # (num_rays)
        mask = torch.stack([mask, mask, mask], dim=0)
        directions_stack[..., 0][mask] = torch.masked_select(coord_stack[..., 0], mask).float()
        directions_stack[..., 1][mask] = torch.masked_select(coord_stack[..., 1], mask).float()
        directions_stack[..., 2][mask] = -1.0
    if CameraType.FISHEYE.value in cam_types:
        mask = (self.camera_type[true_indices] == CameraType.FISHEYE.value).squeeze(-1)  # (num_rays)
        mask = torch.stack([mask, mask, mask], dim=0)
        theta = torch.sqrt(torch.sum(coord_stack**2, dim=-1))
        theta = torch.clip(theta, 0.0, math.pi)
        # NOTE(review): theta == 0 (exact image center) makes sin_theta / theta NaN here;
        # presumably pixel coords never land exactly on the principal point — confirm.
        sin_theta = torch.sin(theta)
        directions_stack[..., 0][mask] = torch.masked_select(coord_stack[..., 0] * sin_theta / theta, mask).float()
        directions_stack[..., 1][mask] = torch.masked_select(coord_stack[..., 1] * sin_theta / theta, mask).float()
        directions_stack[..., 2][mask] = -torch.masked_select(torch.cos(theta), mask)
    if CameraType.EQUIRECTANGULAR.value in cam_types:
        mask = (self.camera_type[true_indices] == CameraType.EQUIRECTANGULAR.value).squeeze(-1)  # (num_rays)
        mask = torch.stack([mask, mask, mask], dim=0)
        # For equirect, fx = fy = height = width/2
        # Then coord[..., 0] goes from -1 to 1 and coord[..., 1] goes from -1/2 to 1/2
        theta = -torch.pi * coord_stack[..., 0]  # minus sign for right-handed
        phi = torch.pi * (0.5 - coord_stack[..., 1])
        # use spherical in local camera coordinates (+y up, x=0 and z<0 is theta=0)
        directions_stack[..., 0][mask] = torch.masked_select(-torch.sin(theta) * torch.sin(phi), mask).float()
        directions_stack[..., 1][mask] = torch.masked_select(torch.cos(phi), mask).float()
        directions_stack[..., 2][mask] = torch.masked_select(-torch.cos(theta) * torch.sin(phi), mask).float()
    for value in cam_types:
        if value not in [CameraType.PERSPECTIVE.value, CameraType.FISHEYE.value, CameraType.EQUIRECTANGULAR.value]:
            raise ValueError(f"Camera type {value} not supported.")
    assert directions_stack.shape == (3,) + num_rays_shape + (3,)
    c2w = self.camera_to_worlds[true_indices]
    assert c2w.shape == num_rays_shape + (3, 4)
    # Compose the per-ray camera-optimization correction into the camera-to-world pose.
    if camera_opt_to_camera is not None:
        c2w = pose_utils.multiply(c2w, camera_opt_to_camera)
    rotation = c2w[..., :3, :3]  # (..., 3, 3)
    assert rotation.shape == num_rays_shape + (3, 3)
    # Rotate all three direction variants into world space in one batched contraction.
    directions_stack = torch.sum(
        directions_stack[..., None, :] * rotation, dim=-1
    )  # (..., 1, 3) * (..., 3, 3) -> (..., 3)
    # Keep only the norm of the central (un-offset) ray; used by callers to rescale depth.
    directions_norm = torch.norm(directions_stack, dim=-1, keepdim=True)
    directions_norm = directions_norm[0]
    directions_stack = normalize(directions_stack, dim=-1)
    assert directions_stack.shape == (3,) + num_rays_shape + (3,)
    origins = c2w[..., :3, 3]  # (..., 3)
    assert origins.shape == num_rays_shape + (3,)
    directions = directions_stack[0]
    assert directions.shape == num_rays_shape + (3,)
    # norms of the vector going between adjacent coords, giving us dx and dy per output ray
    dx = torch.sqrt(torch.sum((directions - directions_stack[1]) ** 2, dim=-1))  # ("num_rays":...,)
    dy = torch.sqrt(torch.sum((directions - directions_stack[2]) ** 2, dim=-1))  # ("num_rays":...,)
    assert dx.shape == num_rays_shape and dy.shape == num_rays_shape
    # Approximate the solid angle of each pixel as the product of neighbor-ray spacings.
    pixel_area = (dx * dy)[..., None]  # ("num_rays":..., 1)
    assert pixel_area.shape == num_rays_shape + (1,)
    times = self.times[camera_indices, 0] if self.times is not None else None
    return RayBundle(
        origins=origins,
        directions=directions,
        pixel_area=pixel_area,
        camera_indices=camera_indices,
        directions_norm=directions_norm,
        times=times,
    )
def to_json(
    self, camera_idx: int, image: Optional[TensorType["height", "width", 2]] = None, max_size: Optional[int] = None
) -> Dict:
    """Convert a camera to a json dictionary.

    Args:
        camera_idx: Index (into the flattened cameras) of the camera to convert.
        image: An image in range [0, 1] that is encoded to a base64 string.
            NOTE(review): annotated with 2 channels, but the value is JPEG-encoded
            below, which suggests an RGB (height, width, 3) image — confirm.
        max_size: Max size to resize the image to if present.

    Returns:
        A JSON representation of the camera
    """
    flattened = self.flatten()
    json_ = {
        "type": "PinholeCamera",
        "cx": flattened[camera_idx].cx.item(),
        "cy": flattened[camera_idx].cy.item(),
        "fx": flattened[camera_idx].fx.item(),
        "fy": flattened[camera_idx].fy.item(),
        "camera_to_world": self.camera_to_worlds[camera_idx].tolist(),
        "camera_index": camera_idx,
        "times": flattened[camera_idx].times.item() if self.times is not None else None,
    }
    if image is not None:
        # Scale [0, 1] floats to uint8 and detach from the autograd graph.
        image_uint8 = (image * 255).detach().type(torch.uint8)
        if max_size is not None:
            # torchvision.resize expects channels-first; permute there and back.
            image_uint8 = image_uint8.permute(2, 0, 1)
            image_uint8 = torchvision.transforms.functional.resize(image_uint8, max_size)  # type: ignore
            image_uint8 = image_uint8.permute(1, 2, 0)
        image_uint8 = image_uint8.cpu().numpy()
        # JPEG-encode and embed the image as a base64 data URI.
        data = cv2.imencode(".jpg", image_uint8)[1].tobytes()
        json_["image"] = str("data:image/jpeg;base64," + base64.b64encode(data).decode("ascii"))
    return json_
def get_intrinsics_matrices(self) -> TensorType["num_cameras":..., 3, 3]:
    """Build the pinhole intrinsics matrix K for every camera in the batch.

    Returns:
        Float32 tensor of shape (*batch, 3, 3) holding each camera's intrinsics.
    """
    intrinsics = torch.zeros((*self.shape, 3, 3), dtype=torch.float32)
    # Place focal lengths and principal point into their canonical K positions.
    for (row, col), value in (
        ((0, 0), self.fx),
        ((1, 1), self.fy),
        ((0, 2), self.cx),
        ((1, 2), self.cy),
    ):
        intrinsics[..., row, col] = value.squeeze(-1)
    intrinsics[..., 2, 2] = 1.0
    return intrinsics
def rescale_output_resolution(
    self, scaling_factor: Union[TensorType["num_cameras":...], TensorType["num_cameras":..., 1], float, int]
) -> None:
    """Rescale the output resolution of the cameras in place.

    Args:
        scaling_factor: Scaling factor to apply to the output resolution. Either a
            scalar applied to every camera, or a per-camera tensor of shape
            ``self.shape`` or ``(*self.shape, 1)``.

    Raises:
        ValueError: If the scaling factor has an unsupported type or shape.
    """
    # Normalize the argument to a tensor broadcastable against the intrinsics.
    if isinstance(scaling_factor, (float, int)):
        scale = torch.tensor([scaling_factor]).to(self.device).broadcast_to((self.cx.shape))
    elif isinstance(scaling_factor, torch.Tensor) and scaling_factor.shape == self.shape:
        scale = scaling_factor.unsqueeze(-1)
    elif isinstance(scaling_factor, torch.Tensor) and scaling_factor.shape == (*self.shape, 1):
        scale = scaling_factor
    else:
        raise ValueError(
            f"Scaling factor must be a float, int, or a tensor of shape {self.shape} or {(*self.shape, 1)}."
        )
    # Intrinsics scale linearly with resolution.
    for attr in ("fx", "fy", "cx", "cy"):
        setattr(self, attr, getattr(self, attr) * scale)
    # Image dimensions must stay integral.
    self.height = (self.height * scale).to(torch.int64)
    self.width = (self.width * scale).to(torch.int64)
The provided code snippet includes necessary dependencies for implementing the `_render_trajectory_video` function. Write a Python function `def _render_trajectory_video( meshfile: Path, cameras: Cameras, output_filename: Path, rendered_output_names: str, rendered_resolution_scaling_factor: float = 1.0, seconds: float = 5.0, output_format: Literal["images", "video"] = "video", merge_type: Literal["half", "concat"] = "half", ) -> None` to solve the following problem:
Helper function to create a video of the spiral trajectory. Args: meshfile: Path to the mesh file to render. cameras: Cameras to render. output_filename: Name of the output file. rendered_output_names: List of outputs to visualise. rendered_resolution_scaling_factor: Scaling factor to apply to the camera image resolution. seconds: Length of output video. output_format: How to save output data. merge_type: How to combine the rendered outputs into a single frame.
Here is the function:
def _render_trajectory_video(
    meshfile: Path,
    cameras: Cameras,
    output_filename: Path,
    rendered_output_names: str,
    rendered_resolution_scaling_factor: float = 1.0,
    seconds: float = 5.0,
    output_format: Literal["images", "video"] = "video",
    merge_type: Literal["half", "concat"] = "half",
) -> None:
    """Render a mesh with Open3D along a camera trajectory and save frames/video.

    Args:
        meshfile: Path to the mesh file to render.
        cameras: Cameras to render.
        output_filename: Name of the output file.
        rendered_output_names: List of outputs to visualise ("rgb" and/or "normal").
            NOTE(review): annotated as ``str`` but iterated as a list of names —
            the real type is ``List[str]``.
        rendered_resolution_scaling_factor: Scaling factor to apply to the camera image resolution.
        seconds: Length of output video.
        output_format: How to save output data; PNG frames are always written,
            "video" additionally writes an mp4.
        merge_type: How per-output renders are combined into one frame: "concat"
            places them side by side, "half" splits the frame between the first two.
    """
    CONSOLE.print("[bold green]Creating trajectory video")
    images = []  # NOTE(review): unused; shadowed inside move_forward below.
    cameras.rescale_output_resolution(rendered_resolution_scaling_factor)
    # cameras = cameras.to(pipeline.device)
    width = cameras[0].width[0].item()
    height = cameras[0].height[0].item()
    # Load the mesh and give it normals + a uniform white albedo for shading.
    ply = o3d.io.read_triangle_mesh(str(meshfile))
    ply.compute_vertex_normals()
    ply.paint_uniform_color([1, 1, 1])
    vis = o3d.visualization.VisualizerWithKeyCallback()
    vis.create_window("rendering", width=width, height=height)
    vis.add_geometry(ply)
    vis.get_render_option().load_from_json("scripts/render.json")
    # One frame directory per requested output (e.g. renders/output/rgb, .../normal).
    output_image_dir = output_filename.parent / output_filename.stem
    for render_name in rendered_output_names:
        output_image_dir_cur = output_image_dir / render_name
        output_image_dir_cur.mkdir(parents=True, exist_ok=True)
    num_frames = cameras.size
    index = -1  # -1 so the first callback only positions the camera, capturing starts at 0
    rendered_images = []
    def move_forward(vis):
        # This function is called within the o3d.visualization.Visualizer::run() loop
        # The run loop calls the function, then re-render
        # So the sequence in this function is to:
        # 1. Capture frame
        # 2. index++, check ending criteria
        # 3. Set camera
        # 4. (Re-render)
        ctr = vis.get_view_control()
        nonlocal index
        nonlocal cameras
        nonlocal rendered_images
        if index >= 0:
            images = []
            for render_name in rendered_output_names:
                output_image_dir_cur = output_image_dir / render_name
                # Switch the mesh coloring mode so each pass renders the requested output.
                if render_name == "normal":
                    vis.get_render_option().mesh_color_option = o3d.visualization.MeshColorOption.Normal
                elif render_name == "rgb":
                    vis.get_render_option().mesh_color_option = o3d.visualization.MeshColorOption.Color
                vis.capture_screen_image(str(output_image_dir_cur / f"{index:05d}.png"), True)
                # cv2 reads BGR; reverse the channel axis to get RGB.
                images.append(cv2.imread(str(output_image_dir_cur / f"{index:05d}.png"))[:, :, ::-1])
            if merge_type == "concat":
                images = np.concatenate(images, axis=1)
            elif merge_type == "half":
                # Left half from the first output, right half from the second.
                mask = np.zeros_like(images[0])
                mask[:, : mask.shape[1] // 2, :] = 1
                images = images[0] * mask + images[1] * (1 - mask)
            rendered_images.append(images)
        index = index + 1
        if index < num_frames:
            param = ctr.convert_to_pinhole_camera_parameters()
            camera = cameras[index]
            width = camera.width[0].item()
            height = camera.height[0].item()
            fx = camera.fx[0].item()
            fy = camera.fy[0].item()
            cx = camera.cx[0].item()
            cy = camera.cy[0].item()
            camera = cameras[index]
            param.intrinsic.set_intrinsics(width=width, height=height, fx=fx, fy=fy, cx=cx, cy=cy)
            # Open3D wants world-to-camera; flip the y/z columns (OpenGL -> vision
            # camera convention) before inverting the camera-to-world pose.
            extrinsic = np.eye(4)
            extrinsic[:3, :] = camera.camera_to_worlds.cpu().numpy()
            extrinsic[:3, 1:3] *= -1
            param.extrinsic = np.linalg.inv(extrinsic)
            ctr.convert_from_pinhole_camera_parameters(param, allow_arbitrary=True)
        else:
            # All frames captured: stop the animation loop and close the window.
            vis.register_animation_callback(None)
            vis.destroy_window()
            return False
    vis.register_animation_callback(move_forward)
    vis.run()
    if output_format == "video":
        fps = len(rendered_images) / seconds
        # rendered_images = rendered_images + rendered_images[::-1]
        media.write_video(output_filename, rendered_images, fps=fps)
162,395 | from __future__ import annotations
import json
from dataclasses import dataclass, field
from pathlib import Path
from typing import List
import cv2
import mediapy as media
import numpy as np
import open3d as o3d
import torch
import tyro
from rich.console import Console
from scipy.interpolate import interp1d
from scipy.spatial.transform import Rotation, Slerp
from typing_extensions import Literal, assert_never
from nerfstudio.cameras.camera_paths import (
generate_ellipse_path,
get_path_from_json,
get_spiral_path,
)
from nerfstudio.cameras.cameras import Cameras
from nerfstudio.configs.base_config import Config
from nerfstudio.data.datamanagers.base_datamanager import AnnotatedDataParserUnion
from nerfstudio.data.dataparsers.sdfstudio_dataparser import SDFStudioDataParserConfig
from nerfstudio.utils import install_checks
class RenderTrajectory:
    """Load a dataset, render a mesh along a camera trajectory, and save to a video file."""
    # Path to the mesh file to render.
    meshfile: Path
    # Name of the renderer outputs to use. rgb, depth, etc. concatenates them along y axis
    rendered_output_names: List[str] = field(default_factory=lambda: ["rgb", "normal"])
    # Trajectory to render.
    traj: Literal["spiral", "filename", "interpolate", "ellipse"] = "spiral"
    # Scaling factor to apply to the camera image resolution.
    downscale_factor: int = 1
    # Filename of the camera path to render.
    camera_path_filename: Path = Path("camera_path.json")
    # Name of the output file.
    output_path: Path = Path("renders/output.mp4")
    # How long the video should be.
    seconds: float = 5.0
    # fps of the video.
    fps: int = 24
    # How to save output data.
    output_format: Literal["images", "video"] = "video"
    # How to merge the rendered outputs into a single frame ("half" or "concat").
    merge_type: Literal["half", "concat"] = "half"
    # Dataparser config used to load the cameras the trajectory is derived from.
    data: AnnotatedDataParserUnion = SDFStudioDataParserConfig()
    # Number of views (frames) to render along the trajectory.
    num_views: int = 300
    def main(self) -> None:
        """Build the requested camera path and render the mesh along it."""
        install_checks.check_ffmpeg_installed()
        seconds = self.seconds
        if self.output_format == "video":
            assert str(self.output_path)[-4:] == ".mp4"
        if self.traj == "filename":
            # Load an explicit camera path (and its duration) from a JSON file.
            with open(self.camera_path_filename, "r", encoding="utf-8") as f:
                camera_path = json.load(f)
            seconds = camera_path["seconds"]
            camera_path = get_path_from_json(camera_path)
        elif self.traj == "interpolate":
            # load training data and interpolate path
            # NOTE(review): _interpolate_trajectory is defined elsewhere in this module.
            outputs = self.data.setup()._generate_dataparser_outputs()
            camera_path = _interpolate_trajectory(cameras=outputs.cameras, num_views=self.num_views)
            seconds = camera_path.size / 24
        elif self.traj == "spiral":
            outputs = self.data.setup()._generate_dataparser_outputs()
            camera_path = get_spiral_path(camera=outputs.cameras, steps=self.num_views, radius=1.0)
            seconds = camera_path.size / 24
        elif self.traj == "ellipse":
            outputs = self.data.setup()._generate_dataparser_outputs()
            camera_path = generate_ellipse_path(cameras=outputs.cameras, n_frames=self.num_views, const_speed=False)
            seconds = camera_path.size / self.fps
        else:
            assert_never(self.traj)
        _render_trajectory_video(
            self.meshfile,
            camera_path,
            output_filename=self.output_path,
            rendered_output_names=self.rendered_output_names,
            rendered_resolution_scaling_factor=1.0 / self.downscale_factor,
            seconds=seconds,
            output_format=self.output_format,
            merge_type=self.merge_type,
        )
The provided code snippet includes necessary dependencies for implementing the `entrypoint` function. Write a Python function `def entrypoint()` to solve the following problem:
Entrypoint for use with pyproject scripts.
Here is the function:
def entrypoint():
    """Console-script entry point: parse CLI arguments with tyro and run rendering."""
    tyro.extras.set_accent_color("bright_yellow")
    parsed = tyro.cli(RenderTrajectory)
    parsed.main()
162,396 | from __future__ import annotations
import sys
from dataclasses import dataclass, field
from pathlib import Path
from typing import List, Optional, Tuple, Union
import numpy as np
import open3d as o3d
import torch
import tyro
from rich.console import Console
from typing_extensions import Annotated, Literal
from nerfstudio.cameras.rays import RayBundle
from nerfstudio.exporter import texture_utils, tsdf_utils
from nerfstudio.exporter.exporter_utils import (
generate_point_cloud,
get_mesh_from_filename,
)
from nerfstudio.pipelines.base_pipeline import Pipeline
from nerfstudio.utils.eval_utils import eval_setup
Commands = Union[
Annotated[ExportPointCloud, tyro.conf.subcommand(name="pointcloud")],
Annotated[ExportTSDFMesh, tyro.conf.subcommand(name="tsdf")],
Annotated[ExportPoissonMesh, tyro.conf.subcommand(name="poisson")],
Annotated[ExportMarchingCubesMesh, tyro.conf.subcommand(name="marching-cubes")],
]
The provided code snippet includes necessary dependencies for implementing the `entrypoint` function. Write a Python function `def entrypoint()` to solve the following problem:
Entrypoint for use with pyproject scripts.
Here is the function:
def entrypoint():
    """Console-script entry point: dispatch to the selected export subcommand."""
    tyro.extras.set_accent_color("bright_yellow")
    command = tyro.cli(tyro.conf.FlagConversionOff[Commands])
    command.main()
162,397 | import concurrent.futures
import itertools
import os
import pathlib
import random
import shutil
import stat
import subprocess
import sys
from typing import List, Union
import tyro
from rich.console import Console
from rich.prompt import Confirm
from typing_extensions import Literal, assert_never
The provided code snippet includes necessary dependencies for implementing the `_check_tyro_cli` function. Write a Python function `def _check_tyro_cli(script_path: pathlib.Path) -> bool` to solve the following problem:
Check if a path points to a script containing a tyro.cli() call. Also checks for any permissions/shebang issues. Args: script_path: Path to prospective CLI. Returns: True if a completion can be generated.
Here is the function:
def _check_tyro_cli(script_path: pathlib.Path) -> bool:
"""Check if a path points to a script containing a tyro.cli() call. Also checks
for any permissions/shebang issues.
Args:
script_path: Path to prospective CLI.
Returns:
True if a completion is can be generated.
"""
assert script_path.suffix == ".py"
script_src = script_path.read_text()
if '\nif __name__ == "__main__":\n' in script_src:
# Check script for execute permissions. For consistency, note that we apply this
# and the shebang check even if tyro isn't used.
execute_flags = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
if not script_path.stat().st_mode & execute_flags and Confirm.ask(
f"[yellow]:warning: {script_path} is not marked as executable. Fix?[/yellow]",
default=True,
):
script_path.chmod(script_path.stat().st_mode | execute_flags)
# Check that script has a shebang.
if not script_src.startswith("#!/") and Confirm.ask(
f"[yellow]:warning: {script_path} is missing a shebang. Fix?[/yellow]",
default=True,
):
script_path.write_text("#!/usr/bin/env python\n" + script_src)
# Return True only if compatible with tyro.
return "import tyro" in script_src and "tyro.cli" in script_src
return False | Check if a path points to a script containing a tyro.cli() call. Also checks for any permissions/shebang issues. Args: script_path: Path to prospective CLI. Returns: True if a completion is can be generated. |
162,398 | import concurrent.futures
import itertools
import os
import pathlib
import random
import shutil
import stat
import subprocess
import sys
from typing import List, Union
import tyro
from rich.console import Console
from rich.prompt import Confirm
from typing_extensions import Literal, assert_never
ShellType = Literal["zsh", "bash"]
CONSOLE = Console(width=120)
The provided code snippet includes necessary dependencies for implementing the `_generate_completion` function. Write a Python function `def _generate_completion( path_or_entrypoint: Union[pathlib.Path, str], shell: ShellType, completions_dir: pathlib.Path ) -> pathlib.Path` to solve the following problem:
Given a path to a tyro CLI (or the name of an installed entry point), write a completion script to a target directory. Args: path_or_entrypoint: Path to the Python CLI, or entry-point name, to generate a completion script for. shell: Shell to generate completion script for. completions_dir: Directory to write completion script to. Returns: Path to the written completion script.
Here is the function:
def _generate_completion(
    path_or_entrypoint: Union[pathlib.Path, str], shell: ShellType, completions_dir: pathlib.Path
) -> pathlib.Path:
    """Generate one shell-completion script for a script path or console entry point.

    Args:
        path_or_entrypoint: Path to a Python CLI script, or the name of an
            installed entry point, to generate a completion script for.
        shell: Shell to generate the completion script for.
        completions_dir: Directory the completion script is written into.

    Returns:
        Path of the (possibly freshly written) completion script.
    """
    if isinstance(path_or_entrypoint, pathlib.Path):
        # Scripts.
        target_name = "_" + path_or_entrypoint.name.replace(".", "_")
        args = [sys.executable, str(path_or_entrypoint), "--tyro-print-completion", shell]
    elif isinstance(path_or_entrypoint, str):
        # Entry points.
        target_name = "_" + path_or_entrypoint
        args = [path_or_entrypoint, "--tyro-print-completion", shell]
    else:
        assert_never(path_or_entrypoint)
    target_path = completions_dir / shell / target_name

    # Ask the CLI itself to print its completion script.
    try:
        completed = subprocess.run(
            args=args,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            encoding="utf8",
            check=True,
        )
        generated = completed.stdout
    except subprocess.CalledProcessError as e:
        CONSOLE.log(f":x: Completion script generation failed: {args}")
        for stream in (e.stdout, e.stderr):
            if stream is not None and len(stream) > 0:
                CONSOLE.log(stream)
        raise e

    # Write the script only when it is new or its content changed.
    target_path.parent.mkdir(parents=True, exist_ok=True)
    if not target_path.exists():
        target_path.write_text(generated)
        CONSOLE.log(f":heavy_check_mark: Wrote new completion to {target_path}!")
    elif target_path.read_text().strip() != generated.strip():
        target_path.write_text(generated)
        CONSOLE.log(f":heavy_check_mark: Updated completion at {target_path}!")
    else:
        CONSOLE.log(f"[dim]:heavy_check_mark: Nothing to do for {target_path}[/dim].")
    return target_path
162,399 | import concurrent.futures
import itertools
import os
import pathlib
import random
import shutil
import stat
import subprocess
import sys
from typing import List, Union
import tyro
from rich.console import Console
from rich.prompt import Confirm
from typing_extensions import Literal, assert_never
ConfigureMode = Literal["install", "uninstall"]
ShellType = Literal["zsh", "bash"]
CONSOLE = Console(width=120)
def _exclamation() -> str:
return random.choice(["Cool", "Nice", "Neat", "Great", "Exciting", "Excellent", "Ok"]) + "!"
The provided code snippet includes necessary dependencies for implementing the `_update_rc` function. Write a Python function `def _update_rc( completions_dir: pathlib.Path, mode: ConfigureMode, shell: ShellType, ) -> None` to solve the following problem:
Try to add a `source /.../completions/setup.{shell}` line automatically to a user's zshrc or bashrc. Args: completions_dir: Path to location of this script. shell: Shell to install completion scripts for. mode: Install or uninstall completions.
Here is the function:
def _update_rc(
    completions_dir: pathlib.Path,
    mode: ConfigureMode,
    shell: ShellType,
) -> None:
    """Try to add a `source /.../completions/setup.{shell}` line automatically to a user's zshrc or bashrc.

    Args:
        completions_dir: Path to the completions directory whose setup script is sourced.
        mode: Install or uninstall completions.
        shell: Shell to install completion scripts for.
    """
    # Install or uninstall `source_line`.
    # The header line marks the managed section so it can be found and removed later.
    header_line = "# Source nerfstudio autocompletions."
    if shell == "zsh":
        source_lines = "\n".join(
            [
                "",
                header_line,
                "if ! command -v compdef &> /dev/null; then",
                " autoload -Uz compinit",
                " compinit",
                "fi",
                f"source {completions_dir / 'setup.zsh'}",
            ]
        )
    elif shell == "bash":
        source_lines = "\n".join(
            [
                "",
                header_line,
                f"source {completions_dir / 'setup.bash'}",
            ]
        )
    else:
        assert_never(shell)
    rc_path = pathlib.Path(os.environ["HOME"]) / f".{shell}rc"
    # Always try to uninstall previous completions.
    # Each pass strips everything from one header line through the end of its
    # `source .../completions/setup.{shell}` line; loop in case of duplicates.
    rc_source = rc_path.read_text()
    while header_line in rc_source:
        before_install, _, after_install = rc_source.partition(header_line)
        source_file, _, after_install = after_install.partition("\nsource ")[2].partition("\n")
        # Sanity-check that the sourced file really is one of ours before deleting.
        assert source_file.endswith(f"/completions/setup.{shell}")
        rc_source = before_install + after_install
        rc_path.write_text(rc_source)
        CONSOLE.log(f":broom: Existing completions uninstalled from {rc_path}.")
    # Install completions.
    if mode == "install":
        # The loop above must have removed every previous install.
        assert source_lines not in rc_source
        rc_path.write_text(rc_source.rstrip() + "\n" + source_lines)
        CONSOLE.log(
            f":person_gesturing_ok: Completions installed to {rc_path}. {_exclamation()} Open a new shell to try them"
            " out."
        )
    else:
        assert mode == "uninstall"
162,400 | import concurrent.futures
import itertools
import os
import pathlib
import random
import shutil
import stat
import subprocess
import sys
from typing import List, Union
import tyro
from rich.console import Console
from rich.prompt import Confirm
from typing_extensions import Literal, assert_never
def main(mode: ConfigureMode = "install") -> None:
    """Install or uninstall shell tab-completions for nerfstudio CLIs.

    Args:
        mode: Choose between installing or uninstalling completions.
    """
    # Without $HOME we cannot locate rc files or write completions.
    if "HOME" not in os.environ:
        CONSOLE.log("[bold red]$HOME is not set. Exiting.")
        return

    # Try to locate the user's bashrc or zshrc.
    shells_supported: List[ShellType] = ["zsh", "bash"]
    shells_found: List[ShellType] = []
    for shell in shells_supported:
        rc_path = pathlib.Path(os.environ["HOME"]) / f".{shell}rc"
        if not rc_path.exists():
            CONSOLE.log(f":person_shrugging: {rc_path.name} not found, skipping.")
        else:
            CONSOLE.log(f":mag: Found {rc_path.name}!")
            shells_found.append(shell)

    # Get scripts/ directory. This file is expected to live at
    # scripts/completions/, and the asserts guard against being moved.
    completions_dir = pathlib.Path(__file__).absolute().parent
    scripts_dir = completions_dir.parent
    assert completions_dir.name == "completions"
    assert scripts_dir.name == "scripts"

    # Install mode: Generate completion for each tyro script.
    if mode == "uninstall":
        for shell in shells_supported:
            # Reset target directory for each shell type.
            target_dir = completions_dir / shell
            if target_dir.exists():
                assert target_dir.is_dir()
                shutil.rmtree(target_dir, ignore_errors=True)
                CONSOLE.log(f":broom: Deleted existing completion directory: {target_dir}.")
            else:
                CONSOLE.log(f":heavy_check_mark: No existing completions at: {target_dir}.")
    elif mode == "install":
        # Set to True to install completions for scripts as well.
        include_scripts = False

        # Find tyro CLIs. Script file names must be unique since they key the
        # generated completion files.
        script_paths = list(filter(_check_tyro_cli, scripts_dir.glob("**/*.py"))) if include_scripts else []
        script_names = tuple(p.name for p in script_paths)
        assert len(set(script_names)) == len(script_names)

        # Get existing completion files, so stale ones can be pruned below.
        existing_completions = set()
        for shell in shells_supported:
            target_dir = completions_dir / shell
            if target_dir.exists():
                existing_completions |= set(target_dir.glob("*"))

        # Run generation jobs. Threads are fine here: each job shells out /
        # does I/O rather than CPU-bound Python work.
        concurrent_executor = concurrent.futures.ThreadPoolExecutor()
        with CONSOLE.status("[bold]:writing_hand: Generating completions...", spinner="bouncingBall"):
            completion_paths = list(
                concurrent_executor.map(
                    lambda path_or_entrypoint_and_shell: _generate_completion(
                        path_or_entrypoint_and_shell[0], path_or_entrypoint_and_shell[1], completions_dir
                    ),
                    itertools.product(script_paths + ENTRYPOINTS, shells_found),
                )
            )

        # Delete obsolete completion files: anything that existed before but
        # was not (re)generated in this run.
        for unexpected_path in set(p.absolute() for p in existing_completions) - set(
            p.absolute() for p in completion_paths
        ):
            if unexpected_path.is_dir():
                shutil.rmtree(unexpected_path)
            elif unexpected_path.exists():
                unexpected_path.unlink()
            CONSOLE.log(f":broom: Deleted {unexpected_path}.")
    else:
        assert_never(mode)

    # Install or uninstall the `source` lines in each found bashrc/zshrc.
    for shell in shells_found:
        _update_rc(completions_dir, mode, shell)

    CONSOLE.print("[bold]All done![/bold]")
The provided code snippet includes necessary dependencies for implementing the `entrypoint` function. Write a Python function `def entrypoint()` to solve the following problem:
Entrypoint for use with pyproject scripts.
Here is the function:
def entrypoint():
    """Entrypoint for use with pyproject scripts."""
    # Yellow accent color for tyro's generated --help output.
    tyro.extras.set_accent_color("bright_yellow")
    # Parse CLI args into `main`'s parameters; module docstring becomes the help description.
    tyro.cli(main, description=__doc__)
162,401 | import json
import os
from enum import Enum
from pathlib import Path
import cv2
import numpy as np
import pyrender
import torch
import trimesh
import yaml
from tqdm import tqdm
from nerfstudio.data.utils import colmap_utils
from nerfstudio.model_components.ray_samplers import save_points
class CameraModel(Enum):
    """Enum for camera types.

    The string values mirror COLMAP's camera-model identifiers, so they can be
    written directly into COLMAP-compatible metadata.
    """

    # Pinhole camera with OpenCV-style distortion parameters.
    OPENCV = "OPENCV"
    # Fisheye camera with OpenCV-style fisheye distortion parameters.
    OPENCV_FISHEYE = "OPENCV_FISHEYE"
def show_result(seg):
    """Colorize a segmentation label map using the module-level ``palette``.

    Args:
        seg: (H, W) integer label map; label ``i`` is painted with ``palette[i]``.

    Returns:
        (H, W, 3) uint8 image with channel order reversed (palette is
        presumably RGB, output is BGR for OpenCV).
    """
    colored = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8)
    for class_id, rgb in enumerate(palette):
        colored[seg == class_id, :] = rgb
    # Flip channel order for OpenCV-style BGR output.
    return colored[..., ::-1]
class Renderer:
    """Offscreen pyrender wrapper for rasterizing a single mesh from a camera pose.

    Backface culling is disabled (SKIP_CULL_FACES) so meshes render regardless
    of winding order.
    """

    def __init__(self, height=480, width=640):
        self.renderer = pyrender.OffscreenRenderer(width, height)
        self.scene = pyrender.Scene()
        self.render_flags = pyrender.RenderFlags.SKIP_CULL_FACES

    def __call__(self, height, width, intrinsics, pose, mesh):
        """Render ``mesh`` with pinhole ``intrinsics`` and camera-to-world ``pose``."""
        self.renderer.viewport_height = height
        self.renderer.viewport_width = width
        # The scene is rebuilt from scratch on every call.
        self.scene.clear()
        self.scene.add(mesh)
        camera = pyrender.IntrinsicsCamera(
            cx=intrinsics[0, 2], cy=intrinsics[1, 2], fx=intrinsics[0, 0], fy=intrinsics[1, 1]
        )
        self.scene.add(camera, pose=self.fix_pose(pose))
        # flags = pyrender.constants.RenderFlags.OFFSCREEN
        return self.renderer.render(self.scene, flags=self.render_flags)

    def fix_pose(self, pose):
        """Right-multiply ``pose`` by a 180-degree rotation about the x axis.

        This flips the y and z camera axes while leaving the translation
        untouched (presumably converting an OpenCV-convention camera pose to
        pyrender's OpenGL convention — confirm against the caller).
        """
        half_turn = np.pi
        cos_t, sin_t = np.cos(half_turn), np.sin(half_turn)
        flip_yz = np.eye(4)
        flip_yz[:3, :3] = np.array([[1.0, 0.0, 0.0], [0.0, cos_t, -sin_t], [0.0, sin_t, cos_t]])
        return pose @ flip_yz

    def mesh_opengl(self, mesh):
        """Wrap a trimesh mesh as a pyrender mesh for rendering."""
        return pyrender.Mesh.from_trimesh(mesh)

    def delete(self):
        """Dispose of the underlying offscreen renderer."""
        self.renderer.delete()
def save_points(path_save, pts, colors=None, normals=None, BRG2RGB=False):
    """Write a point set (optionally with colors and normals) to disk via Open3D.

    Args:
        path_save: Destination path, in any format ``open3d.io.write_point_cloud`` accepts.
        pts: (N, 3) array of point positions; must be non-empty.
        colors: Optional (N, 3) array of per-point colors.
        normals: Optional (N, 3) array of per-point normals.
        BRG2RGB: If True, reverse the color channel order before saving.
    """
    assert len(pts) > 0
    if colors is not None:
        assert colors.shape[1] == 3
    assert pts.shape[1] == 3
    # Imported lazily so the module loads without open3d installed.
    import numpy as np
    import open3d as o3d

    cloud = o3d.geometry.PointCloud()
    cloud.points = o3d.utility.Vector3dVector(pts)
    if colors is not None:
        # Open3D expects float colors in [0, 1]; rescale by the peak if needed.
        peak = np.max(colors)
        if peak > 1:
            colors = colors / peak
        if BRG2RGB:
            colors = colors[:, [2, 1, 0]]
        cloud.colors = o3d.utility.Vector3dVector(colors)
    if normals is not None:
        cloud.normals = o3d.utility.Vector3dVector(normals)
    o3d.io.write_point_cloud(path_save, cloud)
The provided code snippet includes necessary dependencies for implementing the `colmap_to_json` function. Write a Python function `def colmap_to_json( scene_path: Path, sfm: Path, camera_model: CameraModel, ) -> int` to solve the following problem:
Converts COLMAP's cameras.bin and images.bin to a JSON file. Args: cameras_path: Path to the cameras.bin file. images_path: Path to the images.bin file. output_dir: Path to the output directory. camera_model: Camera model used. Returns: The number of registered images.
Here is the function:
def colmap_to_json(
scene_path: Path,
sfm: Path,
camera_model: CameraModel,
) -> int:
"""Converts COLMAP's cameras.bin and images.bin to a JSON file.
Args:
cameras_path: Path to the cameras.bin file.
images_path: Path to the images.bin file.
output_dir: Path to the output directory.
camera_model: Camera model used.
Returns:
The number of registered images.
"""
cameras_path = scene_path / sfm / "cameras.bin"
images_path = scene_path / sfm / "images.bin"
points_path = scene_path / sfm / "points3D.bin"
config_path = scene_path / "config.yaml"
with open(config_path, "r") as yamlfile:
scene_config = yaml.load(yamlfile, Loader=yaml.FullLoader)
radius = scene_config["radius"]
origin = np.array(scene_config["origin"]).reshape(1, 3)
cams = colmap_utils.read_cameras_binary(cameras_path)
imgs = colmap_utils.read_images_binary(images_path)
pts3d = colmap_utils.read_points3d_binary(points_path)
# pts3d_array = np.array([pts3d[p_id].xyz for p_id in pts3d])
# error_array = np.array([pts3d[p_id].error for p_id in pts3d])
# key point depth
pts3d_array = torch.ones(max(pts3d.keys()) + 1, 4)
error_array = torch.ones(max(pts3d.keys()) + 1, 1)
for pts_id, pts in tqdm(pts3d.items()):
pts3d_array[pts_id, :3] = torch.from_numpy(pts.xyz)
error_array[pts_id, 0] = torch.from_numpy(pts.error)
points_ori = []
min_track_length = scene_config["min_track_length"]
for id, p in pts3d.items():
if p.point2D_idxs.shape[0] > min_track_length:
points_ori.append(p.xyz)
points_ori = np.array(points_ori)
save_points("nori_3.ply", points_ori)
points_ori -= origin
print(points_ori.shape)
# expand and quantify
points_ori = torch.from_numpy(points_ori)
offset = torch.linspace(-1, 1.0, 3)
offset_cube = torch.meshgrid(offset, offset, offset)
offset_cube = torch.stack(offset_cube, dim=-1).reshape(-1, 3)
voxel_size = scene_config["voxel_size"]
offset_cube *= voxel_size # voxel size
expand_points = points_ori[:, None, :] + offset_cube[None]
expand_points = expand_points.reshape(-1, 3)
save_points("expand_points.ply", expand_points.numpy())
# filter
# filter out points out of [-1, 1]
mask = torch.prod((expand_points > -radius), axis=-1, dtype=torch.bool) & torch.prod(
(expand_points < radius), axis=-1, dtype=torch.bool
)
filtered_points = expand_points[mask]
save_points("filtered_points.ply", filtered_points.numpy())
grid_size = 32
voxel_size = 2 * radius / grid_size
quantified_points = torch.floor(((filtered_points / radius) + 1.0) * grid_size // 2)
index = quantified_points[:, 0] + quantified_points[:, 1] * grid_size + quantified_points[:, 2] * grid_size**2
offset = torch.linspace(-radius + voxel_size / 2.0, radius - voxel_size / 2.0, grid_size)
z, y, x = torch.meshgrid(offset, offset, offset, indexing="xy")
offset_cube = torch.stack([x, z, y], dim=-1).reshape(-1, 3)
mask = torch.zeros(grid_size**3, dtype=torch.bool)
mask[index.long()] = True
points_valid = offset_cube[mask]
save_points("quantified_points.ply", points_valid.numpy())
# breakpoint()
"""
xyz_world = np.array([pts3d[p_id].xyz for p_id in pts3d])
xyz_world_error = np.array([pts3d[p_id].error for p_id in pts3d])
xyz_world = xyz_world[xyz_world_error < 0.2]
sfm2gt = np.array(scene_config["sfm2gt"])
xyz_world = xyz_world @ sfm2gt[:3, :3].T + sfm2gt[:3, 3:].T
save_points("pp.ply", xyz_world)
"""
mesh = trimesh.creation.icosphere(5, radius=radius)
mesh.vertices = mesh.vertices + np.array(scene_config["origin"]).reshape(1, 3)
meshes = []
for p in points_valid:
box = trimesh.creation.box(extents=(voxel_size, voxel_size, voxel_size))
box.vertices = box.vertices + origin + p.numpy().reshape(-1, 3)
meshes.append(box)
mesh = trimesh.util.concatenate(meshes)
mesh.export("box.ply")
"""
vertices = mesh.vertices @ sfm2gt[:3, :3].T + sfm2gt[:3, 3:].T
save_points("sphere.ply", vertices)
"""
# print(cameras)
poses = []
fxs = []
fys = []
cxs = []
cys = []
image_filenames = []
mask_filenames = []
masks = []
data = scene_path
for _id, cam in cams.items():
img = imgs[_id]
assert cam.model == "PINHOLE", "Only pinhole (perspective) camera model is supported at the moment"
pose = torch.cat([torch.tensor(img.qvec2rotmat()), torch.tensor(img.tvec.reshape(3, 1))], dim=1)
pose = torch.cat([pose, torch.tensor([[0.0, 0.0, 0.0, 1.0]])], dim=0)
poses.append(torch.linalg.inv(pose))
fxs.append(torch.tensor(cam.params[0]))
fys.append(torch.tensor(cam.params[1]))
cxs.append(torch.tensor(cam.params[2]))
cys.append(torch.tensor(cam.params[3]))
image_filenames.append(data / "dense/images" / img.name)
mask_filenames.append(data / "semantic_maps" / img.name.replace(".jpg", ".npz"))
# visualize pts3d for each image
valid_3d_mask = img.point3D_ids != -1
point3d_ids = img.point3D_ids[valid_3d_mask]
img_p3d = pts3d_array[point3d_ids]
img_err = error_array[point3d_ids]
# img_p3d = img_p3d[img_err[:, 0] < torch.median(img_err)]
save_points(f"W/{_id}_nof.ply", img_p3d.cpu().numpy()[:, :3])
# render bounding sphere mask
renderer = Renderer()
mesh_opengl = renderer.mesh_opengl(mesh)
intrinsic = np.eye(4)
intrinsic[0, 0] = cam.params[0]
intrinsic[1, 1] = cam.params[1]
intrinsic[0, 2] = cam.params[2]
intrinsic[1, 2] = cam.params[3]
H = cam.height
W = cam.width
pose = poses[-1].cpu().numpy()
_, depth_pred = renderer(H, W, intrinsic, pose, mesh_opengl)
print(intrinsic)
print(pose)
print(depth_pred.min(), depth_pred.max())
renderer.delete()
mask = np.load(mask_filenames[-1])["arr_0"]
semantic_image = show_result(mask)
# ['person', 'car', 'bicycle', 'minibike'] with id [12, 20,127,116]
# ['sky'] = 2
# new mask [80, 83, 43, 41, 115, 110]
semantic_ids_to_skip = [12, 20, 127, 116] # + [80, 83, 43, 41, 115, 110] # + [2]
mask = np.stack([mask != semantic_id for semantic_id in semantic_ids_to_skip]) # + mask2
mask = mask.all(axis=0)
rgb_img = cv2.imread(str(image_filenames[-1]))
print(rgb_img.shape, mask.shape, H, W)
if rgb_img.shape[0] != H and rgb_img.shape[1] != W:
print("warning")
continue
rgb_img_masked_semantic = rgb_img * mask[..., None]
depth_mask = depth_pred > 0.0001
rgb_img_masked = rgb_img * depth_mask[..., None]
mask = depth_mask & mask
rgb_img = rgb_img * mask[..., None]
image = np.concatenate((rgb_img, rgb_img_masked_semantic, semantic_image, rgb_img_masked), axis=1)
# cv2.imshow("ssdf", image)
# cv2.waitKey(0)
# write mask
(scene_path / "masks").mkdir(exist_ok=True, parents=False)
np.save(scene_path / "masks" / img.name.replace(".jpg", ".npy"), mask) | Converts COLMAP's cameras.bin and images.bin to a JSON file. Args: cameras_path: Path to the cameras.bin file. images_path: Path to the images.bin file. output_dir: Path to the output directory. camera_model: Camera model used. Returns: The number of registered images. |
162,402 | from __future__ import annotations
from dataclasses import dataclass
from pathlib import Path
from typing import Literal, Tuple
import torch
import tyro
from rich.console import Console
from nerfstudio.model_components.ray_samplers import save_points
from nerfstudio.utils.eval_utils import eval_setup
from nerfstudio.utils.marching_cubes import (
get_surface_occupancy,
get_surface_sliding,
get_surface_sliding_with_contraction,
)
class ExtractMesh:
    """Load a checkpoint, run marching cubes, extract mesh, and save it to a ply file."""

    # Path to config YAML file.
    load_config: Path
    # Marching cube resolution.
    resolution: int = 1024
    # Name of the output file.
    output_path: Path = Path("output.ply")
    # Whether to simplify the mesh.
    simplify_mesh: bool = False
    # extract the mesh using occupancy field (unisurf) or SDF, default sdf
    is_occupancy: bool = False
    """Minimum of the bounding box."""
    bounding_box_min: Tuple[float, float, float] = (-1.0, -1.0, -1.0)
    """Maximum of the bounding box."""
    bounding_box_max: Tuple[float, float, float] = (1.0, 1.0, 1.0)
    """marching cube threshold"""
    marching_cube_threshold: float = 0.0
    """create visibility mask"""
    create_visibility_mask: bool = False
    """save visibility grid"""
    save_visibility_grid: bool = False
    """visibility grid resolution"""
    visibility_grid_resolution: int = 512
    """threshold for considering a points is valid when splat to visibility grid"""
    valid_points_thres: float = 0.005
    """sub samples factor of images when creating visibility grid"""
    sub_sample_factor: int = 8
    """torch precision"""
    torch_precision: Literal["highest", "high"] = "high"

    def main(self) -> None:
        """Extract a surface mesh from the loaded pipeline and write it to ``output_path``.

        Three extraction paths, chosen by flags:
        visibility-masked contraction-aware extraction, occupancy-based
        extraction (unisurf), or plain multi-scale SDF extraction.
        """
        torch.set_float32_matmul_precision(self.torch_precision)
        # Output must be a .ply file; create parent directories as needed.
        assert str(self.output_path)[-4:] == ".ply"
        self.output_path.parent.mkdir(parents=True, exist_ok=True)
        _, pipeline, _ = eval_setup(self.load_config)
        CONSOLE.print("Extract mesh with marching cubes and may take a while")
        if self.create_visibility_mask:
            # Sliding-window extraction requires a multiple of 512.
            assert self.resolution % 512 == 0
            coarse_mask = pipeline.get_visibility_mask(
                self.visibility_grid_resolution, self.valid_points_thres, self.sub_sample_factor
            )

            def inv_contract(x):
                # Undo the scene contraction: points with |x| >= 1 are mapped
                # back outward via x / (|x| * (2 - |x|)); points inside the
                # unit ball are left unchanged.
                mag = torch.linalg.norm(x, ord=pipeline.model.scene_contraction.order, dim=-1)
                mask = mag >= 1
                x_new = x.clone()
                x_new[mask] = (1 / (2 - mag[mask][..., None])) * (x[mask] / mag[mask][..., None])
                return x_new

            if self.save_visibility_grid:
                # Dump the occupied cells of the 512^3 visibility grid (in
                # uncontracted coordinates) for debugging.
                offset = torch.linspace(-2.0, 2.0, 512)
                x, y, z = torch.meshgrid(offset, offset, offset, indexing="ij")
                offset_cube = torch.stack([x, y, z], dim=-1).reshape(-1, 3).to(coarse_mask.device)
                points = offset_cube[coarse_mask.reshape(-1) > 0]
                points = inv_contract(points)
                save_points("mask.ply", points.cpu().numpy())
                torch.save(coarse_mask, "coarse_mask.pt")
            get_surface_sliding_with_contraction(
                # SDF shifted by the marching-cube threshold so the zero level
                # set is the requested iso-surface.
                sdf=lambda x: (
                    pipeline.model.field.forward_geonetwork(x)[:, 0] - self.marching_cube_threshold
                ).contiguous(),
                resolution=self.resolution,
                bounding_box_min=self.bounding_box_min,
                bounding_box_max=self.bounding_box_max,
                coarse_mask=coarse_mask,
                output_path=self.output_path,
                simplify_mesh=self.simplify_mesh,
                inv_contraction=inv_contract,
            )
            return
        if self.is_occupancy:
            # for unisurf
            get_surface_occupancy(
                occupancy_fn=lambda x: torch.sigmoid(
                    10 * pipeline.model.field.forward_geonetwork(x)[:, 0].contiguous()
                ),
                resolution=self.resolution,
                bounding_box_min=self.bounding_box_min,
                bounding_box_max=self.bounding_box_max,
                level=0.5,
                device=pipeline.model.device,
                output_path=self.output_path,
            )
        else:
            assert self.resolution % 512 == 0
            # for sdf we can multi-scale extraction.
            get_surface_sliding(
                sdf=lambda x: pipeline.model.field.forward_geonetwork(x)[:, 0].contiguous(),
                resolution=self.resolution,
                bounding_box_min=self.bounding_box_min,
                bounding_box_max=self.bounding_box_max,
                # NOTE(review): "gird" is the attribute's actual (misspelled)
                # name on the project's SceneBox — do not "fix" it here.
                coarse_mask=pipeline.model.scene_box.coarse_binary_gird,
                output_path=self.output_path,
                simplify_mesh=self.simplify_mesh,
            )
The provided code snippet includes necessary dependencies for implementing the `entrypoint` function. Write a Python function `def entrypoint()` to solve the following problem:
Entrypoint for use with pyproject scripts.
Here is the function:
def entrypoint():
    """CLI entry point registered via the pyproject scripts table."""
    # Style tyro's generated help output before parsing.
    tyro.extras.set_accent_color("bright_yellow")
    parsed = tyro.cli(tyro.conf.FlagConversionOff[ExtractMesh])
    parsed.main()
162,403 | from __future__ import annotations
from dataclasses import dataclass
from pathlib import Path
from typing import Optional
import torch
import torchvision
import tyro
from rich.console import Console
from typing_extensions import Literal
from nerfstudio.exporter import texture_utils
from nerfstudio.exporter.exporter_utils import get_mesh_from_filename
from nerfstudio.utils.eval_utils import eval_setup
class TextureMesh:
    """
    Export a textured mesh with color computed from the NeRF.
    """

    load_config: Path
    """Path to the config YAML file."""
    output_dir: Path
    """Path to the output directory."""
    input_mesh_filename: Path
    """Mesh filename to texture."""
    px_per_uv_triangle: int = 4
    """Number of pixels per UV square."""
    unwrap_method: Literal["xatlas", "custom"] = "xatlas"
    """The method to use for unwrapping the mesh."""
    num_pixels_per_side: int = 2048
    """If using xatlas for unwrapping, the pixels per side of the texture image."""
    target_num_faces: Optional[int] = 50000
    """Target number of faces for the mesh to texture."""

    def main(self) -> None:
        """Export textured mesh"""
        # pylint: disable=too-many-statements
        if not self.output_dir.exists():
            self.output_dir.mkdir(parents=True)
        # load the Mesh (optionally decimated to target_num_faces)
        mesh = get_mesh_from_filename(str(self.input_mesh_filename), target_num_faces=self.target_num_faces)
        # load the Pipeline in inference mode (no training state needed)
        _, pipeline, _ = eval_setup(self.load_config, test_mode="inference")
        # texture the mesh with NeRF and export to a mesh.obj file
        # and a material and texture file
        texture_utils.export_textured_mesh(
            mesh,
            pipeline,
            px_per_uv_triangle=self.px_per_uv_triangle,
            output_dir=self.output_dir,
            unwrap_method=self.unwrap_method,
            num_pixels_per_side=self.num_pixels_per_side,
        )
The provided code snippet includes necessary dependencies for implementing the `entrypoint` function. Write a Python function `def entrypoint()` to solve the following problem:
Entrypoint for use with pyproject scripts.
Here is the function:
def entrypoint():
    """CLI entry point registered via the pyproject scripts table."""
    # Style tyro's generated help output before parsing.
    tyro.extras.set_accent_color("bright_yellow")
    parsed = tyro.cli(tyro.conf.FlagConversionOff[TextureMesh])
    parsed.main()
162,404 | from __future__ import annotations
import json
import sys
from dataclasses import dataclass, field
from pathlib import Path
from typing import List, Optional
import mediapy as media
import numpy as np
import torch
import tyro
from rich.console import Console
from rich.progress import (
BarColumn,
Progress,
TaskProgressColumn,
TextColumn,
TimeRemainingColumn,
)
from typing_extensions import Literal, assert_never
from nerfstudio.cameras.camera_paths import get_path_from_json, get_spiral_path
from nerfstudio.cameras.cameras import Cameras
from nerfstudio.configs.base_config import Config
from nerfstudio.pipelines.base_pipeline import Pipeline
from nerfstudio.utils import install_checks
from nerfstudio.utils.eval_utils import eval_setup
from nerfstudio.utils.rich_utils import ItersPerSecColumn
CONSOLE = Console(width=120)
class Cameras(TensorDataclass):
"""Dataparser outputs for the image dataset and the ray generator.
Note: currently only supports cameras with the same principal points and types. The reason we type
the focal lengths, principal points, and image sizes as tensors is to allow for batched cameras
down the line in cases where your batches of camera data don't come from the same cameras.
If a single value is provided, it is broadcasted to all cameras.
Args:
camera_to_worlds: Camera to world matrices. Tensor of per-image c2w matrices, in [R | t] format
fx: Focal length x
fy: Focal length y
cx: Principal point x
cy: Principal point y
width: Image width
height: Image height
distortion_params: OpenCV 6 radial distortion coefficients
camera_type: Type of camera model. This will be an int corresponding to the CameraType enum.
times: Timestamps for each camera
"""
camera_to_worlds: TensorType["num_cameras":..., 3, 4]
fx: TensorType["num_cameras":..., 1]
fy: TensorType["num_cameras":..., 1]
cx: TensorType["num_cameras":..., 1]
cy: TensorType["num_cameras":..., 1]
width: TensorType["num_cameras":..., 1]
height: TensorType["num_cameras":..., 1]
distortion_params: Optional[TensorType["num_cameras":..., 6]]
camera_type: TensorType["num_cameras":..., 1]
times: Optional[TensorType["num_cameras", 1]]
def __init__(
self,
camera_to_worlds: TensorType["batch_c2ws":..., 3, 4],
fx: Union[TensorType["batch_fxs":..., 1], float],
fy: Union[TensorType["batch_fys":..., 1], float],
cx: Union[TensorType["batch_cxs":..., 1], float],
cy: Union[TensorType["batch_cys":..., 1], float],
width: Optional[Union[TensorType["batch_ws":..., 1], int]] = None,
height: Optional[Union[TensorType["batch_hs":..., 1], int]] = None,
distortion_params: Optional[TensorType["batch_dist_params":..., 6]] = None,
camera_type: Optional[
Union[
TensorType["batch_cam_types":..., 1],
int,
List[CameraType],
CameraType,
]
] = CameraType.PERSPECTIVE,
times: Optional[TensorType["num_cameras"]] = None,
):
"""Initializes the Cameras object.
Note on Input Tensor Dimensions: All of these tensors have items of dimensions TensorType[3, 4]
(in the case of the c2w matrices), TensorType[6] (in the case of distortion params), or
TensorType[1] (in the case of the rest of the elements). The dimensions before that are
considered the batch dimension of that tensor (batch_c2ws, batch_fxs, etc.). We will broadcast
all the tensors to be the same batch dimension. This means you can use any combination of the
input types in the function signature and it won't break. Your batch size for all tensors
must be broadcastable to the same size, and the resulting number of batch dimensions will be
the batch dimension with the largest number of dimensions.
"""
# This will notify the tensordataclass that we have a field with more than 1 dimension
self._field_custom_dimensions = {"camera_to_worlds": 2}
self.camera_to_worlds = camera_to_worlds
# fx fy calculation
self.fx = self._init_get_fc_xy(fx, "fx") # @dataclass's post_init will take care of broadcasting
self.fy = self._init_get_fc_xy(fy, "fy") # @dataclass's post_init will take care of broadcasting
# cx cy calculation
self.cx = self._init_get_fc_xy(cx, "cx") # @dataclass's post_init will take care of broadcasting
self.cy = self._init_get_fc_xy(cy, "cy") # @dataclass's post_init will take care of broadcasting
# Distortion Params Calculation:
self.distortion_params = distortion_params # @dataclass's post_init will take care of broadcasting
# @dataclass's post_init will take care of broadcasting
self.height = self._init_get_height_width(height, self.cy)
self.width = self._init_get_height_width(width, self.cx)
self.camera_type = self._init_get_camera_type(camera_type)
self.times = self._init_get_times(times)
self.__post_init__() # This will do the dataclass post_init and broadcast all the tensors
def _init_get_fc_xy(self, fc_xy, name):
"""
Parses the input focal length / principle point x or y and returns a tensor of the correct shape
Only needs to make sure that we a 1 in the last dimension if it is a tensor. If it is a float, we
just need to make it into a tensor and it will be broadcasted later in the __post_init__ function.
Args:
fc_xy: The focal length / principle point x or y
name: The name of the variable. Used for error messages
"""
if isinstance(fc_xy, float):
fc_xy = torch.Tensor([fc_xy], device=self.device)
elif isinstance(fc_xy, torch.Tensor):
if fc_xy.ndim == 0 or fc_xy.shape[-1] != 1:
fc_xy = fc_xy.unsqueeze(-1)
fc_xy = fc_xy.to(self.device)
else:
raise ValueError(f"{name} must be a float or tensor, got {type(fc_xy)}")
return fc_xy
def _init_get_camera_type(
self,
camera_type: Union[
TensorType["batch_cam_types":..., 1], TensorType["batch_cam_types":...], int, List[CameraType], CameraType
],
) -> TensorType["num_cameras":..., 1]:
"""
Parses the __init__() argument camera_type
Camera Type Calculation:
If CameraType, convert to int and then to tensor, then broadcast to all cameras
If List of CameraTypes, convert to ints and then to tensor, then broadcast to all cameras
If int, first go to tensor and then broadcast to all cameras
If tensor, broadcast to all cameras
Args:
camera_type: camera_type argument from __init__()
"""
if isinstance(camera_type, CameraType):
camera_type = torch.tensor([camera_type.value], device=self.device)
elif isinstance(camera_type, List) and isinstance(camera_type[0], CameraType):
camera_type = torch.tensor([[c.value] for c in camera_type], device=self.device)
elif isinstance(camera_type, int):
camera_type = torch.tensor([camera_type], device=self.device)
elif isinstance(camera_type, torch.Tensor):
assert not torch.is_floating_point(
camera_type
), f"camera_type tensor must be of type int, not: {camera_type.dtype}"
camera_type = camera_type.to(self.device)
if camera_type.ndim == 0 or camera_type.shape[-1] != 1:
camera_type = camera_type.unsqueeze(-1)
# assert torch.all(
# camera_type.view(-1)[0] == camera_type
# ), "Batched cameras of different camera_types will be allowed in the future."
else:
raise ValueError(
'Invalid camera_type. Must be CameraType, List[CameraType], int, or torch.Tensor["num_cameras"]. \
Received: '
+ str(type(camera_type))
)
return camera_type
def _init_get_height_width(
self,
h_w: Union[TensorType["batch_hws":..., 1], TensorType["batch_hws":...], int, None],
c_x_y: TensorType["batch_cxys":...],
) -> TensorType["num_cameras":..., 1]:
"""
Parses the __init__() argument for height or width
Height/Width Calculation:
If int, first go to tensor and then broadcast to all cameras
If tensor, broadcast to all cameras
If none, use cx or cy * 2
Else raise error
Args:
h_w: height or width argument from __init__()
c_x_y: cx or cy for when h_w == None
"""
if isinstance(h_w, int):
h_w = torch.Tensor([h_w]).to(torch.int64).to(self.device)
elif isinstance(h_w, torch.Tensor):
assert not torch.is_floating_point(h_w), f"height and width tensor must be of type int, not: {h_w.dtype}"
h_w = h_w.to(torch.int64).to(self.device)
if h_w.ndim == 0 or h_w.shape[-1] != 1:
h_w = h_w.unsqueeze(-1)
# assert torch.all(h_w == h_w.view(-1)[0]), "Batched cameras of different h, w will be allowed in the future."
elif h_w is None:
h_w = torch.Tensor((c_x_y * 2).to(torch.int64).to(self.device))
else:
raise ValueError("Height must be an int, tensor, or None, received: " + str(type(h_w)))
return h_w
def _init_get_times(self, times):
if times is None:
times = None
elif isinstance(times, torch.Tensor):
if times.ndim == 0 or times.shape[-1] != 1:
times = times.unsqueeze(-1).to(self.device)
else:
raise ValueError(f"times must be None or a tensor, got {type(times)}")
return times
def device(self):
"""Returns the device that the camera is on."""
return self.camera_to_worlds.device
def image_height(self) -> TensorType["num_cameras":..., 1]:
"""Returns the height of the images."""
return self.height
def image_width(self) -> TensorType["num_cameras":..., 1]:
"""Returns the height of the images."""
return self.width
def is_jagged(self):
"""
Returns whether or not the cameras are "jagged" (i.e. the height and widths are different, meaning that
you cannot concatenate the image coordinate maps together)
"""
h_jagged = not torch.all(self.height == self.height.view(-1)[0])
w_jagged = not torch.all(self.width == self.width.view(-1)[0])
return h_jagged or w_jagged
def get_image_coords(
self, pixel_offset: float = 0.5, index: Optional[Tuple] = None
) -> TensorType["height", "width", 2]:
"""This gets the image coordinates of one of the cameras in this object.
If no index is specified, it will return the maximum possible sized height / width image coordinate map,
by looking at the maximum height and width of all the cameras in this object.
Args:
pixel_offset: Offset for each pixel. Defaults to center of pixel (0.5)
index: Tuple of indices into the batch dimensions of the camera. Defaults to None, which returns the 0th
flattened camera
Returns:
Grid of image coordinates.
"""
if index is None:
image_height = torch.max(self.image_height.view(-1))
image_width = torch.max(self.image_width.view(-1))
image_coords = torch.meshgrid(torch.arange(image_height), torch.arange(image_width), indexing="ij")
image_coords = torch.stack(image_coords, dim=-1) + pixel_offset # stored as (y, x) coordinates
else:
image_height = self.image_height[index].item()
image_width = self.image_width[index].item()
image_coords = torch.meshgrid(torch.arange(image_height), torch.arange(image_width), indexing="ij")
image_coords = torch.stack(image_coords, dim=-1) + pixel_offset # stored as (y, x) coordinates
return image_coords
def generate_rays(  # pylint: disable=too-many-statements
    self,
    camera_indices: Union[TensorType["num_rays":..., "num_cameras_batch_dims"], int],
    coords: Optional[TensorType["num_rays":..., 2]] = None,
    camera_opt_to_camera: Optional[TensorType["num_rays":..., 3, 4]] = None,
    distortion_params_delta: Optional[TensorType["num_rays":..., 6]] = None,
    keep_shape: Optional[bool] = None,
    disable_distortion: bool = False,
) -> RayBundle:
    """Generates rays for the given camera indices.

    This function will standardize the input arguments and then call the _generate_rays_from_coords function
    to generate the rays. Our goal is to parse the arguments and then get them into the right shape:

    - camera_indices: (num_rays:..., num_cameras_batch_dims)
    - coords: (num_rays:..., 2)
    - camera_opt_to_camera: (num_rays:..., 3, 4) or None
    - distortion_params_delta: (num_rays:..., 6) or None

    Read the docstring for _generate_rays_from_coords for more information on how we generate the rays
    after we have standardized the arguments.

    We are only concerned about different combinations of camera_indices and coords matrices, and the following
    are the 4 cases we have to deal with:

    1. isinstance(camera_indices, int) and coords == None
        - In this case we broadcast our camera_indices / coords shape (h, w, 1 / 2 respectively)
    2. isinstance(camera_indices, int) and coords != None
        - In this case, we broadcast camera_indices to the same batch dim as coords
    3. not isinstance(camera_indices, int) and coords == None
        - In this case, we will need to set coords so that it is of shape (h, w, num_rays, 2), and broadcast
          all our other args to match the new definition of num_rays := (h, w) + num_rays
    4. not isinstance(camera_indices, int) and coords != None
        - In this case, we have nothing to do, only check that the arguments are of the correct shape

    There is one more edge case we need to be careful with: when we have "jagged cameras" (ie: different heights
    and widths for each camera). This isn't problematic when we specify coords, since coords is already a tensor.
    When coords == None (ie: when we render out the whole image associated with this camera), we run into problems
    since there's no way to stack each coordinate map as all coordinate maps are all different shapes. In this case,
    we will need to flatten each individual coordinate map and concatenate them, giving us only one batch dimension,
    regardless of the number of prepended extra batch dimensions in the camera_indices tensor.

    Args:
        camera_indices: Camera indices of the flattened cameras object to generate rays for.
        coords: Coordinates of the pixels to generate rays for. If None, the full image will be rendered.
        camera_opt_to_camera: Optional transform for the camera to world matrices.
        distortion_params_delta: Optional delta for the distortion parameters.
        keep_shape: If None, then we default to the regular behavior of flattening if cameras is jagged, otherwise
            keeping dimensions. If False, we flatten at the end. If True, then we keep the shape of the
            camera_indices and coords tensors (if we can).
        disable_distortion: If True, disables distortion.

    Returns:
        Rays for the given camera indices and coords.
    """
    # Check the argument types to make sure they're valid and all shaped correctly
    assert isinstance(camera_indices, (torch.Tensor, int)), "camera_indices must be a tensor or int"
    assert coords is None or isinstance(coords, torch.Tensor), "coords must be a tensor or None"
    assert camera_opt_to_camera is None or isinstance(camera_opt_to_camera, torch.Tensor)
    assert distortion_params_delta is None or isinstance(distortion_params_delta, torch.Tensor)
    if isinstance(camera_indices, torch.Tensor) and isinstance(coords, torch.Tensor):
        num_rays_shape = camera_indices.shape[:-1]
        errormsg = "Batch dims of inputs must match when inputs are all tensors"
        assert coords.shape[:-1] == num_rays_shape, errormsg
        assert camera_opt_to_camera is None or camera_opt_to_camera.shape[:-2] == num_rays_shape, errormsg
        assert distortion_params_delta is None or distortion_params_delta.shape[:-1] == num_rays_shape, errormsg

    # If zero dimensional, we need to unsqueeze to get a batch dimension and then squeeze later
    if not self.shape:
        cameras = self.reshape((1,))
        assert torch.all(
            torch.tensor(camera_indices == 0) if isinstance(camera_indices, int) else camera_indices == 0
        ), "Can only index into single camera with no batch dimensions if index is zero"
    else:
        cameras = self

    # If the camera indices are an int, then we need to make sure that the camera batch is 1D
    if isinstance(camera_indices, int):
        assert (
            len(cameras.shape) == 1
        ), "camera_indices must be a tensor if cameras are batched with more than 1 batch dimension"
        camera_indices = torch.tensor([camera_indices], device=cameras.device)

    assert camera_indices.shape[-1] == len(
        cameras.shape
    ), "camera_indices must have shape (num_rays:..., num_cameras_batch_dims)"

    # If keep_shape is True, then we need to make sure that the camera indices in question
    # are all the same height and width and can actually be batched while maintaining the image
    # shape
    if keep_shape is True:
        assert torch.all(cameras.height[camera_indices] == cameras.height[camera_indices[0]]) and torch.all(
            cameras.width[camera_indices] == cameras.width[camera_indices[0]]
        ), "Can only keep shape if all cameras have the same height and width"

    # If the cameras don't all have same height / width, if coords is not none, we will need to generate
    # a flat list of coords for each camera and then concatenate otherwise our rays will be jagged.
    # Camera indices, camera_opt, and distortion will also need to be broadcasted accordingly which is non-trivial
    # NOTE(review): `is_jagged` reads like a property here but no decorator is visible in this extract —
    # confirm it is decorated with @property upstream, otherwise this condition is always truthy.
    if cameras.is_jagged and coords is None and (keep_shape is None or keep_shape is False):
        index_dim = camera_indices.shape[-1]
        camera_indices = camera_indices.reshape(-1, index_dim)
        _coords = [cameras.get_image_coords(index=tuple(index)).reshape(-1, 2) for index in camera_indices]
        # NOTE: `coords` inside this comprehension is the per-camera map from `_coords`
        # (bound by zip), not the outer `coords` argument.
        camera_indices = torch.cat(
            [index.unsqueeze(0).repeat(coords.shape[0], 1) for index, coords in zip(camera_indices, _coords)],
        )
        coords = torch.cat(_coords, dim=0)
        assert coords.shape[0] == camera_indices.shape[0]
        # Need to get the coords of each indexed camera and flatten all coordinate maps and concatenate them

    # The case where we aren't jagged && keep_shape (since otherwise coords is already set) and coords
    # is None. In this case we append (h, w) to the num_rays dimensions for all tensors. In this case,
    # each image in camera_indices has to have the same shape since otherwise we would have error'd when
    # we checked keep_shape is valid or we aren't jagged.
    if coords is None:
        index_dim = camera_indices.shape[-1]
        index = camera_indices.reshape(-1, index_dim)[0]
        coords: torch.Tensor = cameras.get_image_coords(index=tuple(index))  # (h, w, 2)
        coords = coords.reshape(coords.shape[:2] + (1,) * len(camera_indices.shape[:-1]) + (2,))  # (h, w, 1..., 2)
        coords = coords.expand(coords.shape[:2] + camera_indices.shape[:-1] + (2,))  # (h, w, num_rays, 2)
        camera_opt_to_camera = (  # (h, w, num_rays, 3, 4) or None
            camera_opt_to_camera.broadcast_to(coords.shape[:-1] + (3, 4))
            if camera_opt_to_camera is not None
            else None
        )
        distortion_params_delta = (  # (h, w, num_rays, 6) or None
            distortion_params_delta.broadcast_to(coords.shape[:-1] + (6,))
            if distortion_params_delta is not None
            else None
        )

    # If camera indices was an int or coords was none, we need to broadcast our indices along batch dims
    camera_indices = camera_indices.broadcast_to(coords.shape[:-1] + (len(cameras.shape),)).to(torch.long)

    # Checking our tensors have been standardized
    assert isinstance(coords, torch.Tensor) and isinstance(camera_indices, torch.Tensor)
    assert camera_indices.shape[-1] == len(cameras.shape)
    assert camera_opt_to_camera is None or camera_opt_to_camera.shape[:-2] == coords.shape[:-1]
    assert distortion_params_delta is None or distortion_params_delta.shape[:-1] == coords.shape[:-1]

    # This will do the actual work of generating the rays now that we have standardized the inputs
    # raybundle.shape == (num_rays) when done
    # pylint: disable=protected-access
    raybundle = cameras._generate_rays_from_coords(
        camera_indices, coords, camera_opt_to_camera, distortion_params_delta, disable_distortion=disable_distortion
    )

    # If we have mandated that we don't keep the shape, then we flatten
    if keep_shape is False:
        raybundle = raybundle.flatten()

    # TODO: We should have to squeeze the last dimension here if we started with zero batch dims, but never have to,
    # so there might be a rogue squeeze happening somewhere, and this may cause some unintended behaviour
    # that we haven't caught yet with tests
    return raybundle
# pylint: disable=too-many-statements
def _generate_rays_from_coords(
    self,
    camera_indices: TensorType["num_rays":..., "num_cameras_batch_dims"],
    coords: TensorType["num_rays":..., 2],
    camera_opt_to_camera: Optional[TensorType["num_rays":..., 3, 4]] = None,
    distortion_params_delta: Optional[TensorType["num_rays":..., 6]] = None,
    disable_distortion: bool = False,
) -> RayBundle:
    """Generates rays for the given camera indices and coords where self isn't jagged

    This is a fairly complex function, so let's break this down slowly.

    Shapes involved:
        - num_rays: This is your output raybundle shape. It dictates the number and shape of the rays generated
        - num_cameras_batch_dims: This is the number of dimensions of our camera

    Args:
        camera_indices: Camera indices of the flattened cameras object to generate rays for.
            The shape of this is such that indexing into camera_indices["num_rays":...] will return the
            index into each batch dimension of the camera in order to get the correct camera specified by
            "num_rays".

            Example:
                >>> cameras = Cameras(...)
                >>> cameras.shape
                    (2, 3, 4)

                >>> camera_indices = torch.tensor([0, 0, 0]) # We need an axis of length 3 since cameras.ndim == 3
                >>> camera_indices.shape
                    (3,)
                >>> coords = torch.tensor([1,1])
                >>> coords.shape
                    (2,)
                >>> out_rays = cameras.generate_rays(camera_indices=camera_indices, coords = coords)
                    # This will generate a RayBundle with a single ray for the
                    # camera at cameras[0,0,0] at image coordinates (1,1), so out_rays.shape == ()
                >>> out_rays.shape
                    ()

                >>> camera_indices = torch.tensor([[0,0,0]])
                >>> camera_indices.shape
                    (1, 3)
                >>> coords = torch.tensor([[1,1]])
                >>> coords.shape
                    (1, 2)
                >>> out_rays = cameras.generate_rays(camera_indices=camera_indices, coords = coords)
                    # This will generate a RayBundle with a single ray for the
                    # camera at cameras[0,0,0] at point (1,1), so out_rays.shape == (1,)
                    # since we added an extra dimension in front of camera_indices
                >>> out_rays.shape
                    (1,)

            If you want more examples, check tests/cameras/test_cameras and the function check_generate_rays_shape

            The bottom line is that for camera_indices: (num_rays:..., num_cameras_batch_dims), num_rays is the
            output shape and if you index into the output RayBundle with some indices [i:...], if you index into
            camera_indices with camera_indices[i:...] as well, you will get a 1D tensor containing the batch
            indices into the original cameras object corresponding to that ray (ie: you will get the camera
            from our batched cameras corresponding to the ray at RayBundle[i:...]).

        coords: Coordinates of the pixels to generate rays for. If None, the full image will be rendered, meaning
            height and width get prepended to the num_rays dimensions. Indexing into coords with [i:...] will
            get you the image coordinates [x, y] of that specific ray located at output RayBundle[i:...].

        camera_opt_to_camera: Optional transform for the camera to world matrices.
            In terms of shape, it follows the same rules as coords, but indexing into it with [i:...] gets you
            the 2D camera to world transform matrix for the camera optimization at RayBundle[i:...].

        distortion_params_delta: Optional delta for the distortion parameters.
            In terms of shape, it follows the same rules as coords, but indexing into it with [i:...] gets you
            the 1D tensor with the 6 distortion parameters for the camera optimization at RayBundle[i:...].

        disable_distortion: If True, disables distortion.

    Returns:
        Rays for the given camera indices and coords. RayBundle.shape == num_rays
    """
    # Make sure we're on the right devices
    camera_indices = camera_indices.to(self.device)
    coords = coords.to(self.device)

    # Checking to make sure everything is of the right shape and type
    num_rays_shape = camera_indices.shape[:-1]
    assert camera_indices.shape == num_rays_shape + (self.ndim,)
    assert coords.shape == num_rays_shape + (2,)
    assert coords.shape[-1] == 2
    assert camera_opt_to_camera is None or camera_opt_to_camera.shape == num_rays_shape + (3, 4)
    assert distortion_params_delta is None or distortion_params_delta.shape == num_rays_shape + (6,)

    # Here, we've broken our indices down along the num_cameras_batch_dims dimension allowing us to index by all
    # of our output rays at each dimension of our cameras object
    true_indices = [camera_indices[..., i] for i in range(camera_indices.shape[-1])]

    # Get all our focal lengths, principal points and make sure they are the right shapes
    y = coords[..., 0]  # (num_rays,) get rid of the last dimension
    x = coords[..., 1]  # (num_rays,) get rid of the last dimension
    fx, fy = self.fx[true_indices].squeeze(-1), self.fy[true_indices].squeeze(-1)  # (num_rays,)
    cx, cy = self.cx[true_indices].squeeze(-1), self.cy[true_indices].squeeze(-1)  # (num_rays,)
    assert (
        y.shape == num_rays_shape
        and x.shape == num_rays_shape
        and fx.shape == num_rays_shape
        and fy.shape == num_rays_shape
        and cx.shape == num_rays_shape
        and cy.shape == num_rays_shape
    ), (
        str(num_rays_shape)
        + str(y.shape)
        + str(x.shape)
        + str(fx.shape)
        + str(fy.shape)
        + str(cx.shape)
        + str(cy.shape)
    )

    # Get our image coordinates and image coordinates offset by 1 (offsets used for dx, dy calculations)
    # Also make sure the shapes are correct
    coord = torch.stack([(x - cx) / fx, -(y - cy) / fy], -1)  # (num_rays, 2)
    coord_x_offset = torch.stack([(x - cx + 1) / fx, -(y - cy) / fy], -1)  # (num_rays, 2)
    coord_y_offset = torch.stack([(x - cx) / fx, -(y - cy + 1) / fy], -1)  # (num_rays, 2)
    assert (
        coord.shape == num_rays_shape + (2,)
        and coord_x_offset.shape == num_rays_shape + (2,)
        and coord_y_offset.shape == num_rays_shape + (2,)
    )

    # Stack image coordinates and image coordinates offset by 1, check shapes too
    coord_stack = torch.stack([coord, coord_x_offset, coord_y_offset], dim=0)  # (3, num_rays, 2)
    assert coord_stack.shape == (3,) + num_rays_shape + (2,)

    # Undistorts our images according to our distortion parameters
    if not disable_distortion:
        distortion_params = None
        if self.distortion_params is not None:
            distortion_params = self.distortion_params[true_indices]
            if distortion_params_delta is not None:
                distortion_params = distortion_params + distortion_params_delta
        elif distortion_params_delta is not None:
            distortion_params = distortion_params_delta

        # Do not apply distortion for equirectangular images
        if distortion_params is not None:
            mask = (self.camera_type[true_indices] != CameraType.EQUIRECTANGULAR.value).squeeze(-1)  # (num_rays)
            coord_mask = torch.stack([mask, mask, mask], dim=0)
            if mask.any():
                coord_stack[coord_mask, :] = camera_utils.radial_and_tangential_undistort(
                    coord_stack[coord_mask, :].reshape(3, -1, 2),
                    distortion_params[mask, :],
                ).reshape(-1, 2)

    # Make sure after we have undistorted our images, the shapes are still correct
    assert coord_stack.shape == (3,) + num_rays_shape + (2,)

    # Gets our directions for all our rays in camera coordinates and checks shapes at the end
    # Here, directions_stack is of shape (3, num_rays, 3)
    # directions_stack[0] is the direction for ray in camera coordinates
    # directions_stack[1] is the direction for ray in camera coordinates offset by 1 in x
    # directions_stack[2] is the direction for ray in camera coordinates offset by 1 in y
    cam_types = torch.unique(self.camera_type, sorted=False)
    directions_stack = torch.empty((3,) + num_rays_shape + (3,), device=self.device)
    if CameraType.PERSPECTIVE.value in cam_types:
        mask = (self.camera_type[true_indices] == CameraType.PERSPECTIVE.value).squeeze(-1)  # (num_rays)
        mask = torch.stack([mask, mask, mask], dim=0)
        directions_stack[..., 0][mask] = torch.masked_select(coord_stack[..., 0], mask).float()
        directions_stack[..., 1][mask] = torch.masked_select(coord_stack[..., 1], mask).float()
        directions_stack[..., 2][mask] = -1.0

    if CameraType.FISHEYE.value in cam_types:
        mask = (self.camera_type[true_indices] == CameraType.FISHEYE.value).squeeze(-1)  # (num_rays)
        mask = torch.stack([mask, mask, mask], dim=0)

        theta = torch.sqrt(torch.sum(coord_stack**2, dim=-1))
        theta = torch.clip(theta, 0.0, math.pi)

        # NOTE(review): sin_theta / theta below is NaN at theta == 0 (the exact image center) —
        # confirm upstream that this case is handled or cannot occur.
        sin_theta = torch.sin(theta)

        directions_stack[..., 0][mask] = torch.masked_select(coord_stack[..., 0] * sin_theta / theta, mask).float()
        directions_stack[..., 1][mask] = torch.masked_select(coord_stack[..., 1] * sin_theta / theta, mask).float()
        directions_stack[..., 2][mask] = -torch.masked_select(torch.cos(theta), mask)

    if CameraType.EQUIRECTANGULAR.value in cam_types:
        mask = (self.camera_type[true_indices] == CameraType.EQUIRECTANGULAR.value).squeeze(-1)  # (num_rays)
        mask = torch.stack([mask, mask, mask], dim=0)

        # For equirect, fx = fy = height = width/2
        # Then coord[..., 0] goes from -1 to 1 and coord[..., 1] goes from -1/2 to 1/2
        theta = -torch.pi * coord_stack[..., 0]  # minus sign for right-handed
        phi = torch.pi * (0.5 - coord_stack[..., 1])
        # use spherical in local camera coordinates (+y up, x=0 and z<0 is theta=0)
        directions_stack[..., 0][mask] = torch.masked_select(-torch.sin(theta) * torch.sin(phi), mask).float()
        directions_stack[..., 1][mask] = torch.masked_select(torch.cos(phi), mask).float()
        directions_stack[..., 2][mask] = torch.masked_select(-torch.cos(theta) * torch.sin(phi), mask).float()

    for value in cam_types:
        if value not in [CameraType.PERSPECTIVE.value, CameraType.FISHEYE.value, CameraType.EQUIRECTANGULAR.value]:
            raise ValueError(f"Camera type {value} not supported.")

    assert directions_stack.shape == (3,) + num_rays_shape + (3,)

    c2w = self.camera_to_worlds[true_indices]
    assert c2w.shape == num_rays_shape + (3, 4)

    if camera_opt_to_camera is not None:
        c2w = pose_utils.multiply(c2w, camera_opt_to_camera)
    rotation = c2w[..., :3, :3]  # (..., 3, 3)
    assert rotation.shape == num_rays_shape + (3, 3)

    # Rotate camera-space directions into world space.
    directions_stack = torch.sum(
        directions_stack[..., None, :] * rotation, dim=-1
    )  # (..., 1, 3) * (..., 3, 3) -> (..., 3)

    # The norm of the un-offset ray (index 0 of the stack) is kept before normalization.
    directions_norm = torch.norm(directions_stack, dim=-1, keepdim=True)
    directions_norm = directions_norm[0]

    directions_stack = normalize(directions_stack, dim=-1)
    assert directions_stack.shape == (3,) + num_rays_shape + (3,)

    origins = c2w[..., :3, 3]  # (..., 3)
    assert origins.shape == num_rays_shape + (3,)

    directions = directions_stack[0]
    assert directions.shape == num_rays_shape + (3,)

    # norms of the vector going between adjacent coords, giving us dx and dy per output ray
    dx = torch.sqrt(torch.sum((directions - directions_stack[1]) ** 2, dim=-1))  # ("num_rays":...,)
    dy = torch.sqrt(torch.sum((directions - directions_stack[2]) ** 2, dim=-1))  # ("num_rays":...,)
    assert dx.shape == num_rays_shape and dy.shape == num_rays_shape

    pixel_area = (dx * dy)[..., None]  # ("num_rays":..., 1)
    assert pixel_area.shape == num_rays_shape + (1,)

    times = self.times[camera_indices, 0] if self.times is not None else None

    return RayBundle(
        origins=origins,
        directions=directions,
        pixel_area=pixel_area,
        camera_indices=camera_indices,
        directions_norm=directions_norm,
        times=times,
    )
def to_json(
    self, camera_idx: int, image: Optional[TensorType["height", "width", 2]] = None, max_size: Optional[int] = None
) -> Dict:
    """Serialize one camera of this batch to a JSON-compatible dictionary.

    Args:
        camera_idx: Index of the camera to convert.
        image: An image in range [0, 1] that is encoded to a base64 string.
        max_size: Max size to resize the image to if present.

    Returns:
        A JSON representation of the camera
    """
    cam = self.flatten()[camera_idx]
    payload = {
        "type": "PinholeCamera",
        "cx": cam.cx.item(),
        "cy": cam.cy.item(),
        "fx": cam.fx.item(),
        "fy": cam.fy.item(),
        "camera_to_world": self.camera_to_worlds[camera_idx].tolist(),
        "camera_index": camera_idx,
        "times": cam.times.item() if self.times is not None else None,
    }
    if image is not None:
        # Quantize [0, 1] floats to uint8, optionally resize, then JPEG-encode as base64.
        pixels = (image * 255).detach().type(torch.uint8)
        if max_size is not None:
            pixels = pixels.permute(2, 0, 1)
            pixels = torchvision.transforms.functional.resize(pixels, max_size)  # type: ignore
            pixels = pixels.permute(1, 2, 0)
        encoded = cv2.imencode(".jpg", pixels.cpu().numpy())[1].tobytes()
        payload["image"] = "data:image/jpeg;base64," + base64.b64encode(encoded).decode("ascii")
    return payload
def get_intrinsics_matrices(self) -> TensorType["num_cameras":..., 3, 3]:
    """Returns the intrinsic matrices for each camera.

    Returns:
        Pinhole camera intrinsics matrices
    """
    K = torch.zeros((*self.shape, 3, 3), dtype=torch.float32)
    # Fill the pinhole entries: focal lengths on the diagonal, principal point in the last column.
    for (row, col), value in (
        ((0, 0), self.fx),
        ((1, 1), self.fy),
        ((0, 2), self.cx),
        ((1, 2), self.cy),
    ):
        K[..., row, col] = value.squeeze(-1)
    K[..., 2, 2] = 1.0
    return K
def rescale_output_resolution(
    self, scaling_factor: Union[TensorType["num_cameras":...], TensorType["num_cameras":..., 1], float, int]
) -> None:
    """Rescale the output resolution of the cameras in place.

    Args:
        scaling_factor: Scaling factor to apply to the output resolution.

    Raises:
        ValueError: if the scaling factor is neither a scalar nor a tensor of a compatible shape.
    """
    # Normalize the factor to a tensor broadcastable against the per-camera parameters.
    if isinstance(scaling_factor, (float, int)):
        factor = torch.tensor([scaling_factor]).to(self.device).broadcast_to((self.cx.shape))
    elif isinstance(scaling_factor, torch.Tensor) and scaling_factor.shape == self.shape:
        factor = scaling_factor.unsqueeze(-1)
    elif isinstance(scaling_factor, torch.Tensor) and scaling_factor.shape == (*self.shape, 1):
        factor = scaling_factor
    else:
        raise ValueError(
            f"Scaling factor must be a float, int, or a tensor of shape {self.shape} or {(*self.shape, 1)}."
        )
    # Intrinsics scale linearly with resolution; image sizes stay integral.
    for name in ("fx", "fy", "cx", "cy"):
        setattr(self, name, getattr(self, name) * factor)
    self.height = (self.height * factor).to(torch.int64)
    self.width = (self.width * factor).to(torch.int64)
class Pipeline(nn.Module):
    """The intent of this class is to provide a higher level interface for the Model
    that will be easy to use for our Trainer class.

    This class will contain high level functions for the model like getting the loss
    dictionaries and visualization code. It should have ways to get the next iterations
    training loss, evaluation loss, and generate whole images for visualization. Each model
    class should be 1:1 with a pipeline that can act as a standardized interface and hide
    differences in how each model takes in and outputs data.

    This class's function is to hide the data manager and model classes from the trainer,
    worrying about:
    1) Fetching data with the data manager
    2) Feeding the model the data and fetching the loss
    Hopefully this provides a higher level interface for the trainer to use, and
    simplifying the model classes, which each may have different forward() methods
    and so on.

    Args:
        config: configuration to instantiate pipeline
        device: location to place model and data
        test_mode:
            'train': loads train/eval datasets into memory
            'test': loads train/test dataset into memory
            'inference': does not load any dataset into memory
        world_size: total number of machines available
        local_rank: rank of current machine

    Attributes:
        datamanager: The data manager that will be used
        model: The model that will be used
    """

    # pylint: disable=abstract-method

    # Declared attributes; assigned by subclasses' __init__ (not visible in this extract).
    datamanager: DataManager
    _model: Model

    # NOTE(review): `model` and `device` read as properties elsewhere in this class
    # (e.g. `self.model.device` below) — decorators appear stripped in this extract;
    # confirm both carry @property upstream.
    def model(self):
        """Returns the unwrapped model if in ddp"""
        return module_wrapper(self._model)

    def device(self):
        """Returns the device that the model is on."""
        return self.model.device

    def get_train_loss_dict(self, step: int):
        """This function gets your training loss dict. This will be responsible for
        getting the next batch of data from the DataManager and interfacing with the
        Model class, feeding the data to the model's forward function.

        Args:
            step: current iteration step to update sampler if using DDP (distributed)
        """
        # NOTE(review): `self.world_size` is not defined in this extract — presumably set by
        # a subclass or the constructor; confirm.
        if self.world_size > 1 and step:
            assert self.datamanager.train_sampler is not None
            self.datamanager.train_sampler.set_epoch(step)
        ray_bundle, batch = self.datamanager.next_train(step)
        model_outputs = self.model(ray_bundle, batch)
        metrics_dict = self.model.get_metrics_dict(model_outputs, batch)
        loss_dict = self.model.get_loss_dict(model_outputs, batch, metrics_dict)
        return model_outputs, loss_dict, metrics_dict

    def get_eval_loss_dict(self, step: int):
        """This function gets your evaluation loss dict. It needs to get the data
        from the DataManager and feed it to the model's forward function

        Args:
            step: current iteration step
        """
        # Switch to eval mode for the forward pass, restore train mode afterwards.
        self.eval()
        if self.world_size > 1:
            assert self.datamanager.eval_sampler is not None
            self.datamanager.eval_sampler.set_epoch(step)
        ray_bundle, batch = self.datamanager.next_eval(step)
        model_outputs = self.model(ray_bundle, batch)
        metrics_dict = self.model.get_metrics_dict(model_outputs, batch)
        loss_dict = self.model.get_loss_dict(model_outputs, batch, metrics_dict)
        self.train()
        return model_outputs, loss_dict, metrics_dict

    # The methods below have docstring-only bodies here: interface stubs for subclasses
    # to implement (decorators such as @abstractmethod, if any, are not visible in this extract).
    def get_eval_image_metrics_and_images(self, step: int):
        """This function gets your evaluation loss dict. It needs to get the data
        from the DataManager and feed it to the model's forward function

        Args:
            step: current iteration step
        """

    def get_average_eval_image_metrics(self, step: Optional[int] = None):
        """Iterate over all the images in the eval dataset and get the average."""

    def load_pipeline(self, loaded_state: Dict[str, Any]) -> None:
        """Load the checkpoint from the given path

        Args:
            loaded_state: pre-trained model state dict
        """

    def get_training_callbacks(
        self, training_callback_attributes: TrainingCallbackAttributes
    ) -> List[TrainingCallback]:
        """Returns the training callbacks from both the Dataloader and the Model."""

    def get_param_groups(self) -> Dict[str, List[Parameter]]:
        """Get the param groups for the pipeline.

        Returns:
            A list of dictionaries containing the pipeline's param groups.
        """
class ItersPerSecColumn(ProgressColumn):
    """Progress-bar column that displays the task's iteration rate."""

    def __init__(self, suffix="it/s") -> None:
        super().__init__()
        # Unit label appended after the numeric rate (e.g. "fps").
        self.suffix = suffix

    def render(self, task: "Task") -> Text:
        """Render the current iteration rate for the given task."""
        # Prefer the frozen speed of a finished task, falling back to the live estimate.
        rate = task.finished_speed or task.speed
        label = "?" if rate is None else f"{rate:.2f} {self.suffix}"
        return Text(label, style="progress.data.speed")
The provided code snippet includes necessary dependencies for implementing the `_render_trajectory_video` function. Write a Python function `def _render_trajectory_video( pipeline: Pipeline, cameras: Cameras, output_filename: Path, rendered_output_names: List[str], rendered_resolution_scaling_factor: float = 1.0, seconds: float = 5.0, output_format: Literal["images", "video"] = "video", ) -> None` to solve the following problem:
Helper function to create a video of the spiral trajectory. Args: pipeline: Pipeline to evaluate with. cameras: Cameras to render. output_filename: Name of the output file. rendered_output_names: List of outputs to visualise. rendered_resolution_scaling_factor: Scaling factor to apply to the camera image resolution. seconds: Length of output video. output_format: How to save output data.
Here is the function:
def _render_trajectory_video(
    pipeline: Pipeline,
    cameras: Cameras,
    output_filename: Path,
    rendered_output_names: List[str],
    rendered_resolution_scaling_factor: float = 1.0,
    seconds: float = 5.0,
    output_format: Literal["images", "video"] = "video",
) -> None:
    """Helper function to create a video of the spiral trajectory.

    Args:
        pipeline: Pipeline to evaluate with.
        cameras: Cameras to render.
        output_filename: Name of the output file.
        rendered_output_names: List of outputs to visualise.
        rendered_resolution_scaling_factor: Scaling factor to apply to the camera image resolution.
        seconds: Length of output video.
        output_format: How to save output data.
    """
    CONSOLE.print("[bold green]Creating trajectory video")
    images = []
    # Note: rescale_output_resolution mutates `cameras` in place.
    cameras.rescale_output_resolution(rendered_resolution_scaling_factor)
    cameras = cameras.to(pipeline.device)

    progress = Progress(
        TextColumn(":movie_camera: Rendering :movie_camera:"),
        BarColumn(),
        TaskProgressColumn(show_speed=True),
        ItersPerSecColumn(suffix="fps"),
        TimeRemainingColumn(elapsed_when_finished=True, compact=True),
    )
    # Per-frame images are written under a directory named after the output file's stem.
    output_image_dir = output_filename.parent / output_filename.stem
    if output_format == "images":
        output_image_dir.mkdir(parents=True, exist_ok=True)
    with progress:
        for camera_idx in progress.track(range(cameras.size), description=""):
            camera_ray_bundle = cameras.generate_rays(camera_indices=camera_idx)
            with torch.no_grad():
                outputs = pipeline.model.get_outputs_for_camera_ray_bundle(camera_ray_bundle)
            render_image = []
            for rendered_output_name in rendered_output_names:
                if rendered_output_name not in outputs:
                    CONSOLE.rule("Error", style="red")
                    CONSOLE.print(f"Could not find {rendered_output_name} in the model outputs", justify="center")
                    CONSOLE.print(f"Please set --rendered_output_name to one of: {outputs.keys()}", justify="center")
                    sys.exit(1)
                output_image = outputs[rendered_output_name].cpu().numpy()
                render_image.append(output_image)
            # Requested outputs are concatenated side by side along the width axis.
            render_image = np.concatenate(render_image, axis=1)
            if output_format == "images":
                media.write_image(output_image_dir / f"{camera_idx:05d}.png", render_image)
            else:
                images.append(render_image)

    if output_format == "video":
        # Frame rate is derived so the whole trajectory spans `seconds`.
        fps = len(images) / seconds
        # make the folder if it doesn't exist
        output_filename.parent.mkdir(parents=True, exist_ok=True)
        with CONSOLE.status("[yellow]Saving video", spinner="bouncingBall"):
            media.write_video(output_filename, images, fps=fps)
    CONSOLE.rule("[green] :tada: :tada: :tada: Success :tada: :tada: :tada:")
    CONSOLE.print(f"[green]Saved video to {output_filename}", justify="center")
162,405 | from __future__ import annotations
import json
import sys
from dataclasses import dataclass, field
from pathlib import Path
from typing import List, Optional
import mediapy as media
import numpy as np
import torch
import tyro
from rich.console import Console
from rich.progress import (
BarColumn,
Progress,
TaskProgressColumn,
TextColumn,
TimeRemainingColumn,
)
from typing_extensions import Literal, assert_never
from nerfstudio.cameras.camera_paths import get_path_from_json, get_spiral_path
from nerfstudio.cameras.cameras import Cameras
from nerfstudio.configs.base_config import Config
from nerfstudio.pipelines.base_pipeline import Pipeline
from nerfstudio.utils import install_checks
from nerfstudio.utils.eval_utils import eval_setup
from nerfstudio.utils.rich_utils import ItersPerSecColumn
class RenderTrajectory:
    """Load a checkpoint, render a trajectory, and save to a video file."""

    # NOTE(review): fields below carry class-level defaults and are consumed by tyro.cli,
    # which implies a @dataclass decorator — it is not visible in this extract; confirm
    # against the original source.

    # Path to config YAML file.
    load_config: Path
    # Name of the renderer outputs to use. rgb, depth, etc. concatenates them along y axis
    rendered_output_names: List[str] = field(default_factory=lambda: ["rgb"])
    # Trajectory to render.
    traj: Literal["spiral", "filename"] = "spiral"
    # Scaling factor to apply to the camera image resolution.
    downscale_factor: int = 1
    # Filename of the camera path to render.
    camera_path_filename: Path = Path("camera_path.json")
    # Name of the output file.
    output_path: Path = Path("renders/output.mp4")
    # How long the video should be.
    seconds: float = 5.0
    # How to save output data.
    output_format: Literal["images", "video"] = "video"
    # Specifies number of rays per chunk during eval.
    eval_num_rays_per_chunk: Optional[int] = None

    def main(self) -> None:
        """Main function."""
        # "spiral" needs the eval dataset (to seed the spiral from an eval camera);
        # "filename" only needs inference mode.
        _, pipeline, _ = eval_setup(
            self.load_config,
            eval_num_rays_per_chunk=self.eval_num_rays_per_chunk,
            test_mode="test" if self.traj == "spiral" else "inference",
        )

        install_checks.check_ffmpeg_installed()

        seconds = self.seconds

        # TODO(ethan): use camera information from parsing args
        if self.traj == "spiral":
            camera_start = pipeline.datamanager.eval_dataloader.get_camera(image_idx=0).flatten()
            # TODO(ethan): pass in the up direction of the camera
            camera_path = get_spiral_path(camera_start, steps=30, radius=0.1)
        elif self.traj == "filename":
            with open(self.camera_path_filename, "r", encoding="utf-8") as f:
                camera_path = json.load(f)
            # A loaded camera-path file overrides the configured video length.
            seconds = camera_path["seconds"]
            camera_path = get_path_from_json(camera_path)
        else:
            assert_never(self.traj)

        _render_trajectory_video(
            pipeline,
            camera_path,
            output_filename=self.output_path,
            rendered_output_names=self.rendered_output_names,
            rendered_resolution_scaling_factor=1.0 / self.downscale_factor,
            seconds=seconds,
            output_format=self.output_format,
        )
The provided code snippet includes necessary dependencies for implementing the `entrypoint` function. Write a Python function `def entrypoint()` to solve the following problem:
Entrypoint for use with pyproject scripts.
Here is the function:
def entrypoint():
    """Console-script entry point: parse CLI arguments with tyro and render the trajectory."""
    # Give tyro's generated help/usage text a distinctive accent color.
    tyro.extras.set_accent_color("bright_yellow")
    renderer = tyro.cli(RenderTrajectory)
    renderer.main()
162,406 | from __future__ import annotations
import json
from dataclasses import dataclass
from pathlib import Path
import cv2
import numpy as np
import torch
import tyro
from rich.console import Console
from nerfstudio.utils.eval_utils import eval_setup
class ComputePSNR:
    """Load a checkpoint, compute some PSNR metrics, and save it to a JSON file."""

    # Path to config YAML file.
    load_config: Path
    # Name of the output file.
    output_path: Path = Path("output.json")
    # Name of the output images dir.
    output_images_path: Path = Path("output_images/")

    def main(self) -> None:
        """Evaluate the checkpointed pipeline, then write a metrics JSON and the rendered images."""
        config, pipeline, checkpoint_path = eval_setup(self.load_config)
        assert self.output_path.suffix == ".json"
        metrics_dict, images_dict_list = pipeline.get_average_eval_image_metrics()
        self.output_path.parent.mkdir(parents=True, exist_ok=True)
        self.output_images_path.mkdir(parents=True, exist_ok=True)
        # Get the output and define the names to save to
        benchmark_info = {
            "experiment_name": config.experiment_name,
            "method_name": config.method_name,
            "checkpoint": str(checkpoint_path),
            "results": metrics_dict,
        }
        # Save output to output file
        self.output_path.write_text(json.dumps(benchmark_info, indent=2), "utf8")
        CONSOLE.print(f"Saved results to: {self.output_path}")
        for idx, images_dict in enumerate(images_dict_list):
            for k, v in images_dict.items():
                # [..., ::-1] flips the channel axis (RGB -> BGR), the order cv2.imwrite expects.
                cv2.imwrite(
                    str(self.output_images_path / Path(f"{k}_{idx}.png")),
                    (v.cpu().numpy() * 255.0).astype(np.uint8)[..., ::-1],
                )
        CONSOLE.print(f"Saved rendering results to: {self.output_images_path}")
The provided code snippet includes necessary dependencies for implementing the `entrypoint` function. Write a Python function `def entrypoint()` to solve the following problem:
Entrypoint for use with pyproject scripts.
Here is the function:
def entrypoint():
    """Console-script entry point: parse CLI arguments with tyro and compute eval metrics."""
    # Give tyro's generated help/usage text a distinctive accent color.
    tyro.extras.set_accent_color("bright_yellow")
    evaluator = tyro.cli(ComputePSNR)
    evaluator.main()
162,407 | import subprocess
import sys
import tyro
import yaml
from rich.console import Console
from rich.style import Style
def run_code_checks(continue_on_fail: bool = False) -> None:
    """Run a github actions file locally.

    Args:
        continue_on_fail: Whether or not to continue running actions commands if the current one fails
    """
    # core code checks
    run_github_actions_file(filename=".github/workflows/core_code_checks.yml", continue_on_fail=continue_on_fail)
    # viewer build and deployment
    # NOTE(review): this workflow is intentionally left disabled here — confirm before re-enabling.
    # run_github_actions_file(filename=".github/workflows/viewer_build_deploy.yml", continue_on_fail=continue_on_fail)
The provided code snippet includes necessary dependencies for implementing the `entrypoint` function. Write a Python function `def entrypoint()` to solve the following problem:
Entrypoint for use with pyproject scripts.
Here is the function:
def entrypoint():
    """Console-script entry point: run the local code checks through tyro."""
    # Give tyro's generated help/usage text a distinctive accent color.
    tyro.extras.set_accent_color("bright_yellow")
    # tyro.cli invokes run_code_checks directly with the parsed arguments.
    tyro.cli(run_code_checks)
162,408 | import argparse
import glob
import os.path
import sys
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import PIL
import torch
import torch.nn.functional as F
from PIL import Image
from torchvision import transforms
from tqdm import tqdm
from data.transforms import get_transform
from modules.midas.dpt_depth import DPTDepthModel
from modules.unet import UNet
def standardize_depth_map(img, mask_valid=None, trunc_value=0.1):
    """Standardize a depth map to zero mean / unit variance using outlier-robust statistics.

    Pixels marked invalid by ``mask_valid`` are excluded from the statistics and
    replaced by the truncated mean before standardization.

    Args:
        img: Depth tensor (any shape). The caller's tensor is NOT modified.
        mask_valid: Optional boolean tensor, same shape as ``img``; True marks valid pixels.
        trunc_value: Fraction of the sorted values dropped at each end before
            computing mean/variance (robustness to outliers).

    Returns:
        Standardized depth tensor with the same shape as ``img``.
    """
    # Work on a copy: the previous implementation wrote NaNs into the caller's
    # tensor in place when mask_valid was given.
    img = img.clone()
    if mask_valid is not None:
        img[~mask_valid] = float("nan")
    sorted_img = torch.sort(torch.flatten(img))[0]
    # torch.sort places NaNs at the end of the sorted values; strip them off.
    num_nan = sorted_img.isnan().sum()
    if num_nan > 0:
        sorted_img = sorted_img[:-num_nan]
    # Drop the lowest/highest `trunc_value` fraction to get outlier-robust stats.
    # NOTE(review): assumes enough valid pixels remain after truncation — an
    # (almost) fully-masked image yields NaN statistics, as before.
    trunc_img = sorted_img[int(trunc_value * len(sorted_img)) : int((1 - trunc_value) * len(sorted_img))]
    trunc_mean = trunc_img.mean()
    trunc_var = trunc_img.var()
    eps = 1e-6
    # Replace NaNs (invalid pixels) by the truncated mean. Pass a Python float:
    # torch.nan_to_num expects a Number for `nan`, not a 0-dim tensor.
    img = torch.nan_to_num(img, nan=trunc_mean.item())
    # Standardize.
    img = (img - trunc_mean) / torch.sqrt(trunc_var + eps)
    return img
162,409 | import argparse
import glob
import os.path
import sys
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import PIL
import torch
import torch.nn.functional as F
from PIL import Image
from torchvision import transforms
from tqdm import tqdm
# NOTE(review): `parser`, `root_dir` and `map_location` are defined earlier in the
# original script and are not visible in this excerpt — confirm before reuse.
args = parser.parse_args()

from data.transforms import get_transform
from modules.midas.dpt_depth import DPTDepthModel
from modules.unet import UNet

# Converts CHW tensors back to PIL images when saving surface-normal predictions.
trans_topil = transforms.ToPILImage()

os.system(f"mkdir -p {args.output_path}")
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Build the task-specific model and input transform.
if args.task == "normal":
    image_size = 384

    ## Version 1 model
    # pretrained_weights_path = root_dir + 'omnidata_unet_normal_v1.pth'
    # model = UNet(in_channels=3, out_channels=3)
    # checkpoint = torch.load(pretrained_weights_path, map_location=map_location)

    # if 'state_dict' in checkpoint:
    #     state_dict = {}
    #     for k, v in checkpoint['state_dict'].items():
    #         state_dict[k.replace('model.', '')] = v
    # else:
    #     state_dict = checkpoint

    pretrained_weights_path = os.path.join(root_dir, "omnidata_dpt_normal_v2.ckpt")
    model = DPTDepthModel(backbone="vitb_rn50_384", num_channels=3)  # DPT Hybrid
    checkpoint = torch.load(pretrained_weights_path, map_location=map_location)
    if "state_dict" in checkpoint:
        state_dict = {}
        for k, v in checkpoint["state_dict"].items():
            # k[6:] strips a 6-character key prefix (presumably 'model.',
            # cf. the v1 branch above) — TODO confirm against the checkpoint.
            state_dict[k[6:]] = v
    else:
        state_dict = checkpoint
    model.load_state_dict(state_dict)
    model.to(device)
    trans_totensor = transforms.Compose(
        [
            transforms.Resize(image_size, interpolation=PIL.Image.BILINEAR),
            transforms.CenterCrop(image_size),
            get_transform("rgb", image_size=None),
        ]
    )

elif args.task == "depth":
    image_size = 384
    pretrained_weights_path = os.path.join(root_dir, "omnidata_dpt_depth_v2.ckpt")  # 'omnidata_dpt_depth_v1.ckpt'
    # model = DPTDepthModel(backbone='vitl16_384') # DPT Large
    model = DPTDepthModel(backbone="vitb_rn50_384")  # DPT Hybrid
    checkpoint = torch.load(pretrained_weights_path, map_location=map_location)
    if "state_dict" in checkpoint:
        state_dict = {}
        for k, v in checkpoint["state_dict"].items():
            # Same key-prefix stripping as the normal branch above.
            state_dict[k[6:]] = v
    else:
        state_dict = checkpoint
    model.load_state_dict(state_dict)
    model.to(device)
    trans_totensor = transforms.Compose(
        [
            transforms.Resize(image_size, interpolation=PIL.Image.BILINEAR),
            transforms.CenterCrop(image_size),
            transforms.ToTensor(),
            transforms.Normalize(mean=0.5, std=0.5),
        ]
    )

else:
    print("task should be one of the following: normal, depth")
    sys.exit()
def save_outputs(img_path, output_file_name):
    """Run the loaded model on one RGB image and save the predicted depth/normal map.

    Relies on the module-level globals set up above: `args`, `model`, `device`,
    `trans_totensor` and `trans_topil`. Depth outputs are saved as a raw .npy
    array plus a viridis-colormapped .png; normal outputs as .npy plus RGB .png.

    Args:
        img_path: Path to the square input image whose side is divisible by 384.
        output_file_name: Output basename; its `_rgb` suffix is replaced by the task name.
    """
    with torch.no_grad():
        save_path = os.path.join(args.output_path, output_file_name.replace("_rgb", f"_{args.task}") + ".png")

        # print(f"Reading input {img_path} ...")
        img = Image.open(img_path)
        H, W = img.size[1], img.size[0]
        assert H == W, "Image should be square"
        assert H % 384 == 0, "Image size should be divisible by 384"
        # The network runs at 384x384; predictions are upsampled back by this factor.
        scale_factor = H // 384

        img_tensor = trans_totensor(img)[:3].unsqueeze(0).to(device)

        if img_tensor.shape[1] == 1:
            # Grayscale input: replicate the single channel into 3 channels.
            img_tensor = img_tensor.repeat_interleave(3, 1)

        output = model(img_tensor).clamp(min=0, max=1)

        if args.task == "depth":
            if scale_factor > 1:
                output = F.interpolate(output.unsqueeze(0), scale_factor=scale_factor, mode="nearest").squeeze(0)
            output = output.clamp(0, 1)

            np.save(save_path.replace(".png", ".npy"), output.detach().cpu().numpy()[0])
            plt.imsave(save_path, output.detach().cpu().squeeze(), cmap="viridis")
        else:
            if scale_factor > 1:
                output = torch.nn.functional.interpolate(output, scale_factor=scale_factor, mode="nearest")
            np.save(save_path.replace(".png", ".npy"), output.detach().cpu().numpy()[0])
            trans_topil(output[0]).save(save_path)

        # print(f"Writing output {save_path} ...")
162,410 | import os
from dataclasses import dataclass
from pathlib import Path
from typing import Tuple
import cv2
import numpy as np
import tyro
from nuscenes.nuscenes import NuScenes as NuScenesDatabase
from nuscenes.utils.data_classes import Box
from nuscenes.utils.geometry_utils import BoxVisibility, view_points
from tqdm import tqdm
from typing_extensions import Literal
class ProcessNuScenesMasks:
    """Use cuboid detections to render masks for dynamic objects."""

    data_dir: Path
    """Path to NuScenes dataset."""
    output_dir: Path
    """Path to the output directory."""
    version: Literal["v1.0-mini", "v1.0-trainval"] = "v1.0-mini"
    """Which version of the dataset to process."""
    velocity_thresh: float = 0.75
    """Minimum speed for object to be considered dynamic."""
    cameras: Tuple[Literal["FRONT", "FRONT_LEFT", "FRONT_RIGHT", "BACK", "BACK_LEFT", "BACK_RIGHT"], ...] = (
        "FRONT",
        "FRONT_LEFT",
        "FRONT_RIGHT",
        "BACK",
        "BACK_LEFT",
        "BACK_RIGHT",
    )
    """Which cameras to use."""
    verbose: bool = False
    """If True, print extra logging and visualize images on screen."""

    def main(self) -> None:
        """Generate NuScenes dynamic object masks."""
        nusc = NuScenesDatabase(version=self.version, dataroot=self.data_dir, verbose=self.verbose)
        cameras = ["CAM_" + camera for camera in self.cameras]

        for camera in cameras:
            (self.output_dir / "masks" / camera).mkdir(parents=True, exist_ok=True)

        # get samples for scene
        samples = [samp for samp in nusc.sample]

        # sort by timestamp (only to make chronological viz easier)
        samples.sort(key=lambda x: (x["scene_token"], x["timestamp"]))

        # get which instances are moving in any frame (these are what we mask)
        instances = nusc.instance
        for instance in instances:
            is_dynamic = False
            ann_token = instance["first_annotation_token"]
            while ann_token:
                velocity = nusc.box_velocity(ann_token)
                # NOTE(review): written as `not x < t` instead of `x >= t`, presumably so a
                # NaN velocity (comparison False) also marks the instance dynamic — confirm.
                if not np.linalg.norm(velocity) < self.velocity_thresh:
                    is_dynamic = True
                    break
                ann_token = nusc.get("sample_annotation", ann_token)["next"]
            instance["is_dynamic"] = is_dynamic
        instances_is_dynamic = {instance["token"]: instance["is_dynamic"] for instance in instances}

        for sample in tqdm(samples):
            viz = []
            for camera in cameras:
                camera_data = nusc.get("sample_data", sample["data"][camera])
                calibrated_sensor = nusc.get("calibrated_sensor", camera_data["calibrated_sensor_token"])
                intrinsics = np.array(calibrated_sensor["camera_intrinsic"])
                _, boxes, _ = nusc.get_sample_data(sample["data"][camera], box_vis_level=BoxVisibility.ANY)
                # TODO: BoxVisibility.ANY misses boxes that are partially behind the camera leading to missed masks
                # Instead use BoxVisibility.NONE and make sure to rasterize box faces correctly
                # mask convention: 1 = keep pixel, 0 = dynamic object (900x1600 camera frames).
                mask = np.ones((900, 1600), dtype=np.uint8)
                for box in boxes:
                    # Dont mask out static objects (static in all frames)
                    instance_token = nusc.get("sample_annotation", box.token)["instance_token"]
                    if not instances_is_dynamic[instance_token]:
                        continue

                    # project box to image plane and rasterize each face
                    corners_3d = box.corners()
                    corners = view_points(corners_3d, intrinsics, normalize=True)[:2, :]
                    corners = np.round(corners).astype(int).T
                    cv2.fillPoly(mask, [corners[[0, 1, 2, 3]]], 0)  # front
                    cv2.fillPoly(mask, [corners[[4, 5, 6, 7]]], 0)  # back
                    cv2.fillPoly(mask, [corners[[0, 1, 5, 4]]], 0)  # top
                    cv2.fillPoly(mask, [corners[[2, 3, 7, 6]]], 0)  # bottom
                    cv2.fillPoly(mask, [corners[[0, 3, 7, 4]]], 0)  # left
                    cv2.fillPoly(mask, [corners[[1, 2, 6, 5]]], 0)  # right

                maskname = os.path.split(camera_data["filename"])[1].replace("jpg", "png")
                cv2.imwrite(str(self.output_dir / "masks" / camera / maskname), mask * 255)

                if self.verbose:
                    # Darken the masked pixels of the source image for on-screen inspection.
                    img = cv2.imread(str(self.data_dir / camera_data["filename"]))
                    mask = ~mask.astype(bool)
                    img[mask, :] -= np.minimum(img[mask, :], 100)
                    viz.append(img)

            if self.verbose:
                if len(viz) == 6:
                    viz = np.vstack((np.hstack(viz[:3]), np.hstack(viz[3:])))
                    viz = cv2.resize(viz, (int(1600 * 3 / 3), int(900 * 2 / 3)))
                elif len(viz) == 3:
                    viz = np.hstack(viz[:3])
                    viz = cv2.resize(viz, (int(1600 * 3 / 3), int(900 / 3)))
                elif len(viz) == 1:
                    viz = viz[0]
                else:
                    raise ValueError("Only support 1 or 3 or 6 cameras for viz")
                cv2.imshow("", viz)
                cv2.waitKey(1)
The provided code snippet includes necessary dependencies for implementing the `entrypoint` function. Write a Python function `def entrypoint()` to solve the following problem:
Entrypoint for use with pyproject scripts.
Here is the function:
def entrypoint():
    """Console-script entry point: parse CLI arguments with tyro and generate the masks."""
    # Give tyro's generated help/usage text a distinctive accent color.
    tyro.extras.set_accent_color("bright_yellow")
    processor = tyro.cli(ProcessNuScenesMasks)
    processor.main()
162,411 | import argparse
import glob
import json
import os
import re
import shutil
from pathlib import Path
import cv2
import matplotlib.pyplot as plt
import numpy as np
import PIL
from PIL import Image
from torchvision import transforms
The provided code snippet includes necessary dependencies for implementing the `alphanum_key` function. Write a Python function `def alphanum_key(s)` to solve the following problem:
Turn a string into a list of string and number chunks. "z23a" -> ["z", 23, "a"]
Here is the function:
def alphanum_key(s):
    """Split *s* into alternating text and integer chunks for natural sorting.

    "z23a" -> ["z", 23, "a"]
    """
    key = []
    # Capturing group keeps the digit runs in the split result.
    for chunk in re.split("([0-9]+)", s):
        key.append(int(chunk) if chunk.isdigit() else chunk)
    return key
162,412 | import argparse
import glob
import json
import os
import re
import shutil
from pathlib import Path
import cv2
import matplotlib.pyplot as plt
import numpy as np
import PIL
from PIL import Image
from torchvision import transforms
# NOTE(review): `pose_path`, `center`, `scale`, `output_path` and `output_data`
# are defined earlier in the original script — not visible in this excerpt.
poses, valid_poses = load_poses(pose_path)
poses = np.array(poses)
# Flip the sign of the y/z camera axes — presumably an OpenGL<->OpenCV
# convention change; TODO confirm against the dataset loader.
poses[:, 0:3, 1:3] *= -1
# Re-center and re-scale camera positions into the normalized scene box.
poses[:, :3, 3] -= center
poses[:, :3, 3] *= scale

with open(output_path / "meta_data.json", "w", encoding="utf-8") as f:
    json.dump(output_data, f, indent=4)
def load_poses(posefile):
    """Load 4x4 camera poses stored as consecutive blocks of 4 whitespace-separated rows.

    A block whose first row contains the token "nan" is treated as invalid:
    an identity matrix is substituted and its validity flag is False.

    Args:
        posefile: Path to the text file; every 4 consecutive lines form one matrix.

    Returns:
        Tuple (poses, valid): `poses` is a list of 4x4 nested float lists and
        `valid` is a parallel list of bools.
    """
    # Context manager guarantees the file is closed even if parsing raises
    # (the previous version leaked the handle on a parse error).
    with open(posefile, "r", encoding="utf-8") as file:
        lines = file.readlines()

    poses = []
    valid = []
    lines_per_matrix = 4
    for i in range(0, len(lines), lines_per_matrix):
        if "nan" in lines[i]:
            valid.append(False)
            poses.append(np.eye(4, 4, dtype=np.float32).tolist())
        else:
            valid.append(True)
            pose_floats = [[float(x) for x in line.split()] for line in lines[i : i + lines_per_matrix]]
            poses.append(pose_floats)

    return poses, valid
162,413 | from datetime import timedelta, datetime
import json
import re
import requests
from requests.adapters import HTTPAdapter
from urllib.parse import quote
def url_updated(url):
    """Check whether a remote link is reachable (i.e. the content can be fetched).

    Mounts retry adapters (2 retries) for both schemes and treats any network
    failure or non-200 status as "not updated".

    Args:
        url: The URL to probe.

    Returns:
        True if a GET on the URL returns HTTP 200, False otherwise.
    """
    s = requests.Session()
    s.mount('http://', HTTPAdapter(max_retries=2))
    s.mount('https://', HTTPAdapter(max_retries=2))
    try:
        status = s.get(url, timeout=4).status_code
    except Exception:
        # Any failure (timeout, DNS error, connection reset, ...) counts as unreachable.
        status = 404
    # Fixed: the old version assigned the result to a local variable named
    # `url_updated`, shadowing the function itself, via a redundant if/else.
    return status == 200
162,414 | import re
import yaml
import json
import re
import time
import os
from sub_convert import sub_convert
from subs_function import subs_function
from list_merge_airport import sub_merge
# Speed-test log consumed below (one line of measurements per node).
log_file = './LogInfoAir.txt'
# Directory where provider YAML fragments are written.
provider_path = './update/provider/'
# Clash config template that gets populated with proxies and groups.
config_file = './update/provider/config.yml'
class NoAliasDumper(yaml.SafeDumper):
    """SafeDumper that disables anchor/alias emission, so repeated objects are
    written out in full instead of as `&anchor`/`*alias` references."""

    def ignore_aliases(self, data):
        # Always report "no alias needed" regardless of object identity.
        return True
def substrings(string, left, right):
    """Return the text between the first `left` marker and the next `right` marker.

    All spaces and newlines are stripped from `string` first, and the `left`
    marker itself is removed from the returned text. Raises ValueError if either
    marker is absent.
    """
    compact = string.replace('\n', '').replace(' ', '')
    begin = compact.index(left)
    # First occurrence of `right` at or after the `left` marker, as an absolute index.
    stop = begin + compact[begin:].index(right)
    return compact[begin:stop].replace(left, '')
class subs_function:
    """Helpers for fetching, validating, renaming and de-duplicating proxy
    subscription nodes.

    NOTE(review): the methods are written without `self`/`@staticmethod`; they
    appear intended to be called on the class itself, e.g.
    `subs_function.convert_sub(...)` — confirm against callers.
    """

    def convert_sub(url: str, output: str, convertor_host="http://0.0.0.0:25500", show_url=False, extra_options=""):
        """Convert a subscription `url` to the `output` format via a local
        subconverter service; returns the converted text or an "Err: ..." string."""
        url = urllib.parse.quote(url, safe='')
        try:
            convert_url = f'{convertor_host}/sub?target={output}&url={url}&insert=false&emoji=true&list=true&tfo=false&scv=false&fdn=false&sort=false{extra_options}'
            result = requests.get(convert_url, timeout=240).text
            if show_url:
                print(f"url to host for {output} : {convert_url}")
            if result == "No nodes were found!":
                return "Err: No nodes found"
            return result
        except Exception as e:
            print(e)
            return "Err: failed to parse sub"

    def is_line_valid(line, support_vless=False):
        """Return `line` if it starts with a supported proxy URI scheme, else ''."""
        if (line.startswith("ssr://") or line.startswith("ss://")
                or line.startswith("trojan://") or line.startswith("vmess://")):
            return line
        if(support_vless and line.startswith("vless://")):
            return line
        return ''

    def fix_proxies_name(corresponding_proxies: list):
        """Geolocate each proxy's server (Country.mmdb), rename it to
        '<flag><CC>-<ip>-<index>', and drop proxies from excluded countries or
        with an unresolvable location. Returns the filtered list."""
        # ISO country code -> flag emoji used as the name prefix.
        emoji = {
            'AD': '🇦🇩', 'AE': '🇦🇪', 'AF': '🇦🇫', 'AG': '🇦🇬',
            'AI': '🇦🇮', 'AL': '🇦🇱', 'AM': '🇦🇲', 'AO': '🇦🇴',
            'AQ': '🇦🇶', 'AR': '🇦🇷', 'AS': '🇦🇸', 'AT': '🇦🇹',
            'AU': '🇦🇺', 'AW': '🇦🇼', 'AX': '🇦🇽', 'AZ': '🇦🇿',
            'BA': '🇧🇦', 'BB': '🇧🇧', 'BD': '🇧🇩', 'BE': '🇧🇪',
            'BF': '🇧🇫', 'BG': '🇧🇬', 'BH': '🇧🇭', 'BI': '🇧🇮',
            'BJ': '🇧🇯', 'BL': '🇧🇱', 'BM': '🇧🇲', 'BN': '🇧🇳',
            'BO': '🇧🇴', 'BQ': '🇧🇶', 'BR': '🇧🇷', 'BS': '🇧🇸',
            'BT': '🇧🇹', 'BV': '🇧🇻', 'BW': '🇧🇼', 'BY': '🇧🇾',
            'BZ': '🇧🇿', 'CA': '🇨🇦', 'CC': '🇨🇨', 'CD': '🇨🇩',
            'CF': '🇨🇫', 'CG': '🇨🇬', 'CH': '🇨🇭', 'CI': '🇨🇮',
            'CK': '🇨🇰', 'CL': '🇨🇱', 'CM': '🇨🇲', 'CN': '🇨🇳',
            'CO': '🇨🇴', 'CR': '🇨🇷', 'CU': '🇨🇺', 'CV': '🇨🇻',
            'CW': '🇨🇼', 'CX': '🇨🇽', 'CY': '🇨🇾', 'CZ': '🇨🇿',
            'DE': '🇩🇪', 'DJ': '🇩🇯', 'DK': '🇩🇰', 'DM': '🇩🇲',
            'DO': '🇩🇴', 'DZ': '🇩🇿', 'EC': '🇪🇨', 'EE': '🇪🇪',
            'EG': '🇪🇬', 'EH': '🇪🇭', 'ER': '🇪🇷', 'ES': '🇪🇸',
            'ET': '🇪🇹', 'EU': '🇪🇺', 'FI': '🇫🇮', 'FJ': '🇫🇯',
            'FK': '🇫🇰', 'FM': '🇫🇲', 'FO': '🇫🇴', 'FR': '🇫🇷',
            'GA': '🇬🇦', 'GB': '🇬🇧', 'GD': '🇬🇩', 'GE': '🇬🇪',
            'GF': '🇬🇫', 'GG': '🇬🇬', 'GH': '🇬🇭', 'GI': '🇬🇮',
            'GL': '🇬🇱', 'GM': '🇬🇲', 'GN': '🇬🇳', 'GP': '🇬🇵',
            'GQ': '🇬🇶', 'GR': '🇬🇷', 'GS': '🇬🇸', 'GT': '🇬🇹',
            'GU': '🇬🇺', 'GW': '🇬🇼', 'GY': '🇬🇾', 'HK': '🇭🇰',
            'HM': '🇭🇲', 'HN': '🇭🇳', 'HR': '🇭🇷', 'HT': '🇭🇹',
            'HU': '🇭🇺', 'ID': '🇮🇩', 'IE': '🇮🇪', 'IL': '🇮🇱',
            'IM': '🇮🇲', 'IN': '🇮🇳', 'IO': '🇮🇴', 'IQ': '🇮🇶',
            'IR': '🇮🇷', 'IS': '🇮🇸', 'IT': '🇮🇹', 'JE': '🇯🇪',
            'JM': '🇯🇲', 'JO': '🇯🇴', 'JP': '🇯🇵', 'KE': '🇰🇪',
            'KG': '🇰🇬', 'KH': '🇰🇭', 'KI': '🇰🇮', 'KM': '🇰🇲',
            'KN': '🇰🇳', 'KP': '🇰🇵', 'KR': '🇰🇷', 'KW': '🇰🇼',
            'KY': '🇰🇾', 'KZ': '🇰🇿', 'LA': '🇱🇦', 'LB': '🇱🇧',
            'LC': '🇱🇨', 'LI': '🇱🇮', 'LK': '🇱🇰', 'LR': '🇱🇷',
            'LS': '🇱🇸', 'LT': '🇱🇹', 'LU': '🇱🇺', 'LV': '🇱🇻',
            'LY': '🇱🇾', 'MA': '🇲🇦', 'MC': '🇲🇨', 'MD': '🇲🇩',
            'ME': '🇲🇪', 'MF': '🇲🇫', 'MG': '🇲🇬', 'MH': '🇲🇭',
            'MK': '🇲🇰', 'ML': '🇲🇱', 'MM': '🇲🇲', 'MN': '🇲🇳',
            'MO': '🇲🇴', 'MP': '🇲🇵', 'MQ': '🇲🇶', 'MR': '🇲🇷',
            'MS': '🇲🇸', 'MT': '🇲🇹', 'MU': '🇲🇺', 'MV': '🇲🇻',
            'MW': '🇲🇼', 'MX': '🇲🇽', 'MY': '🇲🇾', 'MZ': '🇲🇿',
            'NA': '🇳🇦', 'NC': '🇳🇨', 'NE': '🇳🇪', 'NF': '🇳🇫',
            'NG': '🇳🇬', 'NI': '🇳🇮', 'NL': '🇳🇱', 'NO': '🇳🇴',
            'NP': '🇳🇵', 'NR': '🇳🇷', 'NU': '🇳🇺', 'NZ': '🇳🇿',
            'OM': '🇴🇲', 'PA': '🇵🇦', 'PE': '🇵🇪', 'PF': '🇵🇫',
            'PG': '🇵🇬', 'PH': '🇵🇭', 'PK': '🇵🇰', 'PL': '🇵🇱',
            'PM': '🇵🇲', 'PN': '🇵🇳', 'PR': '🇵🇷', 'PS': '🇵🇸',
            'PT': '🇵🇹', 'PW': '🇵🇼', 'PY': '🇵🇾', 'QA': '🇶🇦',
            'RE': '🇷🇪', 'RO': '🇷🇴', 'RS': '🇷🇸', 'RU': '🇷🇺',
            'RW': '🇷🇼', 'SA': '🇸🇦', 'SB': '🇸🇧', 'SC': '🇸🇨',
            'SD': '🇸🇩', 'SE': '🇸🇪', 'SG': '🇸🇬', 'SH': '🇸🇭',
            'SI': '🇸🇮', 'SJ': '🇸🇯', 'SK': '🇸🇰', 'SL': '🇸🇱',
            'SM': '🇸🇲', 'SN': '🇸🇳', 'SO': '🇸🇴', 'SR': '🇸🇷',
            'SS': '🇸🇸', 'ST': '🇸🇹', 'SV': '🇸🇻', 'SX': '🇸🇽',
            'SY': '🇸🇾', 'SZ': '🇸🇿', 'TC': '🇹🇨', 'TD': '🇹🇩',
            'TF': '🇹🇫', 'TG': '🇹🇬', 'TH': '🇹🇭', 'TJ': '🇹🇯',
            'TK': '🇹🇰', 'TL': '🇹🇱', 'TM': '🇹🇲', 'TN': '🇹🇳',
            'TO': '🇹🇴', 'TR': '🇹🇷', 'TT': '🇹🇹', 'TV': '🇹🇻',
            'TW': '🇹🇼', 'TZ': '🇹🇿', 'UA': '🇺🇦', 'UG': '🇺🇬',
            'UM': '🇺🇲', 'US': '🇺🇸', 'UY': '🇺🇾', 'UZ': '🇺🇿',
            'VA': '🇻🇦', 'VC': '🇻🇨', 'VE': '🇻🇪', 'VG': '🇻🇬',
            'VI': '🇻🇮', 'VN': '🇻🇳', 'VU': '🇻🇺', 'WF': '🇼🇫',
            'WS': '🇼🇸', 'XK': '🇽🇰', 'YE': '🇾🇪', 'YT': '🇾🇹',
            'ZA': '🇿🇦', 'ZM': '🇿🇲', 'ZW': '🇿🇼',
            'RELAY': '🏁',
            'NOWHERE': '🇦🇶',
        }
        exclude_list_of_countries = ['IL']
        excluded_proxies = []
        for (index, c_proxy) in enumerate(corresponding_proxies):
            proxy = c_proxy['c_clash']
            # decoded_yaml = yaml.safe_load(proxy)
            # # for safety i add both scenario
            # if type(decoded_yaml) == list:
            #     proxy = decoded_yaml[0]
            # else:
            #     proxy = decoded_yaml
            if type(proxy) == list:
                proxy = proxy[0]
            server = str(proxy['server'])
            # A dotted-digits server string is already an IP; otherwise resolve via DNS.
            if server.replace('.', '').isdigit():
                ip = server
            else:
                try:
                    # https://cloud.tencent.com/developer/article/1569841
                    ip = socket.gethostbyname(server)
                except Exception:
                    ip = server
            with geoip2.database.Reader('./utils/Country.mmdb') as ip_reader:
                try:
                    response = ip_reader.country(ip)
                    country_code = response.country.iso_code
                except Exception:
                    ip = '0.0.0.0'
                    country_code = 'NOWHERE'
            if country_code == 'CLOUDFLARE':
                country_code = 'RELAY'
            elif country_code == 'PRIVATE':
                country_code = 'RELAY'
            if country_code in emoji:
                name_emoji = emoji[country_code]
            else:
                name_emoji = emoji['NOWHERE']
            # proxy_index = proxies_list.index(proxy)
            # Zero-pad the index width to the size of the full list.
            if len(corresponding_proxies) >= 999:
                proxy['name'] = f'{name_emoji}{country_code}-{ip}-{index:0>4d}'
            elif len(corresponding_proxies) <= 999 and len(corresponding_proxies) > 99:
                proxy['name'] = f'{name_emoji}{country_code}-{ip}-{index:0>3d}'
            elif len(corresponding_proxies) <= 99:
                proxy['name'] = f'{name_emoji}{country_code}-{ip}-{index:0>2d}'
            # corresponding_proxies[index]["c_clash"] = f" - {proxy}"
            corresponding_proxies[index]["c_clash"] = proxy
            # add exclude list
            if country_code in exclude_list_of_countries or name_emoji == emoji['NOWHERE']:
                excluded_proxies.append(c_proxy)
        return list(filter(lambda c: c not in excluded_proxies, corresponding_proxies))

    def fix_proxies_duplication(corresponding_proxies: list):
        """Remove duplicate proxies in place: two entries are duplicates when
        server+port match and none of the compared transport fields differ.
        Returns the pruned list."""
        print("\nBefore was " + str(corresponding_proxies.__len__()) + "\n")
        begin = 0
        raw_length = len(corresponding_proxies)
        length = len(corresponding_proxies)
        while begin < length:
            # Progress logging only.
            if (begin + 1) == 1:
                print(f'\n-----Restart-----\nStarting Quantity {length}')
            elif (begin + 1) % 100 == 0:
                print(
                    f'Current Benchmark {begin + 1}-----Current Quantity {length}')
            elif (begin + 1) == length and (begin + 1) % 100 != 0:
                repetition = raw_length - length
                print(
                    f'Current Benchmark {begin + 1}-----Current Quantity {length}\nNumber of Repetition {repetition}\n-----Deduplication Completed-----\n')
            # proxy_compared = yaml.safe_load(
            #     corresponding_proxies[begin]["c_clash"])
            proxy_compared = corresponding_proxies[begin]["c_clash"]
            if type(proxy_compared) == list:
                proxy_compared = proxy_compared[0]
            begin_2 = begin + 1
            while begin_2 <= (length - 1):
                check = False
                # correspond_next_proxy = yaml.safe_load(
                #     corresponding_proxies[begin_2]["c_clash"])
                correspond_next_proxy = corresponding_proxies[begin_2]["c_clash"]
                if type(correspond_next_proxy) == list:
                    correspond_next_proxy = correspond_next_proxy[0]
                # Candidate duplicate: same endpoint. Any differing transport
                # field below vetoes the match.
                if proxy_compared['server'] == correspond_next_proxy['server'] and proxy_compared['port'] == correspond_next_proxy['port']:
                    check = True
                    if 'net' in correspond_next_proxy and 'net' in proxy_compared:
                        if proxy_compared['net'] != correspond_next_proxy['net']:
                            check = False
                    if 'tls' in correspond_next_proxy and 'tls' in proxy_compared:
                        if proxy_compared['tls'] != correspond_next_proxy['tls']:
                            check = False
                    #if 'id' in correspond_next_proxy and 'id' in proxy_compared:
                    #    if proxy_compared['id'] != correspond_next_proxy['id']:
                    #        check = False
                    if 'ws-opts' in correspond_next_proxy and 'ws-opts' in proxy_compared:
                        if proxy_compared['ws-opts'] != correspond_next_proxy['ws-opts']:
                            check = False
                    #if 'uuid' in correspond_next_proxy and 'uuid' in proxy_compared:
                    #    if proxy_compared['uuid'] != correspond_next_proxy['uuid']:
                    #        check = False
                    #if 'password' in correspond_next_proxy and 'password' in proxy_compared:
                    #    if proxy_compared['password'] != correspond_next_proxy['password']:
                    #        check = False
                    if 'cipher' in correspond_next_proxy and 'cipher' in proxy_compared:
                        if proxy_compared['cipher'] != correspond_next_proxy['cipher']:
                            check = False
                    if 'type' in correspond_next_proxy and 'type' in proxy_compared:
                        if proxy_compared['type'] != correspond_next_proxy['type']:
                            check = False
                    # due to conversion we could have udp off or on for same proxies
                    # if 'udp' in correspond_next_proxy and 'udp' in proxy_compared:
                    #     if proxy_compared['udp'] != correspond_next_proxy['udp']:
                    #         check = False
                    if 'network' in correspond_next_proxy and 'network' in proxy_compared:
                        if proxy_compared['network'] != correspond_next_proxy['network']:
                            check = False
                    if 'obfs' in correspond_next_proxy and 'obfs' in proxy_compared:
                        if proxy_compared['obfs'] != correspond_next_proxy['obfs']:
                            check = False
                if check:
                    # Remove the duplicate; keep begin_2 pointing at the shifted element.
                    corresponding_proxies.pop(begin_2)
                    length -= 1
                begin_2 += 1
            begin += 1
        print("\nNow is " + str(corresponding_proxies.__len__()) + "\n")
        return corresponding_proxies
def eternity_convert(file, config, output, provider_file_enabled=True):
    """Fetch the merged airport subscription, annotate node names with measured
    speeds from the log file, split the nodes into four speed tiers, and write
    the fully populated Clash config to `output`.

    NOTE(review): the `file` and `config` parameters are effectively unused —
    the subscription URL is hard-coded and `config` is overwritten from the
    module-level `config_file`. Also relies on globals `log_file` and
    `provider_path`, plus `subs_function`, `substrings` and `NoAliasDumper`.
    """
    # # no conversion from base64 so udp is not a problem
    # subconvertor not working with only proxy url
    all_provider = subs_function.convert_sub(
        "https://raw.githubusercontent.com/mahdibland/SSAggregator/master/sub/airport_merge_base64.txt", 'clash', "http://0.0.0.0:25500", False, extra_options="&udp=false")

    ########## Add names to the log lines before making changes to proxies ############
    # The log is assumed to hold one measurement line per proxy, in the same
    # order as the converted provider output — TODO confirm upstream.
    temp_providers = all_provider.split('\n')
    log_reader = open(log_file, 'r')
    log_lines = log_reader.readlines()
    log_reader.close()
    indexx = 0
    for line in temp_providers:
        if line != 'proxies:':
            #####
            server_name = substrings(line, "name:", ",")
            server_type = substrings(line, "type:", ",")
            log_lines[indexx] = "name: %s | type: %s | %s" % (
                server_name, server_type, log_lines[indexx])
            #####
            indexx += 1
    log_writer = open(log_file, 'w')
    log_writer.writelines(log_lines)
    log_writer.close()
    ############################################################################
    # remove lines with name issue (mojibake replacement character)
    removed_bad_char = list(filter(lambda x: str(x).__contains__(
        "�") == False, all_provider.split("\n")[1:]))
    log_lines_without_bad_char = list(filter(lambda x: str(x).__contains__(
        "�") == False, log_lines))
    # make sure the size of two list are equal
    print(
        f"removed_bad_char count => {removed_bad_char.__len__()} & log_lines_without_bad_char count => {log_lines_without_bad_char.__len__()}")
    # take a part from begining of all lines (cap at 200 nodes)
    num = 200
    num = removed_bad_char.__len__() if removed_bad_char.__len__() <= num else num
    # remove zero speed lines
    removed_bad_char_without_zero = []
    for (index, item) in enumerate(removed_bad_char[0:num + 1]):
        if log_lines_without_bad_char[index].__contains__("avg_speed: 0.0 MB") == False:
            removed_bad_char_without_zero.append(item)
    # convert the safe partition to yaml format
    all_provider = "proxies:\n" + "\n".join(removed_bad_char_without_zero)

    # Rewrite each node's name to "<name> | <avg_speed>," and parse it into a dict.
    lines = re.split(r'\n+', all_provider)
    proxy_all = []
    indexx = 0
    for line in lines:
        if line != 'proxies:':
            try:
                name = substrings(line, "name:", ",")
                speed = substrings(
                    log_lines_without_bad_char[indexx], "avg_speed:", "|")
                line = re.sub("name:( |)(.*?),", "name: %s | %s," %
                              (name, speed), line)
            except:
                print(log_lines_without_bad_char[indexx])
                pass
            # line = ' ' + line
            line = line.replace('- ', '')
            linee = yaml.safe_load(line)
            proxy_all.append(linee)
            indexx += 1

    if provider_file_enabled:
        providers_files = {
            'all': provider_path + 'provider-all-airport.yml',
        }
        eternity_providers = {
            'all': all_provider,
        }
        print('Writing content to provider')
        for key in providers_files.keys():
            provider_all = open(providers_files[key], 'w', encoding='utf-8')
            provider_all.write(eternity_providers[key])
            provider_all.close()
        print('Done!\n')

    # Build the fully configured Eternity.yml
    config_f = open(config_file, 'r', encoding='utf-8')
    config_raw = config_f.read()
    config_f.close()
    config = yaml.safe_load(config_raw)

    all_provider_dic = {'proxies': []}
    provider_dic = {
        'all': all_provider_dic,
    }
    for key in eternity_providers.keys():  # parse each provider's node text into dict form
        provider_load = yaml.safe_load(eternity_providers[key])
        provider_dic[key].update(provider_load)

    # Build the per-provider lists of node names (annotated with speed when available).
    all_name = []
    name_dict = {
        'all': all_name,
    }
    indexx = 0
    for key in provider_dic.keys():
        if not provider_dic[key]['proxies'] is None:
            for proxy in provider_dic[key]['proxies']:
                try:
                    speed = substrings(
                        log_lines_without_bad_char[indexx], "avg_speed:", "|")
                    name_dict[key].append(
                        str(proxy['name']).replace(" ", "") + " | " + speed)
                except:
                    name_dict[key].append(str(proxy['name']).replace(" ", ""))
                    print(log_lines_without_bad_char[indexx])
                indexx += 1
        if provider_dic[key]['proxies'] is None:
            name_dict[key].append('DIRECT')

    # Fill the proxy groups with node names.
    proxy_groups = config['proxy-groups']
    proxy_group_fill = []
    for rule in proxy_groups:
        if rule['proxies'] is None:  # empty groups are queued to be filled below
            proxy_group_fill.append(rule['name'])
    # Split the node list into four equal quarters; Tier 4 absorbs the remainder.
    full_size = all_name.__len__()
    part_size = int(full_size / 4)
    last_size = full_size - (part_size * 3)
    for rule_name in proxy_group_fill:
        for rule in proxy_groups:
            if rule['name'] == rule_name:
                # todo it changes from Main group to tier names
                if "Tier 1" in rule_name:
                    rule.update({'proxies': all_name[0:part_size]})
                elif "Tier 2" in rule_name:
                    rule.update({'proxies': all_name[part_size:part_size*2]})
                elif "Tier 3" in rule_name:
                    rule.update({'proxies': all_name[part_size*2:part_size*3]})
                elif "Tier 4" in rule_name:
                    rule.update({'proxies': all_name[part_size*3:full_size]})

    config.update(all_provider_dic)
    config.update({'proxy-groups': proxy_groups})
    config.update({'proxies': proxy_all})
    # NoAliasDumper keeps repeated nodes written in full (no YAML anchors).
    config_yaml = yaml.dump(config, default_flow_style=False, sort_keys=False,
                            allow_unicode=True, width=750, indent=2, Dumper=NoAliasDumper)
    Eternity_yml = open(output, 'w+', encoding='utf-8')
    Eternity_yml.write(config_yaml)
    Eternity_yml.close()
162,415 | import sub_convert
from list_update import update_url
from get_subs import subs
import json
import re
import os
import yaml
from urllib import request
def add_valid(line):
    """Return *line* unchanged when it embeds a supported proxy URI scheme, else ''.

    Unlike a startswith check, the scheme may appear anywhere in the line.
    """
    for scheme in ("ssr://", "ss://", "trojan://", "vmess://"):
        if scheme in line:
            return line
    return ''
162,416 | import re
import yaml
import json
import re
import time
import os
from sub_convert import sub_convert
from subs_function import subs_function
from list_merge import sub_merge
# Speed-test log consumed below (one line of measurements per node).
log_file = './LogInfo.txt'
# Directory where provider YAML fragments are written.
provider_path = './update/provider/'
# Clash config template that gets populated with proxies and groups.
config_file = './update/provider/config.yml'
class NoAliasDumper(yaml.SafeDumper):
    """SafeDumper that disables anchor/alias emission, so repeated objects are
    written out in full instead of as `&anchor`/`*alias` references."""

    def ignore_aliases(self, data):
        # Always report "no alias needed" regardless of object identity.
        return True
def substrings(string, left, right):
    """Return the text between the first `left` marker and the next `right` marker.

    All spaces and newlines are stripped from `string` first, and the `left`
    marker itself is removed from the returned text. Raises ValueError if either
    marker is absent.
    """
    compact = string.replace('\n', '').replace(' ', '')
    begin = compact.index(left)
    # First occurrence of `right` at or after the `left` marker, as an absolute index.
    stop = begin + compact[begin:].index(right)
    return compact[begin:stop].replace(left, '')
class subs_function:
def convert_sub(url: str, output: str, convertor_host="http://0.0.0.0:25500", show_url=False, extra_options=""):
url = urllib.parse.quote(url, safe='')
try:
convert_url = f'{convertor_host}/sub?target={output}&url={url}&insert=false&emoji=true&list=true&tfo=false&scv=false&fdn=false&sort=false{extra_options}'
result = requests.get(convert_url, timeout=240).text
if show_url:
print(f"url to host for {output} : {convert_url}")
if result == "No nodes were found!":
return "Err: No nodes found"
return result
except Exception as e:
print(e)
return "Err: failed to parse sub"
def is_line_valid(line, support_vless=False):
if (line.startswith("ssr://") or line.startswith("ss://")
or line.startswith("trojan://") or line.startswith("vmess://")):
return line
if(support_vless and line.startswith("vless://")):
return line
return ''
def fix_proxies_name(corresponding_proxies: []):
    """Rename every proxy to '<flag><CC>-<ip>-<index>' via GeoIP and drop excluded ones.

    NOTE(review): uses `socket` and `geoip2` plus './utils/Country.mmdb', none of
    which are imported/shown in this snippet — confirm against the real module.
    Returns the input list with proxies from excluded countries (and unresolvable
    hosts mapped to the 'NOWHERE' flag) filtered out.
    """
    # ISO country code -> flag emoji used to prefix proxy names.
    emoji = {
        'AD': '🇦🇩', 'AE': '🇦🇪', 'AF': '🇦🇫', 'AG': '🇦🇬',
        'AI': '🇦🇮', 'AL': '🇦🇱', 'AM': '🇦🇲', 'AO': '🇦🇴',
        'AQ': '🇦🇶', 'AR': '🇦🇷', 'AS': '🇦🇸', 'AT': '🇦🇹',
        'AU': '🇦🇺', 'AW': '🇦🇼', 'AX': '🇦🇽', 'AZ': '🇦🇿',
        'BA': '🇧🇦', 'BB': '🇧🇧', 'BD': '🇧🇩', 'BE': '🇧🇪',
        'BF': '🇧🇫', 'BG': '🇧🇬', 'BH': '🇧🇭', 'BI': '🇧🇮',
        'BJ': '🇧🇯', 'BL': '🇧🇱', 'BM': '🇧🇲', 'BN': '🇧🇳',
        'BO': '🇧🇴', 'BQ': '🇧🇶', 'BR': '🇧🇷', 'BS': '🇧🇸',
        'BT': '🇧🇹', 'BV': '🇧🇻', 'BW': '🇧🇼', 'BY': '🇧🇾',
        'BZ': '🇧🇿', 'CA': '🇨🇦', 'CC': '🇨🇨', 'CD': '🇨🇩',
        'CF': '🇨🇫', 'CG': '🇨🇬', 'CH': '🇨🇭', 'CI': '🇨🇮',
        'CK': '🇨🇰', 'CL': '🇨🇱', 'CM': '🇨🇲', 'CN': '🇨🇳',
        'CO': '🇨🇴', 'CR': '🇨🇷', 'CU': '🇨🇺', 'CV': '🇨🇻',
        'CW': '🇨🇼', 'CX': '🇨🇽', 'CY': '🇨🇾', 'CZ': '🇨🇿',
        'DE': '🇩🇪', 'DJ': '🇩🇯', 'DK': '🇩🇰', 'DM': '🇩🇲',
        'DO': '🇩🇴', 'DZ': '🇩🇿', 'EC': '🇪🇨', 'EE': '🇪🇪',
        'EG': '🇪🇬', 'EH': '🇪🇭', 'ER': '🇪🇷', 'ES': '🇪🇸',
        'ET': '🇪🇹', 'EU': '🇪🇺', 'FI': '🇫🇮', 'FJ': '🇫🇯',
        'FK': '🇫🇰', 'FM': '🇫🇲', 'FO': '🇫🇴', 'FR': '🇫🇷',
        'GA': '🇬🇦', 'GB': '🇬🇧', 'GD': '🇬🇩', 'GE': '🇬🇪',
        'GF': '🇬🇫', 'GG': '🇬🇬', 'GH': '🇬🇭', 'GI': '🇬🇮',
        'GL': '🇬🇱', 'GM': '🇬🇲', 'GN': '🇬🇳', 'GP': '🇬🇵',
        'GQ': '🇬🇶', 'GR': '🇬🇷', 'GS': '🇬🇸', 'GT': '🇬🇹',
        'GU': '🇬🇺', 'GW': '🇬🇼', 'GY': '🇬🇾', 'HK': '🇭🇰',
        'HM': '🇭🇲', 'HN': '🇭🇳', 'HR': '🇭🇷', 'HT': '🇭🇹',
        'HU': '🇭🇺', 'ID': '🇮🇩', 'IE': '🇮🇪', 'IL': '🇮🇱',
        'IM': '🇮🇲', 'IN': '🇮🇳', 'IO': '🇮🇴', 'IQ': '🇮🇶',
        'IR': '🇮🇷', 'IS': '🇮🇸', 'IT': '🇮🇹', 'JE': '🇯🇪',
        'JM': '🇯🇲', 'JO': '🇯🇴', 'JP': '🇯🇵', 'KE': '🇰🇪',
        'KG': '🇰🇬', 'KH': '🇰🇭', 'KI': '🇰🇮', 'KM': '🇰🇲',
        'KN': '🇰🇳', 'KP': '🇰🇵', 'KR': '🇰🇷', 'KW': '🇰🇼',
        'KY': '🇰🇾', 'KZ': '🇰🇿', 'LA': '🇱🇦', 'LB': '🇱🇧',
        'LC': '🇱🇨', 'LI': '🇱🇮', 'LK': '🇱🇰', 'LR': '🇱🇷',
        'LS': '🇱🇸', 'LT': '🇱🇹', 'LU': '🇱🇺', 'LV': '🇱🇻',
        'LY': '🇱🇾', 'MA': '🇲🇦', 'MC': '🇲🇨', 'MD': '🇲🇩',
        'ME': '🇲🇪', 'MF': '🇲🇫', 'MG': '🇲🇬', 'MH': '🇲🇭',
        'MK': '🇲🇰', 'ML': '🇲🇱', 'MM': '🇲🇲', 'MN': '🇲🇳',
        'MO': '🇲🇴', 'MP': '🇲🇵', 'MQ': '🇲🇶', 'MR': '🇲🇷',
        'MS': '🇲🇸', 'MT': '🇲🇹', 'MU': '🇲🇺', 'MV': '🇲🇻',
        'MW': '🇲🇼', 'MX': '🇲🇽', 'MY': '🇲🇾', 'MZ': '🇲🇿',
        'NA': '🇳🇦', 'NC': '🇳🇨', 'NE': '🇳🇪', 'NF': '🇳🇫',
        'NG': '🇳🇬', 'NI': '🇳🇮', 'NL': '🇳🇱', 'NO': '🇳🇴',
        'NP': '🇳🇵', 'NR': '🇳🇷', 'NU': '🇳🇺', 'NZ': '🇳🇿',
        'OM': '🇴🇲', 'PA': '🇵🇦', 'PE': '🇵🇪', 'PF': '🇵🇫',
        'PG': '🇵🇬', 'PH': '🇵🇭', 'PK': '🇵🇰', 'PL': '🇵🇱',
        'PM': '🇵🇲', 'PN': '🇵🇳', 'PR': '🇵🇷', 'PS': '🇵🇸',
        'PT': '🇵🇹', 'PW': '🇵🇼', 'PY': '🇵🇾', 'QA': '🇶🇦',
        'RE': '🇷🇪', 'RO': '🇷🇴', 'RS': '🇷🇸', 'RU': '🇷🇺',
        'RW': '🇷🇼', 'SA': '🇸🇦', 'SB': '🇸🇧', 'SC': '🇸🇨',
        'SD': '🇸🇩', 'SE': '🇸🇪', 'SG': '🇸🇬', 'SH': '🇸🇭',
        'SI': '🇸🇮', 'SJ': '🇸🇯', 'SK': '🇸🇰', 'SL': '🇸🇱',
        'SM': '🇸🇲', 'SN': '🇸🇳', 'SO': '🇸🇴', 'SR': '🇸🇷',
        'SS': '🇸🇸', 'ST': '🇸🇹', 'SV': '🇸🇻', 'SX': '🇸🇽',
        'SY': '🇸🇾', 'SZ': '🇸🇿', 'TC': '🇹🇨', 'TD': '🇹🇩',
        'TF': '🇹🇫', 'TG': '🇹🇬', 'TH': '🇹🇭', 'TJ': '🇹🇯',
        'TK': '🇹🇰', 'TL': '🇹🇱', 'TM': '🇹🇲', 'TN': '🇹🇳',
        'TO': '🇹🇴', 'TR': '🇹🇷', 'TT': '🇹🇹', 'TV': '🇹🇻',
        'TW': '🇹🇼', 'TZ': '🇹🇿', 'UA': '🇺🇦', 'UG': '🇺🇬',
        'UM': '🇺🇲', 'US': '🇺🇸', 'UY': '🇺🇾', 'UZ': '🇺🇿',
        'VA': '🇻🇦', 'VC': '🇻🇨', 'VE': '🇻🇪', 'VG': '🇻🇬',
        'VI': '🇻🇮', 'VN': '🇻🇳', 'VU': '🇻🇺', 'WF': '🇼🇫',
        'WS': '🇼🇸', 'XK': '🇽🇰', 'YE': '🇾🇪', 'YT': '🇾🇹',
        'ZA': '🇿🇦', 'ZM': '🇿🇲', 'ZW': '🇿🇼',
        'RELAY': '🏁',
        'NOWHERE': '🇦🇶',
    }
    # Country codes whose proxies are removed from the result.
    exclude_list_of_countries = ['IL']
    excluded_proxies = []
    for (index, c_proxy) in enumerate(corresponding_proxies):
        proxy = c_proxy['c_clash']
        # decoded_yaml = yaml.safe_load(proxy)
        # # for safety i add both scenario
        # if type(decoded_yaml) == list:
        #     proxy = decoded_yaml[0]
        # else:
        #     proxy = decoded_yaml
        if type(proxy) == list:
            proxy = proxy[0]
        server = str(proxy['server'])
        if server.replace('.', '').isdigit():
            # Already a dotted IPv4 literal.
            ip = server
        else:
            try:
                # https://cloud.tencent.com/developer/article/1569841
                ip = socket.gethostbyname(server)
            except Exception:
                # DNS failure: fall back to the raw hostname.
                ip = server
        # NOTE(review): the mmdb reader is reopened for every proxy — works, but
        # hoisting it out of the loop would be cheaper.
        with geoip2.database.Reader('./utils/Country.mmdb') as ip_reader:
            try:
                response = ip_reader.country(ip)
                country_code = response.country.iso_code
            except Exception:
                ip = '0.0.0.0'
                country_code = 'NOWHERE'
        if country_code == 'CLOUDFLARE':
            country_code = 'RELAY'
        elif country_code == 'PRIVATE':
            country_code = 'RELAY'
        if country_code in emoji:
            name_emoji = emoji[country_code]
        else:
            name_emoji = emoji['NOWHERE']
        # proxy_index = proxies_list.index(proxy)
        # Zero-pad the running index so names sort lexicographically.
        if len(corresponding_proxies) >= 999:
            proxy['name'] = f'{name_emoji}{country_code}-{ip}-{index:0>4d}'
        elif len(corresponding_proxies) <= 999 and len(corresponding_proxies) > 99:
            proxy['name'] = f'{name_emoji}{country_code}-{ip}-{index:0>3d}'
        elif len(corresponding_proxies) <= 99:
            proxy['name'] = f'{name_emoji}{country_code}-{ip}-{index:0>2d}'
        # corresponding_proxies[index]["c_clash"] = f" - {proxy}"
        corresponding_proxies[index]["c_clash"] = proxy
        # add exclude list
        if country_code in exclude_list_of_countries or name_emoji == emoji['NOWHERE']:
            excluded_proxies.append(c_proxy)
    return list(filter(lambda c: c not in excluded_proxies, corresponding_proxies))
def fix_proxies_duplication(corresponding_proxies: []):
    """Remove duplicate proxies in place via an O(n^2) pairwise comparison.

    Two entries count as duplicates when server+port match and none of the
    compared optional fields (net/tls/ws-opts/cipher/type/network/obfs) differ.
    Returns the same (now shortened) list.
    """
    print("\nBefore was " + str(corresponding_proxies.__len__()) + "\n")
    begin = 0
    raw_length = len(corresponding_proxies)
    length = len(corresponding_proxies)
    while begin < length:
        # Progress logging at start, every 100 items, and at the end.
        if (begin + 1) == 1:
            print(f'\n-----Restart-----\nStarting Quantity {length}')
        elif (begin + 1) % 100 == 0:
            print(
                f'Current Benchmark {begin + 1}-----Current Quantity {length}')
        elif (begin + 1) == length and (begin + 1) % 100 != 0:
            repetition = raw_length - length
            print(
                f'Current Benchmark {begin + 1}-----Current Quantity {length}\nNumber of Repetition {repetition}\n-----Deduplication Completed-----\n')
        # proxy_compared = yaml.safe_load(
        #     corresponding_proxies[begin]["c_clash"])
        proxy_compared = corresponding_proxies[begin]["c_clash"]
        if type(proxy_compared) == list:
            proxy_compared = proxy_compared[0]
        begin_2 = begin + 1
        while begin_2 <= (length - 1):
            check = False
            # correspond_next_proxy = yaml.safe_load(
            #     corresponding_proxies[begin_2]["c_clash"])
            correspond_next_proxy = corresponding_proxies[begin_2]["c_clash"]
            if type(correspond_next_proxy) == list:
                correspond_next_proxy = correspond_next_proxy[0]
            # Candidate duplicate when server and port match...
            if proxy_compared['server'] == correspond_next_proxy['server'] and proxy_compared['port'] == correspond_next_proxy['port']:
                check = True
            # ...but any differing optional field vetoes the duplicate verdict.
            if 'net' in correspond_next_proxy and 'net' in proxy_compared:
                if proxy_compared['net'] != correspond_next_proxy['net']:
                    check = False
            if 'tls' in correspond_next_proxy and 'tls' in proxy_compared:
                if proxy_compared['tls'] != correspond_next_proxy['tls']:
                    check = False
            #if 'id' in correspond_next_proxy and 'id' in proxy_compared:
            #    if proxy_compared['id'] != correspond_next_proxy['id']:
            #        check = False
            if 'ws-opts' in correspond_next_proxy and 'ws-opts' in proxy_compared:
                if proxy_compared['ws-opts'] != correspond_next_proxy['ws-opts']:
                    check = False
            #if 'uuid' in correspond_next_proxy and 'uuid' in proxy_compared:
            #    if proxy_compared['uuid'] != correspond_next_proxy['uuid']:
            #        check = False
            #if 'password' in correspond_next_proxy and 'password' in proxy_compared:
            #    if proxy_compared['password'] != correspond_next_proxy['password']:
            #        check = False
            if 'cipher' in correspond_next_proxy and 'cipher' in proxy_compared:
                if proxy_compared['cipher'] != correspond_next_proxy['cipher']:
                    check = False
            if 'type' in correspond_next_proxy and 'type' in proxy_compared:
                if proxy_compared['type'] != correspond_next_proxy['type']:
                    check = False
            # due to conversion we could have udp off or on for same proxies
            # if 'udp' in correspond_next_proxy and 'udp' in proxy_compared:
            #     if proxy_compared['udp'] != correspond_next_proxy['udp']:
            #         check = False
            if 'network' in correspond_next_proxy and 'network' in proxy_compared:
                if proxy_compared['network'] != correspond_next_proxy['network']:
                    check = False
            if 'obfs' in correspond_next_proxy and 'obfs' in proxy_compared:
                if proxy_compared['obfs'] != correspond_next_proxy['obfs']:
                    check = False
            if check:
                corresponding_proxies.pop(begin_2)
                length -= 1
            # NOTE(review): begin_2 advances even after a pop, so the element that
            # shifted into this slot is not re-checked against `begin` — a later
            # pass may still catch it; confirm this is intended.
            begin_2 += 1
        begin += 1
    print("\nNow is " + str(corresponding_proxies.__len__()) + "\n")
    return corresponding_proxies
The provided code snippet includes necessary dependencies for implementing the `eternity_convert` function. Write a Python function `def eternity_convert(file, config, output, provider_file_enabled=True)` to solve the following problem:
yaml_format = ruamel.yaml.YAML() # https://www.coder.work/article/4975478 yaml_format.indent(mapping=2, sequence=4, offset=2) config_yaml = yaml_format.dump(config, sys.stdout)
Here is the function:
def eternity_convert(file, config, output, provider_file_enabled=True):
    """Fetch the aggregated subscription, annotate node names with logged speeds,
    write provider files, and assemble the final Clash config into *output*.

    NOTE(review): the `file` parameter and the incoming `config` argument are
    unused/shadowed (config is re-read from `config_file` below), and
    `eternity_providers` is only defined when provider_file_enabled is True but
    is used unconditionally later — confirm against the real module.
    """
    # # no conversion from base64 so udp is not a problem
    # subconvertor not working with only proxy url
    all_provider = subs_function.convert_sub(
        "https://raw.githubusercontent.com/mahdibland/SSAggregator/master/sub/sub_merge_base64.txt", 'clash', "http://0.0.0.0:25500", False, extra_options="&udp=false")
    ########## Add names to the log lines before making changes to proxies ############
    temp_providers = all_provider.split('\n')
    log_reader = open(log_file, 'r')
    log_lines = log_reader.readlines()
    log_reader.close()
    indexx = 0
    for line in temp_providers:
        if line != 'proxies:':
            try:
                #####
                server_name = substrings(line, "name:", ",")
                server_type = substrings(line, "type:", ",")
                log_lines[indexx] = "name: %s | type: %s | %s" % (
                    server_name, server_type, log_lines[indexx])
                #####
                indexx += 1
            except:
                print("log lines length != providers length")
    log_writer = open(log_file, 'w')
    log_writer.writelines(log_lines)
    log_writer.close()
    ############################################################################
    # remove lines with name issue (mojibake replacement character)
    removed_bad_char = list(filter(lambda x: str(x).__contains__(
        "�") == False, all_provider.split("\n")[1:]))
    log_lines_without_bad_char = list(filter(lambda x: str(x).__contains__(
        "�") == False, log_lines))
    # make sure the size of two list are equal
    print(
        f"removed_bad_char count => {removed_bad_char.__len__()} & log_lines_without_bad_char count => {log_lines_without_bad_char.__len__()}")
    # take a part from the beginning of all lines (at most ~200 nodes)
    num = 200
    num = removed_bad_char.__len__() if removed_bad_char.__len__() <= num else num
    # convert the safe partition to yaml format
    all_provider = "proxies:\n" + "\n".join(removed_bad_char[0:num + 1])
    lines = re.split(r'\n+', all_provider)
    proxy_all = []
    # us_proxy = []
    # hk_proxy = []
    # sg_proxy = []
    # others_proxy = []
    indexx = 0
    skip_names_index = []
    for line in lines:
        if line != 'proxies:':
            try:
                # Rewrite the node name as "name | avg_speed" using the log line.
                name = substrings(line, "name:", ",")
                speed = substrings(
                    log_lines_without_bad_char[indexx], "avg_speed:", "|")
                line = re.sub("name:( |)(.*?),", "name: %s | %s," %
                              (name, speed), line)
            except:
                print(log_lines_without_bad_char[indexx])
                pass
            # line = ' ' + line
            line = line.replace('- ', '')
            line_parsed = yaml.safe_load(line)
            if "password" in line_parsed:
                line_parsed.update({"password": str(line_parsed.get("password"))})
                # Skip passwords that look like scientific notation — YAML would
                # have interpreted them as a floating-point number.
                if re.match(r'^\d+\.?\d*[eE][-+]?\d+$', line_parsed["password"]):
                    skip_names_index.append(indexx)
                    indexx += 1
                    continue
            linee = line_parsed
            proxy_all.append(linee)
            indexx += 1
    if provider_file_enabled:
        providers_files = {
            'all': provider_path + 'provider-all.yml',
            # 'others': provider_path + 'provider-others.yml',
            # 'us': provider_path + 'provider-us.yml',
            # 'hk': provider_path + 'provider-hk.yml',
            # 'sg': provider_path + 'provider-sg.yml'
        }
        eternity_providers = {
            'all': all_provider,
            # 'others': others_provider,
            # 'us': us_provider,
            # 'hk': hk_provider,
            # 'sg': sg_provider
        }
        print('Writing content to provider')
        for key in providers_files.keys():
            provider_all = open(providers_files[key], 'w', encoding='utf-8')
            provider_all.write(eternity_providers[key])
            provider_all.close()
        print('Done!\n')
    # Build the fully configured Eternity.yml
    config_f = open(config_file, 'r', encoding='utf-8')
    config_raw = config_f.read()
    config_f.close()
    # NOTE(review): shadows the `config` parameter with the on-disk template.
    config = yaml.safe_load(config_raw)
    all_provider_dic = {'proxies': []}
    # others_provider_dic = {'proxies': []}
    # us_provider_dic = {'proxies': []}
    # hk_provider_dic = {'proxies': []}
    # sg_provider_dic = {'proxies': []}
    provider_dic = {
        'all': all_provider_dic,
        # 'others': others_provider_dic,
        # 'us': us_provider_dic,
        # 'hk': hk_provider_dic,
        # 'sg': sg_provider_dic
    }
    for key in eternity_providers.keys():  # parse each provider blob into dict form
        provider_load = yaml.safe_load(eternity_providers[key])
        provider_dic[key].update(provider_load)
    # build the per-group node-name lists
    all_name = []
    # others_name = []
    # us_name = []
    # hk_name = []
    # sg_name = []
    name_dict = {
        'all': all_name,
        # 'others': others_name,
        # 'us': us_name,
        # 'hk': hk_name,
        # 'sg': sg_name
    }
    indexx = 0
    for key in provider_dic.keys():
        if not provider_dic[key]['proxies'] is None:
            for proxy in provider_dic[key]['proxies']:
                if indexx in skip_names_index:
                    indexx += 1
                    continue
                try:
                    # Append the logged average speed to the (whitespace-stripped) name.
                    speed = substrings(
                        log_lines_without_bad_char[indexx], "avg_speed:", "|")
                    name_dict[key].append(
                        str(proxy['name']).replace(" ", "") + " | " + speed)
                except:
                    name_dict[key].append(str(proxy['name']).replace(" ", ""))
                    print(log_lines_without_bad_char[indexx])
                indexx += 1
        if provider_dic[key]['proxies'] is None:
            name_dict[key].append('DIRECT')
    # fill the proxy-group rules with node names
    proxy_groups = config['proxy-groups']
    proxy_group_fill = []
    for rule in proxy_groups:
        if rule['proxies'] is None:  # groups with no proxies yet are queued for filling
            proxy_group_fill.append(rule['name'])
    # split all nodes into four roughly equal tiers
    full_size = all_name.__len__()
    part_size = int(full_size / 4)
    last_size = full_size - (part_size * 3)
    for rule_name in proxy_group_fill:
        for rule in proxy_groups:
            if rule['name'] == rule_name:
                # if '美国' in rule_name:
                #     rule.update({'proxies': us_name})
                # elif '香港' in rule_name:
                #     rule.update({'proxies': hk_name})
                # elif '狮城' in rule_name or '新加坡' in rule_name:
                #     rule.update({'proxies': sg_name})
                # elif '其他' in rule_name:
                #     rule.update({'proxies': others_name})
                # else:
                # todo it changes from Main group to tier names
                if "Tier 1" in rule_name:
                    rule.update({'proxies': all_name[0:part_size]})
                elif "Tier 2" in rule_name:
                    rule.update({'proxies': all_name[part_size:part_size*2]})
                elif "Tier 3" in rule_name:
                    rule.update({'proxies': all_name[part_size*2:part_size*3]})
                elif "Tier 4" in rule_name:
                    rule.update({'proxies': all_name[part_size*3:full_size]})
    config.update(all_provider_dic)
    config.update({'proxy-groups': proxy_groups})
    config.update({'proxies': proxy_all})
    """
    yaml_format = ruamel.yaml.YAML() # https://www.coder.work/article/4975478
    yaml_format.indent(mapping=2, sequence=4, offset=2)
    config_yaml = yaml_format.dump(config, sys.stdout)
    """
    # NoAliasDumper keeps repeated objects inline instead of YAML anchors.
    config_yaml = yaml.dump(config, default_flow_style=False, sort_keys=False,
                            allow_unicode=True, width=750, indent=2, Dumper=NoAliasDumper)
    Eternity_yml = open(output, 'w+', encoding='utf-8')
    Eternity_yml.write(config_yaml)
    Eternity_yml.close()
162,417 | import re
import yaml
import json
import re
import time
import os
from sub_convert import sub_convert
from subs_function import subs_function
from list_merge import sub_merge
update_path = './update/'
class sub_convert():
def main(raw_input, input_type='url', output_type='url', custom_set={'dup_rm_enabled': False, 'format_name_enabled': False}):
def format(sub_content, output=False):
def makeup(input, dup_rm_enabled=False, format_name_enabled=False):
def yaml_encode(url_content, output=True):
def base64_encode(url_content):
def yaml_decode(url_content):
def base64_decode(url_content):
def convert_remote(url='', output_type='clash', host='http://127.0.0.1:25500'):
def backup(file):
    """Back up the decoded subscription in *file* to ./update/<yymm>/<yymmdd>.txt.

    Best-effort: any failure is logged and swallowed so callers keep running
    (the yaml path legitimately has nothing to back up).
    """
    try:
        t = time.localtime()
        date = time.strftime('%y%m', t)        # monthly folder name, e.g. '2405'
        date_day = time.strftime('%y%m%d', t)  # daily file name, e.g. '240531'
        # `with` guarantees the handles close even when decode/write fails.
        with open(file, 'r', encoding='utf-8') as file_eternity:
            sub_content = file_eternity.read()
        try:
            os.mkdir(f'{update_path}{date}')
        except FileExistsError:
            pass  # monthly folder already exists
        # Write the base64-decoded content into the $YY$MM/$YY$MM$DD.txt file.
        txt_dir = update_path + date + '/' + date_day + '.txt'
        with open(txt_dir, 'w', encoding='utf-8') as out_file:
            out_file.write(sub_convert.base64_decode(sub_content))
    except Exception as e:
        print("Error While backup EterniyBase_file => if you use method yaml ignore this")
        print(e)  # surface the actual cause instead of silently discarding it
162,419 | import os
import subprocess
import platform
base_link = "http://python.iswbm.com/en/latest/"
def get_file_info(filename):
def make_line(chapter, file):
    """Return one markdown bullet '- <index> [<title>](<url>)\\n' for *file* in *chapter*."""
    page_name, _ = os.path.splitext(file)
    index, title = get_file_info(file)
    url = base_link + chapter + "/" + page_name + ".html"
    return " ".join(["-", index, f"[{title}]({url})\n"])
162,424 | import os
import re
import git
import linecache
from glob import glob
source_dir = os.path.join(pwd, "source")
def get_all_chapter():
    """Collect chapter directory names ('c*') under source_dir.

    Skips the non-chapter entries 'chapters' and 'conf.py'.
    Side effect: changes the process working directory to source_dir.
    """
    os.chdir(source_dir)
    skipped = ("chapters", "conf.py")
    return [os.path.join(name) for name in glob("c*") if name not in skipped]
162,425 | import os
import re
import git
import linecache
from glob import glob
pwd = os.getcwd()
def get_chapter_name(file):
    """Return line 2 of *file* (the chapter-title line) with surrounding whitespace stripped."""
    title_line = linecache.getline(file, 2)
    return title_line.strip()
def generate_mapping(all_chapters_path):
    """Map each chapter directory basename ('cNN') to its chapter title.

    Titles are read from source/chapters/pNN.rst (every 'c' becomes 'p').
    """
    mapping = {}
    for chapter_path in all_chapters_path:
        key = os.path.basename(chapter_path)
        chapter_file = os.path.join(pwd, "source", "chapters", key.replace("c", "p") + ".rst")
        mapping[key] = get_chapter_name(chapter_file)
    return mapping
162,426 | import os
import re
import git
import linecache
from glob import glob
source_dir = os.path.join(pwd, "source")
def get_title(file):
    """Return the H1 title from *file*'s first line ('# Title'), or None if absent."""
    heading = linecache.getline(file, 1)
    if not heading.startswith("#"):
        return None
    return heading.replace("# ", "").strip()
def get_toc_info(all_chapters_path):
    """Build {chapter_dir: {section_no: (title, html_url)}} for every chapter.

    Side effect: chdir's into each chapter directory while globbing its posts.
    Posts whose first line is not a '# ' H1 title are skipped.
    """
    toc = {}
    for dir_name in all_chapters_path:
        chapter_toc = {}
        os.chdir(os.path.join(source_dir, dir_name))
        for file_name in sorted(glob(dir_name + "*.md")):
            # File names look like 'cNN_MM.md'; MM is the section number.
            section = int(re.findall(r"c\d{2}_(\d{2}).md", file_name)[0])
            md_path = os.path.join("http://pycharm.iswbm.com/", dir_name, file_name.replace("md", "html"))
            title = get_title(file_name)
            if not title:
                continue
            chapter_toc[section] = (title, md_path)
        toc[dir_name] = chapter_toc
    return toc
162,427 | import os
import re
import git
import linecache
from glob import glob
def print_md_toc(toc_info, mapping):
    """Print the whole TOC as markdown: a bold chapter header plus indented post links."""
    for chapter_key, posts in sorted(toc_info.items()):
        print(f"- **{mapping[chapter_key]}**")
        for _section, (title, url) in sorted(posts.items()):
            # print title only
            # print(f"{title}")
            print(" ", f"* [{title}]({url})")
162,428 | import setuptools
import os
import platform
def get_data_files(data_dir, prefix=''):
    """Walk *data_dir* bottom-up and group file paths by (prefix + containing dir).

    Returns a list of (prefixed_dir, [file_paths]) pairs, suitable for
    setuptools' ``data_files`` argument.  Empty directories produce no entry.
    """
    grouped = {}
    for root, _dirs, files in os.walk(data_dir, topdown=False):
        for name in files:
            grouped.setdefault(prefix + root, []).append(os.path.join(root, name))
    return list(grouped.items())
162,429 | import cv2
import numpy as np
from PIL import Image
from torchvision import transforms as T
from torchvision.transforms import functional as F
class DualRandomCrop:
    """Random-crop an image dict ({'img', optional 'mask', 'cond'}) with ONE shared window,
    so all components stay spatially aligned."""

    def __init__(self, size):
        # Target (height, width) handed to torchvision's RandomCrop parameter sampler.
        self.size = size

    def __call__(self, img):
        # Sample a single crop window from 'img' and apply it to every component.
        crop_params = T.RandomCrop.get_params(img['img'], self.size)
        img['img'] = F.crop(img['img'], *crop_params)
        if "mask" in img:
            # mask is a numpy array, so use the numpy-slicing crop below.
            img['mask'] = self.crop(img['mask'], *crop_params)
        if "cond" in img:
            img['cond'] = F.crop(img['cond'], *crop_params)
        # Returns (cropped dict, (top, left)).
        return img, crop_params[:2]

    # NOTE(review): defined without 'self' and no @staticmethod decorator, yet
    # called as self.crop(...) above — as written the bound call would misbind
    # arguments; presumably @staticmethod was lost in extraction — confirm.
    def crop(img: np.ndarray, top: int, left: int, height: int, width: int) -> np.ndarray:
        right = left+width
        bottom = top+height
        return img[top:bottom, left:right, ...]
def resize_crop_fix(img, target_size, mask_interp=cv2.INTER_CUBIC):
    """Resize the image dict to cover *target_size* (keeping aspect ratio), then random-crop.

    target_size is (width, height); 'img'/'cond' are PIL images, 'mask' a numpy array.
    NOTE(review): the no-op early return yields [h, w, 0, 0, h, w] while the main
    path yields [new_w, new_h, left, top, target_w, target_h] — the axis order
    looks inconsistent; confirm against callers.
    """
    w, h = img['img'].size
    if w == target_size[0] and h == target_size[1]:
        return img, [h,w,0,0,h,w]
    ratio_img = w/h
    if ratio_img>target_size[0]/target_size[1]:
        # Wider than target: match the height, overshoot the width.
        new_size = (round(ratio_img*target_size[1]), target_size[1])
        # LANCZOS when shrinking, BICUBIC when enlarging.
        interp_type = Image.LANCZOS if h>target_size[1] else Image.BICUBIC
    else:
        # Taller than target: match the width, overshoot the height.
        new_size = (target_size[0], round(target_size[0]/ratio_img))
        interp_type = Image.LANCZOS if w>target_size[0] else Image.BICUBIC
    img['img'] = img['img'].resize(new_size, interp_type)
    if "mask" in img:
        img['mask'] = cv2.resize(img['mask'], new_size, interpolation=mask_interp)
    if "cond" in img:
        img['cond'] = img['cond'].resize(new_size, interp_type)
    # DualRandomCrop expects (height, width), hence the reversal.
    img, crop_coord = DualRandomCrop(target_size[::-1])(img)
    return img, [*new_size, *crop_coord[::-1], *target_size]
162,430 | import cv2
import numpy as np
from PIL import Image
from torchvision import transforms as T
from torchvision.transforms import functional as F
class DualRandomCrop:
    """Random-crop an image dict ({'img', optional 'mask', 'cond'}) with ONE shared window,
    so all components stay spatially aligned."""

    def __init__(self, size):
        # Target (height, width) handed to torchvision's RandomCrop parameter sampler.
        self.size = size

    def __call__(self, img):
        # Sample a single crop window from 'img' and apply it to every component.
        crop_params = T.RandomCrop.get_params(img['img'], self.size)
        img['img'] = F.crop(img['img'], *crop_params)
        if "mask" in img:
            # mask is a numpy array, so use the numpy-slicing crop below.
            img['mask'] = self.crop(img['mask'], *crop_params)
        if "cond" in img:
            img['cond'] = F.crop(img['cond'], *crop_params)
        # Returns (cropped dict, (top, left)).
        return img, crop_params[:2]

    # NOTE(review): defined without 'self' and no @staticmethod decorator, yet
    # called as self.crop(...) above — as written the bound call would misbind
    # arguments; presumably @staticmethod was lost in extraction — confirm.
    def crop(img: np.ndarray, top: int, left: int, height: int, width: int) -> np.ndarray:
        right = left+width
        bottom = top+height
        return img[top:bottom, left:right, ...]
def pad_crop_fix(img, target_size):
    """Zero-pad the image dict up to *target_size*, cropping only when one axis is larger.

    target_size is (width, height).
    NOTE(review): return shapes are inconsistent — the no-op/padded-both paths
    return a 6-tuple (h, w, 0, 0, h, w) while the crop path returns only the
    2-element crop coordinate from DualRandomCrop; confirm against callers.
    """
    w, h = img['img'].size
    if w == target_size[0] and h == target_size[1]:
        return img, (h,w,0,0,h,w)
    # (left, top, right, bottom) padding; only right/bottom ever grow.
    pad_size = [0, 0, max(target_size[0]-w, 0), max(target_size[1]-h, 0)]
    if pad_size[2]>0 or pad_size[3]>0:
        img['img'] = F.pad(img['img'], pad_size)
        if "mask" in img:
            img['mask'] = np.pad(img['mask'], ((0, pad_size[3]), (0, pad_size[2])), 'constant', constant_values=(0, 0))
        if "cond" in img:
            img['cond'] = F.pad(img['cond'], pad_size)
    if pad_size[2]>0 and pad_size[3]>0:
        return img, (h,w,0,0,h,w) # No need to crop
    else:
        # Only one axis was padded, so the other axis may exceed target: crop it.
        img, crop_coord = DualRandomCrop(target_size[::-1])(img)
        return img, crop_coord
162,431 | import torch
from torch.utils.data.distributed import DistributedSampler
from typing import Iterator
import platform
import math
class DistributedCycleSampler(DistributedSampler):
    """DistributedSampler whose iterator keeps yielding epoch after epoch (up to _cycle).

    One long-lived iterator avoids DataLoader worker re-creation between epochs
    (used on Windows — see get_sampler below).
    """

    # Maximum number of epochs a single iterator will produce before stopping.
    _cycle = 1000

    def __iter__(self) -> Iterator:
        def _iter():
            while True:
                if self.shuffle:
                    # deterministically shuffle based on epoch and seed
                    g = torch.Generator()
                    g.manual_seed(self.seed + self.epoch)
                    indices = torch.randperm(len(self.dataset), generator=g).tolist() # type: ignore[arg-type]
                else:
                    indices = list(range(len(self.dataset))) # type: ignore[arg-type]
                if not self.drop_last:
                    # add extra samples to make it evenly divisible
                    padding_size = self.total_size - len(indices)
                    if padding_size <= len(indices):
                        indices += indices[:padding_size]
                    else:
                        indices += (indices * math.ceil(padding_size / len(indices)))[:padding_size]
                else:
                    # remove tail of data to make it evenly divisible.
                    indices = indices[:self.total_size]
                assert len(indices) == self.total_size
                # subsample this rank's strided share of the epoch
                indices = indices[self.rank:self.total_size:self.num_replicas]
                assert len(indices) == self.num_samples
                for idx in indices:
                    yield idx
                # Advance the epoch ourselves: nothing calls set_epoch() between
                # cycles, and the next reshuffle must use a new seed offset.
                self.epoch+=1
                if self.epoch>=self._cycle:
                    break
        return _iter()

    def __len__(self) -> int:
        # Length of ONE epoch, not the whole cycle, so progress bars stay sane.
        return self.num_samples #*self._cycle
def get_sampler():
    """Pick the distributed sampler class for the current OS.

    Windows re-creates DataLoader workers every epoch, so the cycling sampler is
    used there; everywhere else the stock DistributedSampler is fine.
    """
    on_windows = platform.system().lower() == 'windows'
    return DistributedCycleSampler if on_windows else DistributedSampler
162,432 | import json
import os
import glob
import yaml
from typing import Dict
from loguru import logger
from hcpdiff.utils.img_size_tool import types_support
import os
class JsonCaptionLoader(BaseCaptionLoader):
def _load(self):
class YamlCaptionLoader(BaseCaptionLoader):
def _load(self):
class TXTCaptionLoader(BaseCaptionLoader):
def _load(self):
def auto_caption_loader(path):
    """Pick a caption loader for *path*.

    Directories are probed in priority order json -> yaml/yml -> txt; single
    files are dispatched by extension.  Raises FileNotFoundError when no
    supported caption source can be located.
    """
    if os.path.isdir(path):
        found_json = glob.glob(os.path.join(path, '*.json'))
        if found_json:
            return JsonCaptionLoader(found_json[0])
        found_yaml = [
            *glob.glob(os.path.join(path, '*.yaml')),
            *glob.glob(os.path.join(path, '*.yml')),
        ]
        if found_yaml:
            return YamlCaptionLoader(found_yaml[0])
        if glob.glob(os.path.join(path, '*.txt')):
            # TXT captions live one-per-image, so the loader receives the directory.
            return TXTCaptionLoader(path)
        raise FileNotFoundError(f'Caption file not found in directory {path!r}.')
    if os.path.isfile(path):
        ext = os.path.splitext(path)[1]
        if ext == '.json':
            return JsonCaptionLoader(path)
        if ext in {'.yaml', '.yml'}:
            return YamlCaptionLoader(path)
        raise FileNotFoundError(f'Unknown caption file {path!r}.')
    raise FileNotFoundError(f'Unknown caption file type {path!r}.')
162,433 |
def convert_to_webui_maybe_old(new_func):
    """Decorator: dispatch new-format lora state dicts to *new_func*; otherwise
    convert old '.___.'-separated keys into webui-style keys."""
    def convert_to_webui_(self, state, prefix):
        converted = {}
        for key, value in state.items():
            if key.endswith('W_down'):
                # new lora format — hand the entire dict to the new converter
                return new_func(self, state, prefix)
            # old lora format: alpha/scale keys use the short separator
            sep = '.___.' if ('alpha' in key or 'scale' in key) else '.___.layer.'
            model_k, lora_k = key.split(sep, 1)
            converted[f"{prefix}{model_k.replace('.', '_')}.{lora_k}"] = value
        return converted
    return convert_to_webui_
162,434 |
def convert_to_webui_xl_maybe_old(new_func):
    """Decorator (SDXL variant): dispatch new-format dicts to *new_func*; otherwise
    convert old-format keys, renaming the dual text encoders (clip_B -> 1, clip_bigG -> 2)."""
    def convert_to_webui_xl_(self, state, prefix):
        converted = {}
        for key, value in state.items():
            if key.endswith('W_down'):
                # new lora format
                return new_func(self, state, prefix)
            # old lora format: alpha/scale keys use the short separator
            sep = '.___.' if ('alpha' in key or 'scale' in key) else '.___.layer.'
            model_k, lora_k = key.split(sep, 1)
            new_k = f"{prefix}{model_k.replace('.', '_')}.{lora_k}"
            if 'clip' in new_k:
                # SDXL carries two text encoders; webui numbers them 1 (ViT-B) and 2 (bigG)
                new_k = new_k.replace('_clip_B', '1') if 'clip_B' in new_k else new_k.replace('_clip_bigG', '2')
            converted[new_k] = value
        return converted
    return convert_to_webui_xl_
162,435 | import argparse
import math
import os
import time
import warnings
from functools import partial
import diffusers
import hydra
import torch
import torch.utils.checkpoint
import torch.utils.checkpoint
import torch.utils.data
import transformers
from accelerate import Accelerator, DistributedDataParallelKwargs
from accelerate.utils import set_seed
from diffusers import AutoencoderKL, UNet2DConditionModel, DDPMScheduler
from diffusers.utils.import_utils import is_xformers_available
from omegaconf import OmegaConf
from hcpdiff.ckpt_manager import CkptManagerPKL, CkptManagerSafe
from hcpdiff.data import RatioBucket, DataGroup, get_sampler
from hcpdiff.deprecated.cfg_converter import TrainCFGConverter
from hcpdiff.loggers import LoggerGroup
from hcpdiff.models import CFGContext, DreamArtistPTContext, TEUnetWrapper, SDXLTEUnetWrapper
from hcpdiff.models.compose import ComposeEmbPTHook, ComposeTEEXHook
from hcpdiff.models.compose import SDXLTextEncoder
from hcpdiff.utils.cfg_net_tools import make_hcpdiff, make_plugin
from hcpdiff.utils.net_utils import get_scheduler, auto_tokenizer_cls, auto_text_encoder_cls, load_emb
from hcpdiff.utils.utils import load_config_with_cli, get_cfg_range, mgcd, format_number
from hcpdiff.visualizer import Visualizer
torch.utils.checkpoint.checkpoint = checkpoint_fix
def checkpoint_fix(function, *args, use_reentrant: bool = False, checkpoint_raw=torch.utils.checkpoint.checkpoint, **kwargs):
    """Wrapper around torch.utils.checkpoint.checkpoint that defaults use_reentrant=False.

    `checkpoint_raw` is bound at *definition* time, so the original implementation
    is preserved even after `torch.utils.checkpoint.checkpoint` is monkey-patched
    with this wrapper (NOTE(review): this relies on the def executing before the
    patch — confirm the ordering in the real module).
    """
    return checkpoint_raw(function, *args, use_reentrant=use_reentrant, **kwargs)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.