id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
162,657 | import os
from os import listdir
from os.path import isfile, join, exists
import sys
from datetime import datetime
class cstr(str):
def add_code(name, code):
def __new__(cls, text):
def __getattr__(self, attr):
def print(self, **kwargs):
cstr.color.add_code("msg", f"{cstr.color.BLUE}[Comfy3D] {cstr.color.END}")
cstr.color.add_code("warning", f"{cstr.color.LIGHTYELLOW}[Comfy3D] [WARNING] {cstr.color.END}")
cstr.color.add_code("error", f"{cstr.color.RED}[Comfy3D] [ERROR] {cstr.color.END}")
def parse_save_filename(save_path, output_directory, supported_extensions, class_name):
    """Validate `save_path` and expand it into a concrete file path.

    Relative paths are anchored at `output_directory` (directories are created
    as needed) and strftime tokens in the file stem are replaced with the
    current time. Returns the final path, or None when the extension is not
    one of `supported_extensions`.
    """
    folder_path, filename = os.path.split(save_path)
    filename, file_extension = os.path.splitext(filename)
    if file_extension.lower() not in supported_extensions:
        cstr(f"[{class_name}] File name (unknown) does not end with supported file extensions: {supported_extensions}").error.print()
        return None
    # Relative paths are resolved against the configured output directory.
    if not os.path.isabs(save_path):
        folder_path = join(output_directory, folder_path)
    os.makedirs(folder_path, exist_ok=True)
    # Substitute strftime tokens (e.g. "%Y-%m-%d") with the current timestamp.
    now = datetime.now()
    for token in ("%Y", "%m", "%d", "%M", "%S", "%f"):
        if token in filename:
            filename = filename.replace(token, now.strftime(token))
    save_path = join(folder_path, filename) + file_extension
    cstr(f"[{class_name}] Saving model to {save_path}").msg.print()
    return save_path
162,658 | import os
from os import listdir
from os.path import isfile, join, exists
import sys
from datetime import datetime
def get_list_filenames(directory, extension_filter=None):
    """Return names of regular files directly inside `directory` (non-recursive).

    extension_filter: optional suffix string (or tuple of suffixes) matched
    case-insensitively via str.endswith. A missing directory yields [].
    """
    if not exists(directory):
        return []
    names = []
    for entry in listdir(directory):
        # Skip sub-directories; only plain files are reported.
        if not isfile(join(directory, entry)):
            continue
        if extension_filter is None or entry.lower().endswith(extension_filter):
            names.append(entry)
    return names
162,661 | import types
import torch
import torch.nn.functional as F
import numpy as np
from torch import nn, einsum
from pointnet2_ops.pointnet2_utils import furthest_point_sample, \
gather_operation, ball_query, three_nn, three_interpolate, grouping_operation
furthest_point_sample = FurthestPointSampling.apply
gather_operation = GatherOperation.apply
grouping_operation = GroupingOperation.apply
ball_query = BallQuery.apply
The provided code snippet includes necessary dependencies for implementing the `sample_and_group` function. Write a Python function `def sample_and_group(xyz, points, npoint, nsample, radius, use_xyz=True)` to solve the following problem:
Args: xyz: Tensor, (B, 3, N) points: Tensor, (B, f, N) npoint: int nsample: int radius: float use_xyz: boolean Returns: new_xyz: Tensor, (B, 3, npoint) new_points: Tensor, (B, 3 | f+3 | f, npoint, nsample) idx_local: Tensor, (B, npoint, nsample) grouped_xyz: Tensor, (B, 3, npoint, nsample)
Here is the function:
def sample_and_group(xyz, points, npoint, nsample, radius, use_xyz=True):
    """
    Args:
        xyz: Tensor, (B, 3, N)
        points: Tensor, (B, f, N)
        npoint: int
        nsample: int
        radius: float
        use_xyz: boolean
    Returns:
        new_xyz: Tensor, (B, 3, npoint)
        new_points: Tensor, (B, 3 | f+3 | f, npoint, nsample)
        idx_local: Tensor, (B, npoint, nsample)
        grouped_xyz: Tensor, (B, 3, npoint, nsample)
    """
    # The CUDA kernels want a densely packed points-last layout, hence
    # the permute + .contiguous().
    xyz_flipped = xyz.permute(0, 2, 1).contiguous()  # (B, N, 3)
    # Select npoint centroids by farthest point sampling, then gather coords.
    new_xyz = gather_operation(xyz, furthest_point_sample(xyz_flipped, npoint))  # (B, 3, npoint)
    # For each centroid, collect up to nsample neighbours within `radius`.
    idx = ball_query(radius, nsample, xyz_flipped, new_xyz.permute(0, 2, 1).contiguous())  # (B, npoint, nsample)
    grouped_xyz = grouping_operation(xyz, idx)  # (B, 3, npoint, nsample)
    # Express neighbour coordinates relative to their centroid.
    grouped_xyz -= new_xyz.unsqueeze(3).repeat(1, 1, 1, nsample)
    if points is not None:
        grouped_points = grouping_operation(points, idx)  # (B, f, npoint, nsample)
        if use_xyz:
            # Prepend relative coordinates to the grouped feature channels.
            new_points = torch.cat([grouped_xyz, grouped_points], 1)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz
    return new_xyz, new_points, idx, grouped_xyz | Args: xyz: Tensor, (B, 3, N) points: Tensor, (B, f, N) npoint: int nsample: int radius: float use_xyz: boolean Returns: new_xyz: Tensor, (B, 3, npoint) new_points: Tensor, (B, 3 | f+3 | f, npoint, nsample) idx_local: Tensor, (B, npoint, nsample) grouped_xyz: Tensor, (B, 3, npoint, nsample) |
162,662 | import types
import torch
import torch.nn.functional as F
import numpy as np
from torch import nn, einsum
from pointnet2_ops.pointnet2_utils import furthest_point_sample, \
gather_operation, ball_query, three_nn, three_interpolate, grouping_operation
The provided code snippet includes necessary dependencies for implementing the `sample_and_group_all` function. Write a Python function `def sample_and_group_all(xyz, points, use_xyz=True)` to solve the following problem:
Args: xyz: Tensor, (B, 3, nsample) points: Tensor, (B, f, nsample) use_xyz: boolean Returns: new_xyz: Tensor, (B, 3, 1) new_points: Tensor, (B, f|f+3|3, 1, nsample) idx: Tensor, (B, 1, nsample) grouped_xyz: Tensor, (B, 3, 1, nsample)
Here is the function:
def sample_and_group_all(xyz, points, use_xyz=True):
    """
    Group the whole cloud into a single neighbourhood around the origin.

    Args:
        xyz: Tensor, (B, 3, nsample)
        points: Tensor, (B, f, nsample)
        use_xyz: boolean
    Returns:
        new_xyz: Tensor, (B, 3, 1)
        new_points: Tensor, (B, f|f+3|3, 1, nsample)
        idx: Tensor, (B, 1, nsample)
        grouped_xyz: Tensor, (B, 3, 1, nsample)
    """
    batch, _, num_pts = xyz.shape
    device = xyz.device
    # A single centroid at the origin for every cloud in the batch.
    new_xyz = torch.zeros((1, 3, 1), dtype=torch.float, device=device).repeat(batch, 1, 1)
    grouped_xyz = xyz.reshape((batch, 3, 1, num_pts))
    # Every point belongs to the (only) group, in its original order.
    idx = torch.arange(num_pts, device=device).reshape(1, 1, num_pts).repeat(batch, 1, 1)
    if points is None:
        new_points = grouped_xyz
    else:
        feats = torch.cat([xyz, points], 1) if use_xyz else points
        new_points = feats.unsqueeze(2)
    return new_xyz, new_points, idx, grouped_xyz
162,663 | import types
import torch
import torch.nn.functional as F
import numpy as np
from torch import nn, einsum
from pointnet2_ops.pointnet2_utils import furthest_point_sample, \
gather_operation, ball_query, three_nn, three_interpolate, grouping_operation
def query_knn(nsample, xyz, new_xyz, include_self=True):
    """Indices of the `nsample` nearest neighbours of each query point.

    xyz: (B, N, 3) reference cloud; new_xyz: (B, S, 3) query points.
    When include_self is False the single closest match (the point itself)
    is skipped. Returns int32 indices of shape (B, S, nsample).
    """
    skip = 0 if include_self else 1
    sqrdists = square_distance(new_xyz, xyz)  # (B, S, N)
    order = torch.argsort(sqrdists, dim=-1, descending=False)
    neighbours = order[:, :, skip: nsample + skip]
    return neighbours.int()
furthest_point_sample = FurthestPointSampling.apply
gather_operation = GatherOperation.apply
grouping_operation = GroupingOperation.apply
The provided code snippet includes necessary dependencies for implementing the `sample_and_group_knn` function. Write a Python function `def sample_and_group_knn(xyz, points, npoint, k, use_xyz=True, idx=None)` to solve the following problem:
Args: xyz: Tensor, (B, 3, N) points: Tensor, (B, f, N) npoint: int nsample: int radius: float use_xyz: boolean Returns: new_xyz: Tensor, (B, 3, npoint) new_points: Tensor, (B, 3 | f+3 | f, npoint, nsample) idx_local: Tensor, (B, npoint, nsample) grouped_xyz: Tensor, (B, 3, npoint, nsample)
Here is the function:
def sample_and_group_knn(xyz, points, npoint, k, use_xyz=True, idx=None):
    """
    Args:
        xyz: Tensor, (B, 3, N)
        points: Tensor, (B, f, N)
        npoint: int, number of centroids selected by farthest point sampling
        k: int, number of nearest neighbours per centroid
        use_xyz: boolean, prepend relative xyz to the grouped features
        idx: optional precomputed neighbour indices, (B, npoint, k)
    Returns:
        new_xyz: Tensor, (B, 3, npoint)
        new_points: Tensor, (B, 3 | f+3 | f, npoint, k)
        idx: Tensor, (B, npoint, k)
        grouped_xyz: Tensor, (B, 3, npoint, k)
    """
    # CUDA kernels expect a contiguous points-last layout.
    xyz_flipped = xyz.permute(0, 2, 1).contiguous()  # (B, N, 3)
    new_xyz = gather_operation(xyz, furthest_point_sample(xyz_flipped, npoint))  # (B, 3, npoint)
    if idx is None:
        # k-NN grouping (cf. sample_and_group, which uses a radius ball query).
        idx = query_knn(k, xyz_flipped, new_xyz.permute(0, 2, 1).contiguous())
    grouped_xyz = grouping_operation(xyz, idx)  # (B, 3, npoint, nsample)
    # Centre each neighbourhood on its centroid.
    grouped_xyz -= new_xyz.unsqueeze(3).repeat(1, 1, 1, k)
    if points is not None:
        grouped_points = grouping_operation(points, idx)  # (B, f, npoint, nsample)
        if use_xyz:
            new_points = torch.cat([grouped_xyz, grouped_points], 1)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz
    return new_xyz, new_points, idx, grouped_xyz | Args: xyz: Tensor, (B, 3, N) points: Tensor, (B, f, N) npoint: int nsample: int radius: float use_xyz: boolean Returns: new_xyz: Tensor, (B, 3, npoint) new_points: Tensor, (B, 3 | f+3 | f, npoint, nsample) idx_local: Tensor, (B, npoint, nsample) grouped_xyz: Tensor, (B, 3, npoint, nsample) |
162,664 | import types
import torch
import torch.nn.functional as F
import numpy as np
from torch import nn, einsum
from pointnet2_ops.pointnet2_utils import furthest_point_sample, \
gather_operation, ball_query, three_nn, three_interpolate, grouping_operation
furthest_point_sample = FurthestPointSampling.apply
gather_operation = GatherOperation.apply
The provided code snippet includes necessary dependencies for implementing the `fps_subsample` function. Write a Python function `def fps_subsample(pcd, n_points=2048)` to solve the following problem:
Args pcd: (b, 16384, 3) returns new_pcd: (b, n_points, 3)
Here is the function:
def fps_subsample(pcd, n_points=2048):
    """
    Farthest-point-sample a cloud down to `n_points` points.

    Args
        pcd: (b, 16384, 3)
    returns
        new_pcd: (b, n_points, 3)
    """
    # FPS runs on the (b, n, 3) layout; gather_operation wants (b, 3, n).
    sampled_idx = furthest_point_sample(pcd, n_points)
    channels_first = pcd.permute(0, 2, 1).contiguous()
    sampled = gather_operation(channels_first, sampled_idx)
    return sampled.permute(0, 2, 1).contiguous()
162,665 | import types
import torch
import torch.nn.functional as F
import numpy as np
from torch import nn, einsum
from pointnet2_ops.pointnet2_utils import furthest_point_sample, \
gather_operation, ball_query, three_nn, three_interpolate, grouping_operation
class CouplingLayer(nn.Module):
def __init__(self, d, intermediate_dim, swap=False):
def forward(self, x, logpx=None, reverse=False):
class SequentialFlow(nn.Module):
def __init__(self, layersList):
def forward(self, x, logpx=None, reverse=False, inds=None):
def build_latent_flow(args):
    """Stack `args.latent_flow_depth` affine coupling layers into a flow.

    The `swap` flag alternates (True on even layers) so successive layers
    transform complementary halves of the latent vector.
    """
    layers = [
        CouplingLayer(args.latent_dim, args.latent_flow_hidden_dim, swap=(i % 2 == 0))
        for i in range(args.latent_flow_depth)
    ]
    return SequentialFlow(layers)
162,666 | import types
import torch
import torch.nn.functional as F
import numpy as np
from torch import nn, einsum
from pointnet2_ops.pointnet2_utils import furthest_point_sample, \
gather_operation, ball_query, three_nn, three_interpolate, grouping_operation
class SpectralNorm(object):
    """Forward-pre-hook that rescales a module parameter by its spectral norm.

    The raw parameter is kept as ``<name>_orig``; ``<name>`` becomes a buffer
    that is recomputed as weight / sigma before every forward call, with sigma
    estimated by power iteration on the persistent vectors ``<name>_u`` and
    ``<name>_v``.
    """

    def __init__(self, name='weight', dim=0, eps=1e-12):
        # name: parameter to normalize; dim: "output" dimension of the weight;
        # eps: numerical floor passed to F.normalize.
        self.name = name
        self.dim = dim
        self.eps = eps

    def compute_weight(self, module, n_power_iterations):
        """Run `n_power_iterations` update steps and set `<name>` = W / sigma."""
        if n_power_iterations < 0:
            raise ValueError(
                'Expected n_power_iterations to be non-negative, but '
                'got n_power_iterations={}'.format(n_power_iterations)
            )
        weight = getattr(module, self.name + '_orig')
        u = getattr(module, self.name + '_u')
        v = getattr(module, self.name + '_v')
        weight_mat = weight
        if self.dim != 0:
            # permute dim to front
            weight_mat = weight_mat.permute(self.dim, * [d for d in range(weight_mat.dim()) if d != self.dim])
        height = weight_mat.size(0)
        # Power iteration operates on a 2D view of the weight.
        weight_mat = weight_mat.reshape(height, -1)
        with torch.no_grad():
            for _ in range(n_power_iterations):
                # Spectral norm of weight equals to `u^T W v`, where `u` and `v`
                # are the first left and right singular vectors.
                # This power iteration produces approximations of `u` and `v`.
                v = F.normalize(torch.matmul(weight_mat.t(), u), dim=0, eps=self.eps)
                u = F.normalize(torch.matmul(weight_mat, v), dim=0, eps=self.eps)
            setattr(module, self.name + '_u', u)
            setattr(module, self.name + '_v', v)
        sigma = torch.dot(u, torch.matmul(weight_mat, v))
        weight = weight / sigma
        setattr(module, self.name, weight)

    def remove(self, module):
        """Undo the reparameterization, restoring a plain parameter `<name>`."""
        weight = getattr(module, self.name)
        delattr(module, self.name)
        delattr(module, self.name + '_u')
        delattr(module, self.name + '_orig')
        module.register_parameter(self.name, torch.nn.Parameter(weight))

    def get_update_method(self, module):
        # Produces the function that is bound to the module (see `apply`) so
        # callers can trigger power iteration via the module itself.
        def update_fn(module, n_power_iterations):
            self.compute_weight(module, n_power_iterations)
        return update_fn

    def __call__(self, module, unused_inputs):
        # Forward pre-hook: refresh `<name>` from `<name>_orig` using the
        # current u/v estimates (zero extra power iterations here).
        del unused_inputs
        self.compute_weight(module, n_power_iterations=0)
        # requires_grad might be either True or False during inference.
        if not module.training:
            r_g = getattr(module, self.name + '_orig').requires_grad
            setattr(module, self.name, getattr(module, self.name).detach().requires_grad_(r_g))

    # NOTE(review): invoked unbound as SpectralNorm.apply(module, ...); it is
    # conventionally decorated with @staticmethod (works as-is in Python 3).
    def apply(module, name, dim, eps):
        """Attach a SpectralNorm hook to `module` for the parameter `name`."""
        fn = SpectralNorm(name, dim, eps)
        weight = module._parameters[name]
        height = weight.size(dim)
        # Random unit vectors seed the power iteration.
        u = F.normalize(weight.new_empty(height).normal_(0, 1), dim=0, eps=fn.eps)
        v = F.normalize(weight.new_empty(int(weight.numel() / height)).normal_(0, 1), dim=0, eps=fn.eps)
        delattr(module, fn.name)
        module.register_parameter(fn.name + "_orig", weight)
        # We still need to assign weight back as fn.name because all sorts of
        # things may assume that it exists, e.g., when initializing weights.
        # However, we can't directly assign as it could be an nn.Parameter and
        # gets added as a parameter. Instead, we register weight.data as a
        # buffer, which will cause weight to be included in the state dict
        # and also supports nn.init due to shared storage.
        module.register_buffer(fn.name, weight.data)
        module.register_buffer(fn.name + "_u", u)
        module.register_buffer(fn.name + "_v", v)
        # POWER_ITERATION_FN is a module-level constant defined elsewhere in this file.
        setattr(module, POWER_ITERATION_FN, types.MethodType(fn.get_update_method(module), module))
        module.register_forward_pre_hook(fn)
        return fn
The provided code snippet includes necessary dependencies for implementing the `remove_spectral_norm` function. Write a Python function `def remove_spectral_norm(module, name='weight')` to solve the following problem:
r"""Removes the spectral normalization reparameterization from a module. Args: module (nn.Module): containing module name (str, optional): name of weight parameter Example: >>> m = spectral_norm(nn.Linear(40, 10)) >>> remove_spectral_norm(m)
Here is the function:
def remove_spectral_norm(module, name='weight'):
    r"""Removes the spectral normalization reparameterization from a module.
    Args:
        module (nn.Module): containing module
        name (str, optional): name of weight parameter
    Example:
        >>> m = spectral_norm(nn.Linear(40, 10))
        >>> remove_spectral_norm(m)
    """
    for key, hook in module._forward_pre_hooks.items():
        if not (isinstance(hook, SpectralNorm) and hook.name == name):
            continue
        # Restore the plain parameter, then drop the pre-forward hook.
        hook.remove(module)
        del module._forward_pre_hooks[key]
        return module
    raise ValueError("spectral_norm of '{}' not found in {}".format(name, module))
162,667 | import types
import torch
import torch.nn.functional as F
import numpy as np
from torch import nn, einsum
from pointnet2_ops.pointnet2_utils import furthest_point_sample, \
gather_operation, ball_query, three_nn, three_interpolate, grouping_operation
class CouplingLayer(nn.Module):
    """Affine (RealNVP-style) coupling layer.

    The first `self.d` features condition an MLP that produces a scale and
    shift for the remaining features; `swap` rotates the two halves first so
    stacked layers transform complementary parts of the input.
    """

    def __init__(self, d, intermediate_dim, swap=False):
        nn.Module.__init__(self)
        self.d = d - (d // 2)  # size of the conditioning half
        self.swap = swap
        self.net_s_t = nn.Sequential(
            nn.Linear(self.d, intermediate_dim),
            nn.ReLU(inplace=True),
            nn.Linear(intermediate_dim, intermediate_dim),
            nn.ReLU(inplace=True),
            nn.Linear(intermediate_dim, (d - self.d) * 2),
        )

    def forward(self, x, logpx=None, reverse=False):
        if self.swap:
            # Rotate halves so the conditioning half comes first.
            x = torch.cat([x[:, self.d:], x[:, :self.d]], 1)
        cond = x[:, :self.d]
        rest = x[:, self.d:]
        n_out = x.shape[1] - self.d
        params = self.net_s_t(cond)
        # +2 biases the sigmoid towards scale ~ 1 at initialization.
        scale = torch.sigmoid(params[:, :n_out] + 2.)
        shift = params[:, n_out:]
        logdetjac = torch.log(scale).view(scale.shape[0], -1).sum(1, keepdim=True)
        if reverse:
            transformed = (rest - shift) / scale
            delta_logp = logdetjac
        else:
            transformed = rest * scale + shift
            delta_logp = -logdetjac
        if self.swap:
            y = torch.cat([transformed, cond], 1)
        else:
            y = torch.cat([cond, transformed], 1)
        if logpx is None:
            return y
        return y, logpx + delta_logp
def inplace_spectral_norm(module, name='weight', dim=None, eps=1e-12):
    r"""Applies spectral normalization to a parameter in the given module.

    .. math::
        \mathbf{W} = \dfrac{\mathbf{W}}{\sigma(\mathbf{W})} \\
        \sigma(\mathbf{W}) = \max_{\mathbf{h}: \mathbf{h} \ne 0} \dfrac{\|\mathbf{W} \mathbf{h}\|_2}{\|\mathbf{h}\|_2}

    Spectral normalization stabilizes the training of discriminators (critics)
    in Generative Adversarial Networks (GANs) by rescaling the weight tensor
    with spectral norm :math:`\sigma` estimated by power iteration; weights of
    more than two dimensions are flattened to 2D for the estimate. The rescale
    happens in a hook before every :meth:`~Module.forward` call.

    See `Spectral Normalization for Generative Adversarial Networks`_ .
    .. _`Spectral Normalization for Generative Adversarial Networks`: https://arxiv.org/abs/1802.05957

    Args:
        module (nn.Module): containing module
        name (str, optional): name of weight parameter
        dim (int, optional): dimension corresponding to number of outputs,
            the default is 0, except for ConvTranspose1/2/3d, where it is 1
        eps (float, optional): epsilon for numerical stability in
            calculating norms

    Returns:
        The original module with the spectral norm hook
    """
    if dim is None:
        # Transposed convolutions keep their output channels on dim 1, not 0.
        transposed = isinstance(
            module,
            (torch.nn.ConvTranspose1d, torch.nn.ConvTranspose2d, torch.nn.ConvTranspose3d),
        )
        dim = 1 if transposed else 0
    SpectralNorm.apply(module, name, dim=dim, eps=eps)
    return module
The provided code snippet includes necessary dependencies for implementing the `add_spectral_norm` function. Write a Python function `def add_spectral_norm(model, logger=None)` to solve the following problem:
Applies spectral norm to all modules within the scope of a CNF.
Here is the function:
def add_spectral_norm(model, logger=None):
    """Applies spectral norm to all modules within the scope of a CNF."""
    def _maybe_normalize(module):
        # Only modules owning a direct 'weight' parameter are wrapped.
        if 'weight' in module._parameters:
            if logger:
                logger.info("Adding spectral norm to {}".format(module))
            inplace_spectral_norm(module, 'weight')

    def _visit(module):
        # Recurse until a CouplingLayer is found, then normalize its subtree.
        if isinstance(module, CouplingLayer):
            module.apply(_maybe_normalize)
        else:
            for child in module.children():
                _visit(child)

    _visit(model)
162,668 | import types
import torch
import torch.nn.functional as F
import numpy as np
from torch import nn, einsum
from pointnet2_ops.pointnet2_utils import furthest_point_sample, \
gather_operation, ball_query, three_nn, three_interpolate, grouping_operation
# Attribute name under which SpectralNorm.apply installs the per-module updater.
POWER_ITERATION_FN = "spectral_norm_power_iteration"


def spectral_norm_power_iteration(model, n_power_iterations=1):
    """Run the spectral-norm power-iteration update on every wrapped submodule."""
    def _update(module):
        updater = getattr(module, POWER_ITERATION_FN, None)
        if updater is not None:
            updater(n_power_iterations)
    model.apply(_update)
162,669 | import types
import torch
import torch.nn.functional as F
import numpy as np
from torch import nn, einsum
from pointnet2_ops.pointnet2_utils import furthest_point_sample, \
gather_operation, ball_query, three_nn, three_interpolate, grouping_operation
def reparameterize_gaussian(mean, logvar):
    """Sample z ~ N(mean, diag(exp(logvar))) via the reparameterization trick."""
    std = torch.exp(0.5 * logvar)
    # .to(mean) matches both the device and dtype of `mean`.
    noise = torch.randn(std.size()).to(mean)
    return mean + noise * std
162,670 | import types
import torch
import torch.nn.functional as F
import numpy as np
from torch import nn, einsum
from pointnet2_ops.pointnet2_utils import furthest_point_sample, \
gather_operation, ball_query, three_nn, three_interpolate, grouping_operation
def gaussian_entropy(logvar):
    """Entropy of a diagonal Gaussian from per-dim log-variances.

    logvar: (B, D) -> returns (B,); H = 0.5 * sum(logvar) + 0.5 * D * (1 + log(2*pi)).
    """
    dims = float(logvar.size(1))
    const = 0.5 * dims * (1. + np.log(np.pi * 2))
    return 0.5 * logvar.sum(dim=1, keepdim=False) + const
162,671 | import types
import torch
import torch.nn.functional as F
import numpy as np
from torch import nn, einsum
from pointnet2_ops.pointnet2_utils import furthest_point_sample, \
gather_operation, ball_query, three_nn, three_interpolate, grouping_operation
def standard_normal_logprob(z):
    """Elementwise standard-normal log-density terms for z.

    NOTE(review): the -0.5 * D * log(2*pi) constant uses the full last-dim
    size while the quadratic term is per element, so summing the result over
    the last dim scales the constant by D again — matches upstream usage,
    but verify callers before reusing.
    """
    dim = z.size(-1)
    norm_const = -0.5 * dim * np.log(2 * np.pi)
    return norm_const - 0.5 * z.pow(2)
162,672 | import types
import torch
import torch.nn.functional as F
import numpy as np
from torch import nn, einsum
from pointnet2_ops.pointnet2_utils import furthest_point_sample, \
gather_operation, ball_query, three_nn, three_interpolate, grouping_operation
The provided code snippet includes necessary dependencies for implementing the `truncated_normal_` function. Write a Python function `def truncated_normal_(tensor, mean=0, std=1, trunc_std=2)` to solve the following problem:
Taken from https://discuss.pytorch.org/t/implementing-truncated-normal-initializer/4778/15
Here is the function:
def truncated_normal_(tensor, mean=0, std=1, trunc_std=2):
    """In-place truncated-normal init: keep the first of 4 draws within ±trunc_std.

    Taken from https://discuss.pytorch.org/t/implementing-truncated-normal-initializer/4778/15
    """
    shape = tensor.shape
    # Four candidate N(0, 1) draws per element.
    candidates = tensor.new_empty(shape + (4,)).normal_()
    in_range = (candidates < trunc_std) & (candidates > -trunc_std)
    # Index of the first in-range candidate along the last axis.
    first_valid = in_range.max(-1, keepdim=True)[1]
    tensor.data.copy_(candidates.gather(-1, first_valid).squeeze(-1))
    tensor.data.mul_(std).add_(mean)
    return tensor
162,673 | import torch
import torch.nn as nn
import math
import math
from typing import Optional
from typing import Callable, Iterable, Sequence, Union
import torch
class CheckpointFunction(torch.autograd.Function):
    """Gradient checkpointing: re-run the function in backward instead of
    caching activations.

    forward() evaluates under no_grad (nothing is saved for backward);
    backward() detaches the stored inputs, re-executes the function with grad
    enabled, and routes the incoming gradients through torch.autograd.grad.
    """

    # Fix: modern PyTorch requires static forward/backward on autograd
    # Functions — calling .apply on a legacy non-static Function raises.
    @staticmethod
    def forward(ctx, run_function, length, *args):
        # args holds the real inputs (first `length`) followed by parameters
        # the function closes over; both are needed again in backward.
        ctx.run_function = run_function
        ctx.input_tensors = list(args[:length])
        ctx.input_params = list(args[length:])
        with torch.no_grad():
            output_tensors = ctx.run_function(*ctx.input_tensors)
        return output_tensors

    @staticmethod
    def backward(ctx, *output_grads):
        ctx.input_tensors = [x.detach().requires_grad_(True)
                             for x in ctx.input_tensors]
        with torch.enable_grad():
            # Fixes a bug where the first op in run_function modifies the
            # Tensor storage in place, which is not allowed for detach()'d
            # Tensors.
            shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
            output_tensors = ctx.run_function(*shallow_copies)
        input_grads = torch.autograd.grad(
            output_tensors,
            ctx.input_tensors + ctx.input_params,
            output_grads,
            allow_unused=True,
        )
        del ctx.input_tensors
        del ctx.input_params
        del output_tensors
        # The leading (None, None) matches the non-tensor run_function/length args.
        return (None, None) + input_grads
The provided code snippet includes necessary dependencies for implementing the `checkpoint` function. Write a Python function `def checkpoint( func: Callable[..., Union[torch.Tensor, Sequence[torch.Tensor]]], inputs: Sequence[torch.Tensor], params: Iterable[torch.Tensor], flag: bool, )` to solve the following problem:
Evaluate a function without caching intermediate activations, allowing for reduced memory at the expense of extra compute in the backward pass. :param func: the function to evaluate. :param inputs: the argument sequence to pass to `func`. :param params: a sequence of parameters `func` depends on but does not explicitly take as arguments. :param flag: if False, disable gradient checkpointing.
Here is the function:
def checkpoint(
    func: Callable[..., Union[torch.Tensor, Sequence[torch.Tensor]]],
    inputs: Sequence[torch.Tensor],
    params: Iterable[torch.Tensor],
    flag: bool,
):
    """
    Evaluate a function without caching intermediate activations, allowing for
    reduced memory at the expense of extra compute in the backward pass.
    :param func: the function to evaluate.
    :param inputs: the argument sequence to pass to `func`.
    :param params: a sequence of parameters `func` depends on but does not
        explicitly take as arguments.
    :param flag: if False, disable gradient checkpointing.
    """
    if not flag:
        # Checkpointing disabled: plain eager evaluation.
        return func(*inputs)
    # Inputs and params travel through the autograd.Function together; the
    # length argument marks where inputs end and params begin.
    packed = tuple(inputs) + tuple(params)
    return CheckpointFunction.apply(func, len(inputs), *packed)
162,674 | import torch
import torch.nn as nn
import math
import math
from typing import Optional
from typing import Callable, Iterable, Sequence, Union
import torch
def init_linear(l, stddev):
    """Initialize a linear-like layer: N(0, stddev) weights, zero bias (if present)."""
    nn.init.normal_(l.weight, std=stddev)
    if l.bias is None:
        return
    nn.init.constant_(l.bias, 0.0)
162,675 | from typing import List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from pointnet2_ops import pointnet2_utils
def build_shared_mlp(mlp_spec: List[int], bn: bool = True):
    """Stack 1x1 Conv2d (+ optional BatchNorm) + ReLU stages per `mlp_spec`.

    mlp_spec lists channel sizes, e.g. [3, 64, 128] yields two stages.
    The conv bias is dropped when batch norm follows it (redundant).
    """
    layers = []
    for in_ch, out_ch in zip(mlp_spec, mlp_spec[1:]):
        layers.append(nn.Conv2d(in_ch, out_ch, kernel_size=1, bias=not bn))
        if bn:
            layers.append(nn.BatchNorm2d(out_ch))
        layers.append(nn.ReLU(True))
    return nn.Sequential(*layers)
162,676 | import torch
import torch.nn as nn
import torch.nn.functional as F
from tgs.utils.base import BaseModule
from tgs.utils.typing import *
from dataclasses import dataclass, field
from pytorch3d.renderer import (
AlphaCompositor,
NormWeightedCompositor,
PointsRasterizationSettings,
PointsRasterizer,
PointsRenderer)
from pytorch3d.renderer.cameras import CamerasBase
from pytorch3d.structures import Pointclouds
from pytorch3d.utils.camera_conversions import cameras_from_opencv_projection
from .utils import fps_subsample
from einops import rearrange
from .utils import MLP_CONV
from .SPD import SPD
from .SPD_crossattn import SPD_crossattn
from .SPD_pp import SPD_pp
The provided code snippet includes necessary dependencies for implementing the `points_projection` function. Write a Python function `def points_projection(points: Float[Tensor, "B Np 3"], c2ws: Float[Tensor, "B 4 4"], intrinsics: Float[Tensor, "B 3 3"], local_features: Float[Tensor, "B C H W"], raster_point_radius: float = 0.0075, # point size raster_points_per_pixel: int = 1, # a single point per pixel, for now bin_size: int = 0)` to solve the following problem:
points: (B, Np, 3)
Here is the function:
def points_projection(points: Float[Tensor, "B Np 3"],
                      c2ws: Float[Tensor, "B 4 4"],
                      intrinsics: Float[Tensor, "B 3 3"],
                      local_features: Float[Tensor, "B C H W"],
                      raster_point_radius: float = 0.0075,  # point size
                      raster_points_per_pixel: int = 1,  # a single point per pixel, for now
                      bin_size: int = 0):
    """
    points: (B, Np, 3)

    Rasterize `points` into each camera and pull back per-pixel image
    features onto the points; points visible in no pixel receive zeros.
    Returns per-point features of shape (B, Np, C).
    """
    B, C, H, W = local_features.shape
    device = local_features.device
    raster_settings = PointsRasterizationSettings(
        image_size=(H, W),
        radius=raster_point_radius,
        points_per_pixel=raster_points_per_pixel,
        bin_size=bin_size,
    )
    Np = points.shape[1]
    # NOTE(review): transpose(0, 1).flatten(0, 1) implies an extra leading
    # (view) dimension on c2ws/intrinsics, which conflicts with the annotated
    # (B, 4, 4)/(B, 3, 3) shapes — confirm against the caller.
    c2ws = c2ws.transpose(0, 1).flatten(0, 1)
    intrinsics = intrinsics.transpose(0, 1).flatten(0, 1)
    R = raster_settings.points_per_pixel
    # PyTorch3D's OpenCV conversion expects world-to-camera extrinsics.
    w2cs = torch.inverse(c2ws)
    image_size = torch.as_tensor([H, W]).view(
        1, 2).expand(w2cs.shape[0], -1).to(device)
    cameras = cameras_from_opencv_projection(
        w2cs[:, :3, :3], w2cs[:, :3, 3], intrinsics, image_size)
    rasterize = PointsRasterizer(
        cameras=cameras, raster_settings=raster_settings)
    fragments = rasterize(Pointclouds(points))
    # fragments.idx: per-pixel indices into the flattened point list (-1 = empty).
    fragments_idx: Tensor = fragments.idx.long()
    visible_pixels = (fragments_idx > -1)  # (B, H, W, R)
    points_to_visible_pixels = fragments_idx[visible_pixels]
    # Reshape local features to (B, H, W, R, C)
    local_features = local_features.permute(
        0, 2, 3, 1).unsqueeze(-2).expand(-1, -1, -1, R, -1)  # (B, H, W, R, C)
    # Get local features corresponding to visible points
    local_features_proj = torch.zeros(B * Np, C, device=device)
    local_features_proj[points_to_visible_pixels] = local_features[visible_pixels]
    local_features_proj = local_features_proj.reshape(B, Np, C)
    return local_features_proj | points: (B, Np, 3) |
162,677 | from dataclasses import dataclass, field
from collections import defaultdict
from diff_gaussian_rasterization import GaussianRasterizationSettings, GaussianRasterizer
from plyfile import PlyData, PlyElement
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
from ..utils.typing import *
from ..utils.base import BaseModule
from ..utils.ops import trunc_exp
from ..utils.ops import scale_tensor
from .networks import MLP
from einops import rearrange, reduce
from mesh_processer.mesh_utils import construct_list_of_gs_attributes, write_gs_ply
def getWorld2View2(R, t, translate=None, scale=1.0):
    """Build a float32 world-to-view matrix with a re-centred/scaled camera.

    Args:
        R: (3, 3) rotation (transposed internally into the extrinsic matrix).
        t: (3,) translation.
        translate: optional (3,) offset added to the camera centre (default zeros).
        scale: scalar applied to the camera centre after the offset.

    Returns:
        (4, 4) np.float32 world-to-view matrix.
    """
    # Fix: avoid the mutable np.array default argument; None means "no offset".
    if translate is None:
        translate = np.zeros(3)
    Rt = np.zeros((4, 4))
    Rt[:3, :3] = R.transpose()
    Rt[:3, 3] = t
    Rt[3, 3] = 1.0
    # Invert to camera-to-world so the camera centre can be edited directly.
    C2W = np.linalg.inv(Rt)
    cam_center = C2W[:3, 3]
    cam_center = (cam_center + translate) * scale
    C2W[:3, 3] = cam_center
    Rt = np.linalg.inv(C2W)
    return np.float32(Rt)
162,678 | from dataclasses import dataclass, field
from collections import defaultdict
from diff_gaussian_rasterization import GaussianRasterizationSettings, GaussianRasterizer
from plyfile import PlyData, PlyElement
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
from ..utils.typing import *
from ..utils.base import BaseModule
from ..utils.ops import trunc_exp
from ..utils.ops import scale_tensor
from .networks import MLP
from einops import rearrange, reduce
from mesh_processer.mesh_utils import construct_list_of_gs_attributes, write_gs_ply
from torch import Tensor
def getProjectionMatrix(znear, zfar, fovX, fovY):
    """Perspective projection matrix (OpenCV-style, z_sign = +1).

    fovX/fovY are full field-of-view angles in radians; depth is mapped so
    that znear -> 0 and zfar -> 1 after the perspective divide.
    """
    tan_half_x = math.tan(fovX / 2)
    tan_half_y = math.tan(fovY / 2)
    # Frustum extents on the near plane (symmetric about the axis).
    top = tan_half_y * znear
    right = tan_half_x * znear
    bottom = -top
    left = -right
    P = torch.zeros(4, 4)
    z_sign = 1.0
    P[0, 0] = 2.0 * znear / (right - left)
    P[1, 1] = 2.0 * znear / (top - bottom)
    P[0, 2] = (right + left) / (right - left)
    P[1, 2] = (top + bottom) / (top - bottom)
    P[3, 2] = z_sign
    P[2, 2] = z_sign * zfar / (zfar - znear)
    P[2, 3] = -(zfar * znear) / (zfar - znear)
    return P
162,679 | from dataclasses import dataclass, field
from collections import defaultdict
from diff_gaussian_rasterization import GaussianRasterizationSettings, GaussianRasterizer
from plyfile import PlyData, PlyElement
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
from ..utils.typing import *
from ..utils.base import BaseModule
from ..utils.ops import trunc_exp
from ..utils.ops import scale_tensor
from .networks import MLP
from einops import rearrange, reduce
from mesh_processer.mesh_utils import construct_list_of_gs_attributes, write_gs_ply
from torch import Tensor
def intrinsic_to_fov(intrinsic, w, h):
    """Convert a pinhole intrinsic matrix to horizontal/vertical FOV (radians).

    w/h are the image width/height as tensors (arctan2 requires tensor args).
    fov = 2 * atan((size/2) / focal) expressed via atan2.
    """
    half_x = torch.arctan2(w, 2 * intrinsic[0, 0])
    half_y = torch.arctan2(h, 2 * intrinsic[1, 1])
    return 2 * half_x, 2 * half_y
162,680 | import collections.abc
import math
from typing import Dict, List, Optional, Set, Tuple, Union
from dataclasses import dataclass
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers.activations import ACT2FN
from transformers.modeling_outputs import (
BackboneOutput,
BaseModelOutput,
BaseModelOutputWithPooling,
ImageClassifierOutput,
)
from transformers.modeling_utils import PreTrainedModel
from transformers.pytorch_utils import (
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from transformers.utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from transformers.utils.backbone_utils import BackboneMixin
from transformers.models.dinov2.configuration_dinov2 import Dinov2Config
from tgs.models.transformers import MemoryEfficientAttentionMixin
from tgs.utils.typing import *
The provided code snippet includes necessary dependencies for implementing the `drop_path` function. Write a Python function `def drop_path( input: torch.Tensor, drop_prob: float = 0.0, training: bool = False ) -> torch.Tensor` to solve the following problem:
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument.
Here is the function:
def drop_path(
    input: torch.Tensor, drop_prob: float = 0.0, training: bool = False
) -> torch.Tensor:
    """
    Drop paths (Stochastic Depth) per sample, for use in the main path of residual blocks.

    During training, each sample in the batch is independently zeroed with
    probability `drop_prob`; survivors are rescaled by 1 / (1 - drop_prob) so
    the expected value is unchanged. At inference time (or drop_prob == 0) the
    input is returned untouched. Same idea as EfficientNet's "DropConnect";
    see https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956.
    """
    if not training or drop_prob == 0.0:
        return input
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample: shape (B, 1, 1, ...) broadcasts over all
    # remaining dims, so this works for any tensor rank, not just 2D ConvNets.
    mask_shape = (input.shape[0],) + (1,) * (input.ndim - 1)
    mask = torch.rand(mask_shape, dtype=input.dtype, device=input.device)
    mask.add_(keep_prob).floor_()  # binarize: 1 with prob keep_prob, else 0
    return input.div(keep_prob) * mask
162,681 | from dataclasses import dataclass
import torch
import torch.nn as nn
from einops import rearrange
import numpy as np
from ..utils.base import BaseModule
from ..utils.ops import get_activation
from ..utils.typing import *
def get_encoding(n_input_dims: int, config) -> nn.Module:
    """Factory for an input-encoding module built from `config`; placeholder that always raises NotImplementedError."""
    raise NotImplementedError
162,682 | from dataclasses import dataclass
import torch
import torch.nn as nn
from einops import rearrange
import numpy as np
from ..utils.base import BaseModule
from ..utils.ops import get_activation
from ..utils.typing import *
def get_mlp(n_input_dims, n_output_dims, config) -> nn.Module:
    """Factory for an MLP module built from `config`; placeholder that always raises NotImplementedError."""
    raise NotImplementedError
162,683 | import os
from dataclasses import dataclass, field
from omegaconf import OmegaConf
from .typing import *
def get_naming_convention(cfg):
    """Build a default experiment name from the config (currently just the backbone depth)."""
    # TODO: fold more hyper-parameters into the name
    return f"tgs_{cfg.system.backbone.num_layers}"
162,684 | import os
from dataclasses import dataclass, field
from omegaconf import OmegaConf
from .typing import *
OmegaConf.register_new_resolver("calc_exp_lr_decay_rate", lambda factor, n: factor ** (1.0 / n), replace=True)
OmegaConf.register_new_resolver("add", lambda a, b: a + b, replace=True)
OmegaConf.register_new_resolver("sub", lambda a, b: a - b, replace=True)
OmegaConf.register_new_resolver("mul", lambda a, b: a * b, replace=True)
OmegaConf.register_new_resolver("div", lambda a, b: a / b, replace=True)
OmegaConf.register_new_resolver("idiv", lambda a, b: a // b, replace=True)
OmegaConf.register_new_resolver("basename", lambda p: os.path.basename(p), replace=True)
OmegaConf.register_new_resolver("rmspace", lambda s, sub: s.replace(" ", sub), replace=True)
OmegaConf.register_new_resolver("tuple2", lambda s: [float(s), float(s)], replace=True)
OmegaConf.register_new_resolver("gt0", lambda s: s > 0, replace=True)
OmegaConf.register_new_resolver("not", lambda s: not s, replace=True)
OmegaConf.register_new_resolver("shsdim", lambda sh_degree: (sh_degree + 1) ** 2 * 3, replace=True)
class ExperimentConfig:
    # Top-level experiment configuration, populated from YAML via
    # parse_structured/OmegaConf (decorator presumably @dataclass — applied elsewhere).
    n_gpus: int = 1  # number of GPUs for the run
    data: dict = field(default_factory=dict)  # dataset / dataloader options
    system: dict = field(default_factory=dict)  # model / system options
def parse_structured(fields: Any, cfg: Optional[Union[dict, DictConfig]] = None) -> Any:
    """Instantiate the dataclass type `fields` from `cfg` and wrap it as a structured (type-validated) OmegaConf config."""
    # NOTE(review): cfg=None would fail at fields(**cfg); callers appear to always pass a mapping.
    scfg = OmegaConf.structured(fields(**cfg))
    return scfg
def load_config(
    *yamls: str, cli_args: Optional[list] = None, from_string=False, makedirs=True, **kwargs
) -> Any:
    """Load and merge YAML configs (plus CLI overrides) into an ExperimentConfig.

    Args:
        yamls: YAML file paths, or inline YAML strings when ``from_string`` is True.
        cli_args: optional dotlist-style overrides, e.g. ``["a.b=1"]``.
            Defaults to no overrides. (Was a mutable ``[]`` default — fixed to
            avoid the shared-mutable-default pitfall.)
        from_string: parse ``yamls`` as YAML text instead of file paths.
        makedirs: kept for backward compatibility; unused here.
        kwargs: extra key/value overrides, merged last.

    Returns:
        A structured ExperimentConfig with all interpolations resolved.
    """
    parse_func = OmegaConf.create if from_string else OmegaConf.load
    yaml_confs = []
    for y in yamls:
        conf = parse_func(y)
        # A config may declare `extends: <path>`; the base file is merged in first.
        extends = conf.pop("extends", None)
        if extends:
            assert os.path.exists(extends), f"File {extends} does not exist."
            yaml_confs.append(OmegaConf.load(extends))
        yaml_confs.append(conf)

    cli_conf = OmegaConf.from_cli(cli_args if cli_args is not None else [])
    cfg = OmegaConf.merge(*yaml_confs, cli_conf, kwargs)
    OmegaConf.resolve(cfg)
    assert isinstance(cfg, DictConfig)
    scfg: ExperimentConfig = parse_structured(ExperimentConfig, cfg)
    return scfg
162,685 | import os
from dataclasses import dataclass, field
from omegaconf import OmegaConf
from .typing import *
OmegaConf.register_new_resolver("calc_exp_lr_decay_rate", lambda factor, n: factor ** (1.0 / n), replace=True)
OmegaConf.register_new_resolver("add", lambda a, b: a + b, replace=True)
OmegaConf.register_new_resolver("sub", lambda a, b: a - b, replace=True)
OmegaConf.register_new_resolver("mul", lambda a, b: a * b, replace=True)
OmegaConf.register_new_resolver("div", lambda a, b: a / b, replace=True)
OmegaConf.register_new_resolver("idiv", lambda a, b: a // b, replace=True)
OmegaConf.register_new_resolver("basename", lambda p: os.path.basename(p), replace=True)
OmegaConf.register_new_resolver("rmspace", lambda s, sub: s.replace(" ", sub), replace=True)
OmegaConf.register_new_resolver("tuple2", lambda s: [float(s), float(s)], replace=True)
OmegaConf.register_new_resolver("gt0", lambda s: s > 0, replace=True)
OmegaConf.register_new_resolver("not", lambda s: not s, replace=True)
OmegaConf.register_new_resolver("shsdim", lambda sh_degree: (sh_degree + 1) ** 2 * 3, replace=True)
def config_to_primitive(config, resolve: bool = True) -> Any:
    """Convert an OmegaConf config to plain Python containers (dict/list), resolving interpolations by default."""
    return OmegaConf.to_container(config, resolve=resolve)
162,686 | import os
from dataclasses import dataclass, field
from omegaconf import OmegaConf
from .typing import *
OmegaConf.register_new_resolver("calc_exp_lr_decay_rate", lambda factor, n: factor ** (1.0 / n), replace=True)
OmegaConf.register_new_resolver("add", lambda a, b: a + b, replace=True)
OmegaConf.register_new_resolver("sub", lambda a, b: a - b, replace=True)
OmegaConf.register_new_resolver("mul", lambda a, b: a * b, replace=True)
OmegaConf.register_new_resolver("div", lambda a, b: a / b, replace=True)
OmegaConf.register_new_resolver("idiv", lambda a, b: a // b, replace=True)
OmegaConf.register_new_resolver("basename", lambda p: os.path.basename(p), replace=True)
OmegaConf.register_new_resolver("rmspace", lambda s, sub: s.replace(" ", sub), replace=True)
OmegaConf.register_new_resolver("tuple2", lambda s: [float(s), float(s)], replace=True)
OmegaConf.register_new_resolver("gt0", lambda s: s > 0, replace=True)
OmegaConf.register_new_resolver("not", lambda s: not s, replace=True)
OmegaConf.register_new_resolver("shsdim", lambda sh_degree: (sh_degree + 1) ** 2 * 3, replace=True)
def dump_config(path: str, config) -> None:
    """Serialize `config` to YAML at `path` via OmegaConf."""
    with open(path, "w") as fp:
        OmegaConf.save(config=config, f=fp)
162,687 | import os
import re
import torch
from packaging import version
from .typing import *
def parse_version(ver: str):
    """Parse a version string into a comparable `packaging.version` object."""
    return version.parse(ver)
162,688 | import os
import re
import torch
from packaging import version
from .typing import *
def get_device():
    """Return this process' CUDA device, indexed by distributed rank (presumably one GPU per rank — `get_rank` is defined elsewhere)."""
    return torch.device(f"cuda:{get_rank()}")
from torch import Tensor
def load_module_weights(
    path, module_name=None, ignore_modules=None, map_location=None
) -> Tuple[dict, int, int]:
    """Load a checkpoint's ``state_dict``, optionally filtered by module prefix.

    At most one of `module_name` (keep only keys under that prefix, prefix
    stripped) or `ignore_modules` (drop keys under any listed prefix) may be
    given. `map_location` defaults to this process' device.

    NOTE(review): the annotation says Tuple[dict, int, int] but only the state
    dict is returned — kept unchanged for compatibility.
    """
    if module_name is not None and ignore_modules is not None:
        raise ValueError("module_name and ignore_modules cannot be both set")
    if map_location is None:
        map_location = get_device()

    state_dict = torch.load(path, map_location=map_location)["state_dict"]

    if ignore_modules is not None:
        return {
            k: v
            for k, v in state_dict.items()
            if not any(k.startswith(prefix + ".") for prefix in ignore_modules)
        }

    if module_name is not None:
        pattern = re.compile(rf"^{module_name}\.(.*)$")
        selected = {}
        for k, v in state_dict.items():
            m = pattern.match(k)
            if m is not None:
                selected[m.group(1)] = v
        return selected

    return state_dict
162,689 | import os
import re
import torch
from packaging import version
from .typing import *
def make_recursive_func(func):
    """Wrap `func` so it maps recursively over (arbitrarily nested) lists, tuples and dict values."""

    def wrapper(vars, *args, **kwargs):
        if isinstance(vars, list):
            return [wrapper(item, *args, **kwargs) for item in vars]
        if isinstance(vars, tuple):
            return tuple(wrapper(item, *args, **kwargs) for item in vars)
        if isinstance(vars, dict):
            return {key: wrapper(value, *args, **kwargs) for key, value in vars.items()}
        # Leaf value: apply the wrapped function directly.
        return func(vars, *args, **kwargs)

    return wrapper
162,690 | import os
import re
import torch
from packaging import version
from .typing import *
from torch import Tensor
def todevice(vars, device="cuda"):
    """Move a tensor to `device`; plain Python scalars and strings pass through unchanged.

    Raises NotImplementedError for any other type (typically used under
    `make_recursive_func`, which handles containers).
    """
    if isinstance(vars, torch.Tensor):
        return vars.to(device)
    if isinstance(vars, (str, bool, float, int)):
        return vars
    raise NotImplementedError("invalid input type {} for tensor2numpy".format(type(vars)))
162,691 | import math
import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Function
from torch.cuda.amp import custom_bwd, custom_fwd
from pytorch3d import io
from pytorch3d.renderer import (
PointsRasterizationSettings,
PointsRasterizer)
from pytorch3d.structures import Pointclouds
from pytorch3d.utils.camera_conversions import cameras_from_opencv_projection
import cv2
from .typing import *
ValidScale = Union[Tuple[float, float], Num[Tensor, "2 D"]]
def scale_tensor(
    dat: Num[Tensor, "... D"], inp_scale: ValidScale, tgt_scale: ValidScale
):
    """Affinely remap `dat` from the `inp_scale` range into the `tgt_scale` range.

    Either scale may be None (treated as (0, 1)), a (min, max) pair, or a
    per-channel tensor whose trailing size matches `dat`'s last dimension.
    """
    if inp_scale is None:
        inp_scale = (0, 1)
    if tgt_scale is None:
        tgt_scale = (0, 1)
    if isinstance(tgt_scale, Tensor):
        # Per-channel target range must match the trailing dimension.
        assert dat.shape[-1] == tgt_scale.shape[-1]
    normalized = (dat - inp_scale[0]) / (inp_scale[1] - inp_scale[0])
    return normalized * (tgt_scale[1] - tgt_scale[0]) + tgt_scale[0]
162,692 | import math
import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Function
from torch.cuda.amp import custom_bwd, custom_fwd
from pytorch3d import io
from pytorch3d.renderer import (
PointsRasterizationSettings,
PointsRasterizer)
from pytorch3d.structures import Pointclouds
from pytorch3d.utils.camera_conversions import cameras_from_opencv_projection
import cv2
from .typing import *
trunc_exp = _TruncExp.apply
from torch import Tensor
def get_activation(name) -> Callable:
    """Look up an activation function by name.

    Returns the identity for None/"none", one of several custom activations,
    or falls back to any attribute of torch.nn.functional (e.g. "relu").

    Raises:
        ValueError: if `name` matches neither a custom activation nor an
            attribute of torch.nn.functional.
    """
    if name is None:
        return lambda x: x
    name = name.lower()
    if name == "none":
        return lambda x: x
    elif name == "lin2srgb":
        # Linear -> sRGB piecewise gamma curve, clamped to [0, 1].
        return lambda x: torch.where(
            x > 0.0031308,
            torch.pow(torch.clamp(x, min=0.0031308), 1.0 / 2.4) * 1.055 - 0.055,
            12.92 * x,
        ).clamp(0.0, 1.0)
    elif name == "exp":
        return lambda x: torch.exp(x)
    elif name == "shifted_exp":
        return lambda x: torch.exp(x - 1.0)
    elif name == "trunc_exp":
        # Module-level custom autograd exp (gradient-safe), returned as-is.
        return trunc_exp
    elif name == "shifted_trunc_exp":
        return lambda x: trunc_exp(x - 1.0)
    elif name == "sigmoid":
        return lambda x: torch.sigmoid(x)
    elif name == "tanh":
        return lambda x: torch.tanh(x)
    elif name == "shifted_softplus":
        return lambda x: F.softplus(x - 1.0)
    elif name == "scale_-11_01":
        # Rescale from [-1, 1] to [0, 1].
        return lambda x: x * 0.5 + 0.5
    else:
        try:
            # Fall back to torch.nn.functional by attribute name.
            return getattr(F, name)
        except AttributeError:
            raise ValueError(f"Unknown activation function: {name}")
162,693 | import math
import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Function
from torch.cuda.amp import custom_bwd, custom_fwd
from pytorch3d import io
from pytorch3d.renderer import (
PointsRasterizationSettings,
PointsRasterizer)
from pytorch3d.structures import Pointclouds
from pytorch3d.utils.camera_conversions import cameras_from_opencv_projection
import cv2
from .typing import *
from torch import Tensor
The provided code snippet includes necessary dependencies for implementing the `get_ray_directions` function. Write a Python function `def get_ray_directions( H: int, W: int, focal: Union[float, Tuple[float, float]], principal: Optional[Tuple[float, float]] = None, use_pixel_centers: bool = True, ) -> Float[Tensor, "H W 3"]` to solve the following problem:
Get ray directions for all pixels in camera coordinate. Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/ ray-tracing-generating-camera-rays/standard-coordinate-systems Inputs: H, W, focal, principal, use_pixel_centers: image height, width, focal length, principal point and whether use pixel centers Outputs: directions: (H, W, 3), the direction of the rays in camera coordinate
Here is the function:
def get_ray_directions(
    H: int,
    W: int,
    focal: Union[float, Tuple[float, float]],
    principal: Optional[Tuple[float, float]] = None,
    use_pixel_centers: bool = True,
) -> Float[Tensor, "H W 3"]:
    """
    Per-pixel ray directions in the camera frame (+x right, -y down flipped to up, -z forward).

    Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/
    ray-tracing-generating-camera-rays/standard-coordinate-systems

    `focal` is either a single float (principal point assumed at the image
    center) or an (fx, fy) pair, in which case `principal` must be given.
    Returns an (H, W, 3) tensor of un-normalized directions.
    """
    offset = 0.5 if use_pixel_centers else 0
    if isinstance(focal, float):
        fx = fy = focal
        cx, cy = W / 2, H / 2
    else:
        fx, fy = focal
        assert principal is not None
        cx, cy = principal
    u, v = torch.meshgrid(
        torch.arange(W, dtype=torch.float32) + offset,
        torch.arange(H, dtype=torch.float32) + offset,
        indexing="xy",
    )
    return torch.stack(
        [(u - cx) / fx, -(v - cy) / fy, -torch.ones_like(u)], dim=-1
    )
162,694 | import math
import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Function
from torch.cuda.amp import custom_bwd, custom_fwd
from pytorch3d import io
from pytorch3d.renderer import (
PointsRasterizationSettings,
PointsRasterizer)
from pytorch3d.structures import Pointclouds
from pytorch3d.utils.camera_conversions import cameras_from_opencv_projection
import cv2
from .typing import *
from torch import Tensor
def get_rays(
    directions: Float[Tensor, "... 3"],
    c2w: Float[Tensor, "... 4 4"],
    keepdim=False,
    noise_scale=0.0,
) -> Tuple[Float[Tensor, "... 3"], Float[Tensor, "... 3"]]:
    """Turn camera-space ray directions into world-space ray origins/directions.

    Supports directions of rank 2 (N,3), 3 (H,W,3) and 4 (B,H,W,3), with
    matching single or batched c2w — see the shape comments on each branch.
    Directions are L2-normalized; unless `keepdim`, outputs are flattened to
    (N, 3). `noise_scale` adds one shared random offset to all origins and
    another to all directions.
    """
    # Rotate ray directions from camera coordinate to the world coordinate
    assert directions.shape[-1] == 3
    if directions.ndim == 2: # (N_rays, 3)
        if c2w.ndim == 2: # (4, 4)
            c2w = c2w[None, :, :]
        assert c2w.ndim == 3 # (N_rays, 4, 4) or (1, 4, 4)
        rays_d = (directions[:, None, :] * c2w[:, :3, :3]).sum(-1) # (N_rays, 3)
        rays_o = c2w[:, :3, 3].expand(rays_d.shape)
    elif directions.ndim == 3: # (H, W, 3)
        assert c2w.ndim in [2, 3]
        if c2w.ndim == 2: # (4, 4)
            rays_d = (directions[:, :, None, :] * c2w[None, None, :3, :3]).sum(
                -1
            ) # (H, W, 3)
            rays_o = c2w[None, None, :3, 3].expand(rays_d.shape)
        elif c2w.ndim == 3: # (B, 4, 4)
            rays_d = (directions[None, :, :, None, :] * c2w[:, None, None, :3, :3]).sum(
                -1
            ) # (B, H, W, 3)
            rays_o = c2w[:, None, None, :3, 3].expand(rays_d.shape)
    elif directions.ndim == 4: # (B, H, W, 3)
        assert c2w.ndim == 3 # (B, 4, 4)
        rays_d = (directions[:, :, :, None, :] * c2w[:, None, None, :3, :3]).sum(
            -1
        ) # (B, H, W, 3)
        rays_o = c2w[:, None, None, :3, 3].expand(rays_d.shape)
    # add camera noise to avoid grid-like artifact
    # https://github.com/ashawkey/stable-dreamfusion/blob/49c3d4fa01d68a4f027755acf94e1ff6020458cc/nerf/utils.py#L373
    if noise_scale > 0:
        rays_o = rays_o + torch.randn(3, device=rays_o.device) * noise_scale
        rays_d = rays_d + torch.randn(3, device=rays_d.device) * noise_scale
    rays_d = F.normalize(rays_d, dim=-1)
    if not keepdim:
        rays_o, rays_d = rays_o.reshape(-1, 3), rays_d.reshape(-1, 3)
    return rays_o, rays_d
162,695 | import math
import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Function
from torch.cuda.amp import custom_bwd, custom_fwd
from pytorch3d import io
from pytorch3d.renderer import (
PointsRasterizationSettings,
PointsRasterizer)
from pytorch3d.structures import Pointclouds
from pytorch3d.utils.camera_conversions import cameras_from_opencv_projection
import cv2
from .typing import *
from torch import Tensor
def get_projection_matrix(
    fovy: Union[float, Float[Tensor, "B"]], aspect_wh: float, near: float, far: float
) -> Float[Tensor, "*B 4 4"]:
    """Build an OpenGL-style perspective projection matrix, with the y axis
    negated because nvdiffrast's output is vertically flipped.

    `fovy` may be a scalar (returns 4x4) or a batched tensor (returns Bx4x4);
    angles are in radians, `aspect_wh` is width/height.
    """
    if isinstance(fovy, float):
        proj_mtx = torch.zeros(4, 4, dtype=torch.float32)
        proj_mtx[0, 0] = 1.0 / (math.tan(fovy / 2.0) * aspect_wh)
        proj_mtx[1, 1] = -1.0 / math.tan(
            fovy / 2.0
        ) # add a negative sign here as the y axis is flipped in nvdiffrast output
        proj_mtx[2, 2] = -(far + near) / (far - near)
        proj_mtx[2, 3] = -2.0 * far * near / (far - near)
        proj_mtx[3, 2] = -1.0
    else:
        batch_size = fovy.shape[0]
        proj_mtx = torch.zeros(batch_size, 4, 4, dtype=torch.float32)
        proj_mtx[:, 0, 0] = 1.0 / (torch.tan(fovy / 2.0) * aspect_wh)
        proj_mtx[:, 1, 1] = -1.0 / torch.tan(
            fovy / 2.0
        ) # add a negative sign here as the y axis is flipped in nvdiffrast output
        proj_mtx[:, 2, 2] = -(far + near) / (far - near)
        proj_mtx[:, 2, 3] = -2.0 * far * near / (far - near)
        proj_mtx[:, 3, 2] = -1.0
    return proj_mtx
162,696 | import math
import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Function
from torch.cuda.amp import custom_bwd, custom_fwd
from pytorch3d import io
from pytorch3d.renderer import (
PointsRasterizationSettings,
PointsRasterizer)
from pytorch3d.structures import Pointclouds
from pytorch3d.utils.camera_conversions import cameras_from_opencv_projection
import cv2
from .typing import *
from torch import Tensor
def get_mvp_matrix(
    c2w: Float[Tensor, "*B 4 4"], proj_mtx: Float[Tensor, "*B 4 4"]
) -> Float[Tensor, "*B 4 4"]:
    """Compose the model-view-projection matrix: proj_mtx @ inverse(c2w).

    The world-to-camera inverse is built analytically from the rigid c2w
    (R' = R^T, t' = -R^T t) instead of a general matrix inverse; supports
    both single (4, 4) and batched (B, 4, 4) inputs.
    """
    # calculate w2c from c2w: R' = Rt, t' = -Rt * t
    # mathematically equivalent to (c2w)^-1
    if c2w.ndim == 2:
        assert proj_mtx.ndim == 2
        w2c: Float[Tensor, "4 4"] = torch.zeros(4, 4).to(c2w)
        w2c[:3, :3] = c2w[:3, :3].permute(1, 0)
        w2c[:3, 3:] = -c2w[:3, :3].permute(1, 0) @ c2w[:3, 3:]
        w2c[3, 3] = 1.0
    else:
        w2c: Float[Tensor, "B 4 4"] = torch.zeros(c2w.shape[0], 4, 4).to(c2w)
        w2c[:, :3, :3] = c2w[:, :3, :3].permute(0, 2, 1)
        w2c[:, :3, 3:] = -c2w[:, :3, :3].permute(0, 2, 1) @ c2w[:, :3, 3:]
        w2c[:, 3, 3] = 1.0
    # calculate mvp matrix by proj_mtx @ w2c (mv_mtx)
    mvp_mtx = proj_mtx @ w2c
    return mvp_mtx
162,697 | import math
import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Function
from torch.cuda.amp import custom_bwd, custom_fwd
from pytorch3d import io
from pytorch3d.renderer import (
PointsRasterizationSettings,
PointsRasterizer)
from pytorch3d.structures import Pointclouds
from pytorch3d.utils.camera_conversions import cameras_from_opencv_projection
import cv2
from .typing import *
from torch import Tensor
def get_intrinsic_from_fov(fov, H, W, bs=-1):
    """Build a 3x3 pinhole intrinsic matrix from a vertical FoV (radians) and image size.

    The principal point sits at the image center and fx == fy. With bs > 0 the
    matrix is tiled to shape (bs, 3, 3). Returned as a float32 torch tensor.
    """
    f = 0.5 * H / np.tan(0.5 * fov)
    K = np.array(
        [
            [f, 0.0, W / 2.0],
            [0.0, f, H / 2.0],
            [0.0, 0.0, 1.0],
        ],
        dtype=np.float32,
    )
    if bs > 0:
        K = K[None].repeat(bs, axis=0)
    return torch.from_numpy(K)
162,698 | import math
import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Function
from torch.cuda.amp import custom_bwd, custom_fwd
from pytorch3d import io
from pytorch3d.renderer import (
PointsRasterizationSettings,
PointsRasterizer)
from pytorch3d.structures import Pointclouds
from pytorch3d.utils.camera_conversions import cameras_from_opencv_projection
import cv2
from .typing import *
from torch import Tensor
def points_projection(points: Float[Tensor, "B Np 3"],
                    c2ws: Float[Tensor, "B 4 4"],
                    intrinsics: Float[Tensor, "B 3 3"],
                    local_features: Float[Tensor, "B C H W"],
                    # Rasterization settings
                    raster_point_radius: float = 0.0075,  # point size
                    raster_points_per_pixel: int = 1,  # a single point per pixel, for now
                    bin_size: int = 0):
    """Gather per-point image features by rasterizing 3D points into each view.

    Points are rasterized with PyTorch3D (OpenCV-convention cameras built from
    the inverted c2w poses); each point visible at a pixel receives that
    pixel's feature vector, occluded/off-screen points receive zeros.
    Returns features of shape (B, Np, C).
    """
    B, C, H, W = local_features.shape
    device = local_features.device
    raster_settings = PointsRasterizationSettings(
            image_size=(H, W),
            radius=raster_point_radius,
            points_per_pixel=raster_points_per_pixel,
            bin_size=bin_size,
        )
    Np = points.shape[1]
    R = raster_settings.points_per_pixel
    # PyTorch3D expects world-to-camera extrinsics, so invert the poses.
    w2cs = torch.inverse(c2ws)
    image_size = torch.as_tensor([H, W]).view(1, 2).expand(w2cs.shape[0], -1).to(device)
    cameras = cameras_from_opencv_projection(w2cs[:, :3, :3], w2cs[:, :3, 3], intrinsics, image_size)
    rasterize = PointsRasterizer(cameras=cameras, raster_settings=raster_settings)
    fragments = rasterize(Pointclouds(points))
    fragments_idx: Tensor = fragments.idx.long()
    visible_pixels = (fragments_idx > -1) # (B, H, W, R)
    points_to_visible_pixels = fragments_idx[visible_pixels]
    # Reshape local features to (B, H, W, R, C)
    local_features = local_features.permute(0, 2, 3, 1).unsqueeze(-2).expand(-1, -1, -1, R, -1) # (B, H, W, R, C)
    # Get local features corresponding to visible points
    local_features_proj = torch.zeros(B * Np, C, device=device)
    local_features_proj[points_to_visible_pixels] = local_features[visible_pixels]
    local_features_proj = local_features_proj.reshape(B, Np, C)
    return local_features_proj
162,699 | import math
import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Function
from torch.cuda.amp import custom_bwd, custom_fwd
from pytorch3d import io
from pytorch3d.renderer import (
PointsRasterizationSettings,
PointsRasterizer)
from pytorch3d.structures import Pointclouds
from pytorch3d.utils.camera_conversions import cameras_from_opencv_projection
import cv2
from .typing import *
from torch import Tensor
def compute_distance_transform(mask: torch.Tensor):
    """Distance transform of a batch of binary masks.

    For each background pixel (mask == 0), computes the L2 distance to the
    nearest foreground pixel, normalized by half the image width and clipped
    to [0, 1]. Assumes `mask` is (B, 1, H, W) with values in {0, 1} —
    NOTE(review): non-binary masks would be silently misinterpreted.
    """
    image_size = mask.shape[-1]
    distance_transform = torch.stack([
        torch.from_numpy(cv2.distanceTransform(
            (1 - m), distanceType=cv2.DIST_L2, maskSize=cv2.DIST_MASK_3
        ) / (image_size / 2))
        for m in mask.squeeze(1).detach().cpu().numpy().astype(np.uint8)
    ]).unsqueeze(1).clip(0, 1).to(mask.device)
    return distance_transform
162,700 | from dataclasses import dataclass
import torch.nn as nn
from .config import parse_structured
from .misc import get_device, load_module_weights
from .typing import *
class Updateable:
    """Mixin for objects that react to training progress.

    `do_update_step` / `do_update_step_end` first recursively notify any
    public attributes that are themselves Updateable, then invoke the
    overridable `update_step` / `update_step_end` hooks on `self`.
    """

    def do_update_step(
        self, epoch: int, global_step: int, on_load_weights: bool = False
    ):
        """Propagate a begin-of-step update to Updateable children, then to self."""
        for attr in self.__dir__():
            if attr.startswith("_"):
                continue
            try:
                module = getattr(self, attr)
            except Exception:
                # Ignore attributes like properties that may raise on access.
                # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
                continue
            if isinstance(module, Updateable):
                module.do_update_step(
                    epoch, global_step, on_load_weights=on_load_weights
                )
        self.update_step(epoch, global_step, on_load_weights=on_load_weights)

    def do_update_step_end(self, epoch: int, global_step: int):
        """Propagate an end-of-step update to Updateable children, then to self."""
        for attr in self.__dir__():
            if attr.startswith("_"):
                continue
            try:
                module = getattr(self, attr)
            except Exception:
                # Ignore attributes like properties that may raise on access.
                continue
            if isinstance(module, Updateable):
                module.do_update_step_end(epoch, global_step)
        self.update_step_end(epoch, global_step)

    def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):
        # Override this method to implement custom update logic.
        # If on_load_weights is True, be careful with model evaluations:
        # models and tensors are not guaranteed to be on the same device.
        pass

    def update_step_end(self, epoch: int, global_step: int):
        # Override this method to implement custom end-of-step logic.
        pass
def update_if_possible(module: Any, epoch: int, global_step: int) -> None:
    """Trigger `do_update_step` on `module` when it is an Updateable; no-op otherwise."""
    if not isinstance(module, Updateable):
        return
    module.do_update_step(epoch, global_step)
162,701 | from dataclasses import dataclass
import torch.nn as nn
from .config import parse_structured
from .misc import get_device, load_module_weights
from .typing import *
class Updateable:
    """Mixin for objects that react to training progress.

    `do_update_step` / `do_update_step_end` first recursively notify any
    public attributes that are themselves Updateable, then invoke the
    overridable `update_step` / `update_step_end` hooks on `self`.
    """

    def do_update_step(
        self, epoch: int, global_step: int, on_load_weights: bool = False
    ):
        """Propagate a begin-of-step update to Updateable children, then to self."""
        for attr in self.__dir__():
            if attr.startswith("_"):
                continue
            try:
                module = getattr(self, attr)
            except Exception:
                # Ignore attributes like properties that may raise on access.
                # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
                continue
            if isinstance(module, Updateable):
                module.do_update_step(
                    epoch, global_step, on_load_weights=on_load_weights
                )
        self.update_step(epoch, global_step, on_load_weights=on_load_weights)

    def do_update_step_end(self, epoch: int, global_step: int):
        """Propagate an end-of-step update to Updateable children, then to self."""
        for attr in self.__dir__():
            if attr.startswith("_"):
                continue
            try:
                module = getattr(self, attr)
            except Exception:
                # Ignore attributes like properties that may raise on access.
                continue
            if isinstance(module, Updateable):
                module.do_update_step_end(epoch, global_step)
        self.update_step_end(epoch, global_step)

    def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):
        # Override this method to implement custom update logic.
        # If on_load_weights is True, be careful with model evaluations:
        # models and tensors are not guaranteed to be on the same device.
        pass

    def update_step_end(self, epoch: int, global_step: int):
        # Override this method to implement custom end-of-step logic.
        pass
def update_end_if_possible(module: Any, epoch: int, global_step: int) -> None:
    """Trigger `do_update_step_end` on `module` when it is an Updateable; no-op otherwise."""
    if not isinstance(module, Updateable):
        return
    module.do_update_step_end(epoch, global_step)
162,702 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import roma
from kiui.op import safe_normalize
def get_rays(pose, h, w, fovy, opengl=True):
    """Generate per-pixel rays for a camera pose.

    pose: (4, 4) camera-to-world matrix; fovy: vertical field of view in
    degrees. Returns (rays_o, rays_d), each (h, w, 3); rays_d is normalized.
    `opengl` flips the y and z signs to match OpenGL camera conventions.
    """
    xs, ys = torch.meshgrid(
        torch.arange(w, device=pose.device),
        torch.arange(h, device=pose.device),
        indexing="xy",
    )
    xs = xs.flatten()
    ys = ys.flatten()

    cx = w * 0.5
    cy = h * 0.5
    focal = h * 0.5 / np.tan(0.5 * np.deg2rad(fovy))

    # In OpenGL convention, y points up and the camera looks down -z.
    sign = -1.0 if opengl else 1.0
    cam_dirs = F.pad(
        torch.stack(
            [
                (xs - cx + 0.5) / focal,
                (ys - cy + 0.5) / focal * sign,
            ],
            dim=-1,
        ),
        (0, 1),
        value=sign,
    )  # [hw, 3]

    rays_d = cam_dirs @ pose[:3, :3].transpose(0, 1)  # [hw, 3]
    rays_o = pose[:3, 3].unsqueeze(0).expand_as(rays_d)  # [hw, 3]

    return rays_o.view(h, w, 3), safe_normalize(rays_d).view(h, w, 3)
162,703 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import roma
from kiui.op import safe_normalize
def orbit_camera_jitter(poses, strength=0.1):
    """Randomly perturb orbit-camera poses with small rotations about the cameras' own axes.

    The same rotation is applied to both orientation and position, so cameras
    stay on their orbit sphere. `strength` scales the maximum rotation angle.
    Returns a jittered copy; `poses` is not modified.
    """
    # poses: [B, 4, 4], assume orbit camera in opengl format
    # random orbital rotate
    B = poses.shape[0]
    # Rotation vectors about the camera up (column 1) and right (column 0) axes.
    rotvec_x = poses[:, :3, 1] * strength * np.pi * (torch.rand(B, 1, device=poses.device) * 2 - 1)
    rotvec_y = poses[:, :3, 0] * strength * np.pi / 2 * (torch.rand(B, 1, device=poses.device) * 2 - 1)
    rot = roma.rotvec_to_rotmat(rotvec_x) @ roma.rotvec_to_rotmat(rotvec_y)
    R = rot @ poses[:, :3, :3]
    T = rot @ poses[:, :3, 3:]
    new_poses = poses.clone()
    new_poses[:, :3, :3] = R
    new_poses[:, :3, 3:] = T
    return new_poses
162,704 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import roma
from kiui.op import safe_normalize
def grid_distortion(images, strength=0.5):
    """Warp images with a random, monotonic piecewise-linear distortion grid.

    A coarse grid of anchor points (random count in [8, 16]) is perturbed per
    image, and pixels are resampled along linear segments between anchors via
    `grid_sample`. `strength` in [0, 1] scales the anchor perturbation.
    """
    # images: [B, C, H, W]
    # num_steps: int, grid resolution for distortion
    # strength: float in [0, 1], strength of distortion
    B, C, H, W = images.shape
    num_steps = np.random.randint(8, 17)
    grid_steps = torch.linspace(-1, 1, num_steps)
    # have to loop batch...
    grids = []
    for b in range(B):
        # construct displacement
        x_steps = torch.linspace(0, 1, num_steps) # [num_steps], inclusive
        x_steps = (x_steps + strength * (torch.rand_like(x_steps) - 0.5) / (num_steps - 1)).clamp(0, 1) # perturb
        x_steps = (x_steps * W).long() # [num_steps]
        x_steps[0] = 0
        x_steps[-1] = W
        xs = []
        for i in range(num_steps - 1):
            xs.append(torch.linspace(grid_steps[i], grid_steps[i + 1], x_steps[i + 1] - x_steps[i]))
        xs = torch.cat(xs, dim=0) # [W]
        y_steps = torch.linspace(0, 1, num_steps) # [num_steps], inclusive
        y_steps = (y_steps + strength * (torch.rand_like(y_steps) - 0.5) / (num_steps - 1)).clamp(0, 1) # perturb
        y_steps = (y_steps * H).long() # [num_steps]
        y_steps[0] = 0
        y_steps[-1] = H
        ys = []
        for i in range(num_steps - 1):
            ys.append(torch.linspace(grid_steps[i], grid_steps[i + 1], y_steps[i + 1] - y_steps[i]))
        ys = torch.cat(ys, dim=0) # [H]
        # construct grid
        grid_x, grid_y = torch.meshgrid(xs, ys, indexing='xy') # [H, W]
        grid = torch.stack([grid_x, grid_y], dim=-1) # [H, W, 2]
        grids.append(grid)
    grids = torch.stack(grids, dim=0).to(images.device) # [B, H, W, 2]
    # grid sample
    images = F.grid_sample(images, grids, align_corners=False)
    return images
162,705 | import math
import numpy as np
from inspect import isfunction
from typing import Optional, Any, List
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, repeat
from diffusers.configuration_utils import ConfigMixin
from diffusers.models.modeling_utils import ModelMixin
import xformers
import xformers.ops
from kiui.cam import orbit_camera
def get_camera(
    num_frames, elevation=0, azimuth_start=0, azimuth_span=360, blender_coord=True, extra_view=False,
):
    """Return flattened 4x4 camera poses for `num_frames` orbit views at `elevation` (degrees).

    Azimuths are evenly spaced over `azimuth_span` degrees starting at
    `azimuth_start`. With `blender_coord`, each OpenGL pose is converted to
    Blender axes. `extra_view` appends one all-zero pose. Output shape:
    [num_frames(+1 if extra_view), 16].
    """
    angle_gap = azimuth_span / num_frames
    cameras = []
    for azimuth in np.arange(azimuth_start, azimuth_span + azimuth_start, angle_gap):
        pose = orbit_camera(elevation, azimuth, radius=1) # [4, 4]
        # opengl to blender
        if blender_coord:
            pose[2] *= -1
            pose[[1, 2]] = pose[[2, 1]]
        cameras.append(pose.flatten())
    if extra_view:
        cameras.append(np.zeros_like(cameras[0]))
    return torch.from_numpy(np.stack(cameras, axis=0)).float() # [num_frames, 16]
162,711 | import numpy as np
import torch
from torch.utils.data import Dataset
from PIL import Image
from typing import Literal, Tuple, Optional, Any
import cv2
import os
import math
import cv2
import numpy as np
def add_margin(pil_img, color=0, size=256):
    """Paste `pil_img` centered onto a new `size` x `size` canvas filled with `color`."""
    w, h = pil_img.size
    canvas = Image.new(pil_img.mode, (size, size), color)
    canvas.paste(pil_img, ((size - w) // 2, (size - h) // 2))
    return canvas
162,712 | import numpy as np
import torch
from torch.utils.data import Dataset
from PIL import Image
from typing import Literal, Tuple, Optional, Any
import cv2
import os
import math
import cv2
import numpy as np
def scale_and_place_object(image, scale_factor):
    """Rescale the object in an RGBA image (found via its alpha bounding box) and re-center it.

    `scale_factor` is capped so the object's larger side cannot exceed that
    fraction of the corresponding image side. Returns a new RGBA uint8 image
    of the original resolution with the rescaled object centered on a
    transparent background.
    """
    assert np.shape(image)[-1]==4 # RGBA
    # Extract the alpha channel (transparency) and the object (RGB channels)
    alpha_channel = image[:, :, 3]
    # Find the bounding box coordinates of the object
    coords = cv2.findNonZero(alpha_channel)
    x, y, width, height = cv2.boundingRect(coords)
    # Calculate the scale factor for resizing
    original_height, original_width = image.shape[:2]
    # Use the object's larger side (relative to the matching image side) to cap the scale.
    if width > height:
        size = width
        original_size = original_width
    else:
        size = height
        original_size = original_height
    scale_factor = min(scale_factor, size / (original_size+0.0))
    new_size = scale_factor * original_size
    scale_factor = new_size / size
    # Calculate the new size based on the scale factor
    new_width = int(width * scale_factor)
    new_height = int(height * scale_factor)
    center_x = original_width // 2
    center_y = original_height // 2
    paste_x = center_x - (new_width // 2)
    paste_y = center_y - (new_height // 2)
    # Resize the object (RGB channels) to the new size
    rescaled_object = cv2.resize(image[y:y+height, x:x+width], (new_width, new_height))
    # Create a new RGBA image with the resized image
    new_image = np.zeros((original_height, original_width, 4), dtype=np.uint8)
    new_image[paste_y:paste_y + new_height, paste_x:paste_x + new_width] = rescaled_object
    return new_image
162,714 | import numpy as np
def worldNormal2camNormal(rot_w2c, normal_map_world):
    """Rotate a per-pixel world-space normal map into camera space.

    Args:
        rot_w2c: (3, 3) world-to-camera rotation matrix.
        normal_map_world: (H, W, 3) array of world-space normal vectors.

    Returns:
        (H, W, 3) array of the same normals expressed in camera coordinates.
    """
    H, W, _ = normal_map_world.shape
    # Flatten to (H*W, 3) and rotate every normal with a single matmul —
    # equivalent to rot_w2c @ n per pixel, but done at C speed.
    flat_normals = normal_map_world.reshape(-1, 3)
    rotated = flat_normals @ rot_w2c.T
    return rotated.reshape(normal_map_world.shape)
def trans_normal(normal, RT_w2c, RT_w2c_target):
    """Re-express a camera-space normal map in another camera's frame.

    Instead of lifting to world space and projecting back (two rotations per
    pixel), the source camera->world and target world->camera rotations are
    composed once and applied in a single pass.

    Args:
        normal: (H, W, 3) normal map in the source camera's frame.
        RT_w2c: source world-to-camera extrinsics (at least 3x3 rotation).
        RT_w2c_target: target world-to-camera extrinsics.

    Returns:
        (H, W, 3) normal map in the target camera's frame.
    """
    src_rot_inv = np.linalg.inv(RT_w2c[:3, :3])
    relative_rot = np.matmul(RT_w2c_target[:3, :3], src_rot_inv)
    return worldNormal2camNormal(relative_rot[:3, :3], normal)
162,718 | from typing import Any, Dict, Optional, Tuple
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from diffusers.utils import is_torch_version, logging
from diffusers.models.normalization import AdaGroupNorm
from diffusers.models.attention_processor import Attention, AttnAddedKVProcessor, AttnAddedKVProcessor2_0
from diffusers.models.transformers.dual_transformer_2d import DualTransformer2DModel
from diffusers.models.resnet import Downsample2D, FirDownsample2D, FirUpsample2D, KDownsample2D, KUpsample2D, ResnetBlock2D, Upsample2D
from mvdiffusion.models.transformer_mv2d import TransformerMV2DModel
from diffusers.models.unets.unet_2d_blocks import DownBlock2D, ResnetDownsampleBlock2D, AttnDownBlock2D, CrossAttnDownBlock2D, SimpleCrossAttnDownBlock2D, SkipDownBlock2D, AttnSkipDownBlock2D, DownEncoderBlock2D, AttnDownEncoderBlock2D, KDownBlock2D, KCrossAttnDownBlock2D
from diffusers.models.unets.unet_2d_blocks import UpBlock2D, ResnetUpsampleBlock2D, CrossAttnUpBlock2D, SimpleCrossAttnUpBlock2D, AttnUpBlock2D, SkipUpBlock2D, AttnSkipUpBlock2D, UpDecoderBlock2D, AttnUpDecoderBlock2D, KUpBlock2D, KCrossAttnUpBlock2D
logger = logging.get_logger(__name__)
class CrossAttnDownBlockMV2D(nn.Module):
    """UNet down-block with cross-attention using multi-view transformers.

    Mirrors diffusers' ``CrossAttnDownBlock2D`` but builds
    ``TransformerMV2DModel`` attention layers instead of the stock
    ``Transformer2DModel`` so attention can additionally run across
    ``num_views`` camera views (and, via the ``cd_attention_*`` /
    ``mvcd_attention`` flags, across paired domains — presumably
    color/depth; verify against TransformerMV2DModel).
    """
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        temb_channels: int,
        dropout: float = 0.0,
        num_layers: int = 1,
        transformer_layers_per_block: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        num_attention_heads=1,
        cross_attention_dim=1280,
        output_scale_factor=1.0,
        downsample_padding=1,
        add_downsample=True,
        dual_cross_attention=False,
        use_linear_projection=False,
        only_cross_attention=False,
        upcast_attention=False,
        num_views: int = 1,
        cd_attention_last: bool = False,
        cd_attention_mid: bool = False,
        multiview_attention: bool = True,
        sparse_mv_attention: bool = False,
        mvcd_attention: bool=False
    ):
        super().__init__()
        resnets = []
        attentions = []
        self.has_cross_attention = True
        self.num_attention_heads = num_attention_heads
        # One (resnet, attention) pair per layer; only the first resnet maps
        # in_channels -> out_channels, the rest stay at out_channels.
        for i in range(num_layers):
            in_channels = in_channels if i == 0 else out_channels
            resnets.append(
                ResnetBlock2D(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=resnet_groups,
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                )
            )
            if not dual_cross_attention:
                attentions.append(
                    TransformerMV2DModel(
                        num_attention_heads,
                        out_channels // num_attention_heads,
                        in_channels=out_channels,
                        num_layers=transformer_layers_per_block,
                        cross_attention_dim=cross_attention_dim,
                        norm_num_groups=resnet_groups,
                        use_linear_projection=use_linear_projection,
                        only_cross_attention=only_cross_attention,
                        upcast_attention=upcast_attention,
                        num_views=num_views,
                        cd_attention_last=cd_attention_last,
                        cd_attention_mid=cd_attention_mid,
                        multiview_attention=multiview_attention,
                        sparse_mv_attention=sparse_mv_attention,
                        mvcd_attention=mvcd_attention
                    )
                )
            else:
                # Dual cross-attention has no multi-view counterpart here.
                raise NotImplementedError
        self.attentions = nn.ModuleList(attentions)
        self.resnets = nn.ModuleList(resnets)
        if add_downsample:
            self.downsamplers = nn.ModuleList(
                [
                    Downsample2D(
                        out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op"
                    )
                ]
            )
        else:
            self.downsamplers = None
        self.gradient_checkpointing = False
    def forward(
        self,
        hidden_states: torch.FloatTensor,
        temb: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        additional_residuals=None,
    ):
        """Run the resnet + MV-attention pairs, then optional downsampling.

        Returns:
            Tuple of (hidden_states, output_states): the final features and a
            tuple of each intermediate output (one per resnet/attn pair, plus
            the downsampled result), used later as skip connections.
        """
        output_states = ()
        blocks = list(zip(self.resnets, self.attentions))
        for i, (resnet, attn) in enumerate(blocks):
            if self.training and self.gradient_checkpointing:
                # Wrap modules so checkpoint() can pass positional args and
                # force return_dict=False where needed.
                def create_custom_forward(module, return_dict=None):
                    def custom_forward(*inputs):
                        if return_dict is not None:
                            return module(*inputs, return_dict=return_dict)
                        else:
                            return module(*inputs)
                    return custom_forward
                # Non-reentrant checkpointing requires torch >= 1.11.
                ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
                hidden_states = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(resnet),
                    hidden_states,
                    temb,
                    **ckpt_kwargs,
                )
                hidden_states = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(attn, return_dict=False),
                    hidden_states,
                    encoder_hidden_states,
                    None,  # timestep
                    None,  # class_labels
                    cross_attention_kwargs,
                    attention_mask,
                    encoder_attention_mask,
                    **ckpt_kwargs,
                )[0]
            else:
                hidden_states = resnet(hidden_states, temb)
                hidden_states = attn(
                    hidden_states,
                    encoder_hidden_states=encoder_hidden_states,
                    cross_attention_kwargs=cross_attention_kwargs,
                    attention_mask=attention_mask,
                    encoder_attention_mask=encoder_attention_mask,
                    return_dict=False,
                )[0]
            # apply additional residuals to the output of the last pair of resnet and attention blocks
            if i == len(blocks) - 1 and additional_residuals is not None:
                hidden_states = hidden_states + additional_residuals
            output_states = output_states + (hidden_states,)
        if self.downsamplers is not None:
            for downsampler in self.downsamplers:
                hidden_states = downsampler(hidden_states)
            output_states = output_states + (hidden_states,)
        return hidden_states, output_states
def get_down_block(
    down_block_type,
    num_layers,
    in_channels,
    out_channels,
    temb_channels,
    add_downsample,
    resnet_eps,
    resnet_act_fn,
    transformer_layers_per_block=1,
    num_attention_heads=None,
    resnet_groups=None,
    cross_attention_dim=None,
    downsample_padding=None,
    dual_cross_attention=False,
    use_linear_projection=False,
    only_cross_attention=False,
    upcast_attention=False,
    resnet_time_scale_shift="default",
    resnet_skip_time_act=False,
    resnet_out_scale_factor=1.0,
    cross_attention_norm=None,
    attention_head_dim=None,
    downsample_type=None,
    num_views=1,
    cd_attention_last: bool = False,
    cd_attention_mid: bool = False,
    multiview_attention: bool = True,
    sparse_mv_attention: bool = False,
    mvcd_attention: bool=False
):
    """Instantiate a UNet down-block from its type name.

    Dispatches ``down_block_type`` (an optional ``UNetRes`` prefix is
    stripped) to the matching diffusers block class, forwarding only the
    keyword arguments that class accepts.  The custom
    ``CrossAttnDownBlockMV2D`` type additionally receives the multi-view
    attention options (``num_views``, ``cd_attention_*``, ...).

    Returns:
        The constructed down-block module.

    Raises:
        ValueError: if the type name is unknown, or if a cross-attention
            block is requested without ``cross_attention_dim``.
    """
    # If attn head dim is not defined, we default it to the number of heads
    if attention_head_dim is None:
        # Fix: Logger.warn is a deprecated alias (removed in Python 3.13);
        # use Logger.warning instead.
        logger.warning(
            f"It is recommended to provide `attention_head_dim` when calling `get_down_block`. Defaulting `attention_head_dim` to {num_attention_heads}."
        )
        attention_head_dim = num_attention_heads
    down_block_type = down_block_type[7:] if down_block_type.startswith("UNetRes") else down_block_type
    if down_block_type == "DownBlock2D":
        return DownBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            temb_channels=temb_channels,
            add_downsample=add_downsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            downsample_padding=downsample_padding,
            resnet_time_scale_shift=resnet_time_scale_shift,
        )
    elif down_block_type == "ResnetDownsampleBlock2D":
        return ResnetDownsampleBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            temb_channels=temb_channels,
            add_downsample=add_downsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            resnet_time_scale_shift=resnet_time_scale_shift,
            skip_time_act=resnet_skip_time_act,
            output_scale_factor=resnet_out_scale_factor,
        )
    elif down_block_type == "AttnDownBlock2D":
        if add_downsample is False:
            downsample_type = None
        else:
            downsample_type = downsample_type or "conv"  # default to 'conv'
        return AttnDownBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            temb_channels=temb_channels,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            downsample_padding=downsample_padding,
            attention_head_dim=attention_head_dim,
            resnet_time_scale_shift=resnet_time_scale_shift,
            downsample_type=downsample_type,
        )
    elif down_block_type == "CrossAttnDownBlock2D":
        if cross_attention_dim is None:
            raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlock2D")
        return CrossAttnDownBlock2D(
            num_layers=num_layers,
            transformer_layers_per_block=transformer_layers_per_block,
            in_channels=in_channels,
            out_channels=out_channels,
            temb_channels=temb_channels,
            add_downsample=add_downsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            downsample_padding=downsample_padding,
            cross_attention_dim=cross_attention_dim,
            num_attention_heads=num_attention_heads,
            dual_cross_attention=dual_cross_attention,
            use_linear_projection=use_linear_projection,
            only_cross_attention=only_cross_attention,
            upcast_attention=upcast_attention,
            resnet_time_scale_shift=resnet_time_scale_shift,
        )
    # custom MV2D attention block
    elif down_block_type == "CrossAttnDownBlockMV2D":
        if cross_attention_dim is None:
            raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlockMV2D")
        return CrossAttnDownBlockMV2D(
            num_layers=num_layers,
            transformer_layers_per_block=transformer_layers_per_block,
            in_channels=in_channels,
            out_channels=out_channels,
            temb_channels=temb_channels,
            add_downsample=add_downsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            downsample_padding=downsample_padding,
            cross_attention_dim=cross_attention_dim,
            num_attention_heads=num_attention_heads,
            dual_cross_attention=dual_cross_attention,
            use_linear_projection=use_linear_projection,
            only_cross_attention=only_cross_attention,
            upcast_attention=upcast_attention,
            resnet_time_scale_shift=resnet_time_scale_shift,
            num_views=num_views,
            cd_attention_last=cd_attention_last,
            cd_attention_mid=cd_attention_mid,
            multiview_attention=multiview_attention,
            sparse_mv_attention=sparse_mv_attention,
            mvcd_attention=mvcd_attention
        )
    elif down_block_type == "SimpleCrossAttnDownBlock2D":
        if cross_attention_dim is None:
            raise ValueError("cross_attention_dim must be specified for SimpleCrossAttnDownBlock2D")
        return SimpleCrossAttnDownBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            temb_channels=temb_channels,
            add_downsample=add_downsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            cross_attention_dim=cross_attention_dim,
            attention_head_dim=attention_head_dim,
            resnet_time_scale_shift=resnet_time_scale_shift,
            skip_time_act=resnet_skip_time_act,
            output_scale_factor=resnet_out_scale_factor,
            only_cross_attention=only_cross_attention,
            cross_attention_norm=cross_attention_norm,
        )
    elif down_block_type == "SkipDownBlock2D":
        return SkipDownBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            temb_channels=temb_channels,
            add_downsample=add_downsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            downsample_padding=downsample_padding,
            resnet_time_scale_shift=resnet_time_scale_shift,
        )
    elif down_block_type == "AttnSkipDownBlock2D":
        return AttnSkipDownBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            temb_channels=temb_channels,
            add_downsample=add_downsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            attention_head_dim=attention_head_dim,
            resnet_time_scale_shift=resnet_time_scale_shift,
        )
    elif down_block_type == "DownEncoderBlock2D":
        return DownEncoderBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            add_downsample=add_downsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            downsample_padding=downsample_padding,
            resnet_time_scale_shift=resnet_time_scale_shift,
        )
    elif down_block_type == "AttnDownEncoderBlock2D":
        return AttnDownEncoderBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            add_downsample=add_downsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            downsample_padding=downsample_padding,
            attention_head_dim=attention_head_dim,
            resnet_time_scale_shift=resnet_time_scale_shift,
        )
    elif down_block_type == "KDownBlock2D":
        return KDownBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            temb_channels=temb_channels,
            add_downsample=add_downsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
        )
    elif down_block_type == "KCrossAttnDownBlock2D":
        return KCrossAttnDownBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            temb_channels=temb_channels,
            add_downsample=add_downsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            cross_attention_dim=cross_attention_dim,
            attention_head_dim=attention_head_dim,
            add_self_attention=True if not add_downsample else False,
        )
    raise ValueError(f"{down_block_type} does not exist.")
162,719 | from typing import Any, Dict, Optional, Tuple
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from diffusers.utils import is_torch_version, logging
from diffusers.models.normalization import AdaGroupNorm
from diffusers.models.attention_processor import Attention, AttnAddedKVProcessor, AttnAddedKVProcessor2_0
from diffusers.models.transformers.dual_transformer_2d import DualTransformer2DModel
from diffusers.models.resnet import Downsample2D, FirDownsample2D, FirUpsample2D, KDownsample2D, KUpsample2D, ResnetBlock2D, Upsample2D
from mvdiffusion.models.transformer_mv2d import TransformerMV2DModel
from diffusers.models.unets.unet_2d_blocks import DownBlock2D, ResnetDownsampleBlock2D, AttnDownBlock2D, CrossAttnDownBlock2D, SimpleCrossAttnDownBlock2D, SkipDownBlock2D, AttnSkipDownBlock2D, DownEncoderBlock2D, AttnDownEncoderBlock2D, KDownBlock2D, KCrossAttnDownBlock2D
from diffusers.models.unets.unet_2d_blocks import UpBlock2D, ResnetUpsampleBlock2D, CrossAttnUpBlock2D, SimpleCrossAttnUpBlock2D, AttnUpBlock2D, SkipUpBlock2D, AttnSkipUpBlock2D, UpDecoderBlock2D, AttnUpDecoderBlock2D, KUpBlock2D, KCrossAttnUpBlock2D
logger = logging.get_logger(__name__)
class CrossAttnUpBlockMV2D(nn.Module):
    """UNet up-block with cross-attention using multi-view transformers.

    Mirrors diffusers' ``CrossAttnUpBlock2D`` but builds
    ``TransformerMV2DModel`` attention layers so attention can additionally
    run across ``num_views`` camera views.  Each layer concatenates a skip
    connection from the down path before its resnet.
    """
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        prev_output_channel: int,
        temb_channels: int,
        dropout: float = 0.0,
        num_layers: int = 1,
        transformer_layers_per_block: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        num_attention_heads=1,
        cross_attention_dim=1280,
        output_scale_factor=1.0,
        add_upsample=True,
        dual_cross_attention=False,
        use_linear_projection=False,
        only_cross_attention=False,
        upcast_attention=False,
        num_views: int = 1,
        cd_attention_last: bool = False,
        cd_attention_mid: bool = False,
        multiview_attention: bool = True,
        sparse_mv_attention: bool = False,
        mvcd_attention: bool=False
    ):
        super().__init__()
        resnets = []
        attentions = []
        self.has_cross_attention = True
        self.num_attention_heads = num_attention_heads
        for i in range(num_layers):
            # Skip-connection width: the last layer receives the down-path
            # tensor at in_channels, earlier layers at out_channels.
            res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
            resnet_in_channels = prev_output_channel if i == 0 else out_channels
            resnets.append(
                ResnetBlock2D(
                    in_channels=resnet_in_channels + res_skip_channels,
                    out_channels=out_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=resnet_groups,
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                )
            )
            if not dual_cross_attention:
                attentions.append(
                    TransformerMV2DModel(
                        num_attention_heads,
                        out_channels // num_attention_heads,
                        in_channels=out_channels,
                        num_layers=transformer_layers_per_block,
                        cross_attention_dim=cross_attention_dim,
                        norm_num_groups=resnet_groups,
                        use_linear_projection=use_linear_projection,
                        only_cross_attention=only_cross_attention,
                        upcast_attention=upcast_attention,
                        num_views=num_views,
                        cd_attention_last=cd_attention_last,
                        cd_attention_mid=cd_attention_mid,
                        multiview_attention=multiview_attention,
                        sparse_mv_attention=sparse_mv_attention,
                        mvcd_attention=mvcd_attention
                    )
                )
            else:
                # Dual cross-attention has no multi-view counterpart here.
                raise NotImplementedError
        self.attentions = nn.ModuleList(attentions)
        self.resnets = nn.ModuleList(resnets)
        if add_upsample:
            self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])
        else:
            self.upsamplers = None
        self.gradient_checkpointing = False
    def forward(
        self,
        hidden_states: torch.FloatTensor,
        res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],
        temb: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        upsample_size: Optional[int] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
    ):
        """Run resnet + MV-attention pairs with skip connections, then upsample.

        ``res_hidden_states_tuple`` is consumed from its end (the down path's
        most recent outputs first), concatenated channel-wise before each
        resnet.  Returns the upsampled hidden states.
        """
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
            if self.training and self.gradient_checkpointing:
                # Wrap modules so checkpoint() can pass positional args and
                # force return_dict=False where needed.
                def create_custom_forward(module, return_dict=None):
                    def custom_forward(*inputs):
                        if return_dict is not None:
                            return module(*inputs, return_dict=return_dict)
                        else:
                            return module(*inputs)
                    return custom_forward
                # Non-reentrant checkpointing requires torch >= 1.11.
                ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
                hidden_states = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(resnet),
                    hidden_states,
                    temb,
                    **ckpt_kwargs,
                )
                hidden_states = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(attn, return_dict=False),
                    hidden_states,
                    encoder_hidden_states,
                    None,  # timestep
                    None,  # class_labels
                    cross_attention_kwargs,
                    attention_mask,
                    encoder_attention_mask,
                    **ckpt_kwargs,
                )[0]
            else:
                hidden_states = resnet(hidden_states, temb)
                hidden_states = attn(
                    hidden_states,
                    encoder_hidden_states=encoder_hidden_states,
                    cross_attention_kwargs=cross_attention_kwargs,
                    attention_mask=attention_mask,
                    encoder_attention_mask=encoder_attention_mask,
                    return_dict=False,
                )[0]
        if self.upsamplers is not None:
            for upsampler in self.upsamplers:
                hidden_states = upsampler(hidden_states, upsample_size)
        return hidden_states
def get_up_block(
    up_block_type,
    num_layers,
    in_channels,
    out_channels,
    prev_output_channel,
    temb_channels,
    add_upsample,
    resnet_eps,
    resnet_act_fn,
    transformer_layers_per_block=1,
    num_attention_heads=None,
    resnet_groups=None,
    cross_attention_dim=None,
    dual_cross_attention=False,
    use_linear_projection=False,
    only_cross_attention=False,
    upcast_attention=False,
    resnet_time_scale_shift="default",
    resnet_skip_time_act=False,
    resnet_out_scale_factor=1.0,
    cross_attention_norm=None,
    attention_head_dim=None,
    upsample_type=None,
    num_views=1,
    cd_attention_last: bool = False,
    cd_attention_mid: bool = False,
    multiview_attention: bool = True,
    sparse_mv_attention: bool = False,
    mvcd_attention: bool=False
):
    """Instantiate a UNet up-block from its type name.

    Dispatches ``up_block_type`` (an optional ``UNetRes`` prefix is stripped)
    to the matching diffusers block class, forwarding only the keyword
    arguments that class accepts.  The custom ``CrossAttnUpBlockMV2D`` type
    additionally receives the multi-view attention options (``num_views``,
    ``cd_attention_*``, ...).

    Returns:
        The constructed up-block module.

    Raises:
        ValueError: if the type name is unknown, or if a cross-attention
            block is requested without ``cross_attention_dim``.
    """
    # If attn head dim is not defined, we default it to the number of heads
    if attention_head_dim is None:
        # Fix: Logger.warn is a deprecated alias (removed in Python 3.13);
        # use Logger.warning instead.
        logger.warning(
            f"It is recommended to provide `attention_head_dim` when calling `get_up_block`. Defaulting `attention_head_dim` to {num_attention_heads}."
        )
        attention_head_dim = num_attention_heads
    up_block_type = up_block_type[7:] if up_block_type.startswith("UNetRes") else up_block_type
    if up_block_type == "UpBlock2D":
        return UpBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            prev_output_channel=prev_output_channel,
            temb_channels=temb_channels,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            resnet_time_scale_shift=resnet_time_scale_shift,
        )
    elif up_block_type == "ResnetUpsampleBlock2D":
        return ResnetUpsampleBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            prev_output_channel=prev_output_channel,
            temb_channels=temb_channels,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            resnet_time_scale_shift=resnet_time_scale_shift,
            skip_time_act=resnet_skip_time_act,
            output_scale_factor=resnet_out_scale_factor,
        )
    elif up_block_type == "CrossAttnUpBlock2D":
        if cross_attention_dim is None:
            raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlock2D")
        return CrossAttnUpBlock2D(
            num_layers=num_layers,
            transformer_layers_per_block=transformer_layers_per_block,
            in_channels=in_channels,
            out_channels=out_channels,
            prev_output_channel=prev_output_channel,
            temb_channels=temb_channels,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            cross_attention_dim=cross_attention_dim,
            num_attention_heads=num_attention_heads,
            dual_cross_attention=dual_cross_attention,
            use_linear_projection=use_linear_projection,
            only_cross_attention=only_cross_attention,
            upcast_attention=upcast_attention,
            resnet_time_scale_shift=resnet_time_scale_shift,
        )
    # custom MV2D attention block
    elif up_block_type == "CrossAttnUpBlockMV2D":
        if cross_attention_dim is None:
            raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlockMV2D")
        return CrossAttnUpBlockMV2D(
            num_layers=num_layers,
            transformer_layers_per_block=transformer_layers_per_block,
            in_channels=in_channels,
            out_channels=out_channels,
            prev_output_channel=prev_output_channel,
            temb_channels=temb_channels,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            cross_attention_dim=cross_attention_dim,
            num_attention_heads=num_attention_heads,
            dual_cross_attention=dual_cross_attention,
            use_linear_projection=use_linear_projection,
            only_cross_attention=only_cross_attention,
            upcast_attention=upcast_attention,
            resnet_time_scale_shift=resnet_time_scale_shift,
            num_views=num_views,
            cd_attention_last=cd_attention_last,
            cd_attention_mid=cd_attention_mid,
            multiview_attention=multiview_attention,
            sparse_mv_attention=sparse_mv_attention,
            mvcd_attention=mvcd_attention
        )
    elif up_block_type == "SimpleCrossAttnUpBlock2D":
        if cross_attention_dim is None:
            raise ValueError("cross_attention_dim must be specified for SimpleCrossAttnUpBlock2D")
        return SimpleCrossAttnUpBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            prev_output_channel=prev_output_channel,
            temb_channels=temb_channels,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            cross_attention_dim=cross_attention_dim,
            attention_head_dim=attention_head_dim,
            resnet_time_scale_shift=resnet_time_scale_shift,
            skip_time_act=resnet_skip_time_act,
            output_scale_factor=resnet_out_scale_factor,
            only_cross_attention=only_cross_attention,
            cross_attention_norm=cross_attention_norm,
        )
    elif up_block_type == "AttnUpBlock2D":
        if add_upsample is False:
            upsample_type = None
        else:
            upsample_type = upsample_type or "conv"  # default to 'conv'
        return AttnUpBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            prev_output_channel=prev_output_channel,
            temb_channels=temb_channels,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            attention_head_dim=attention_head_dim,
            resnet_time_scale_shift=resnet_time_scale_shift,
            upsample_type=upsample_type,
        )
    elif up_block_type == "SkipUpBlock2D":
        return SkipUpBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            prev_output_channel=prev_output_channel,
            temb_channels=temb_channels,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_time_scale_shift=resnet_time_scale_shift,
        )
    elif up_block_type == "AttnSkipUpBlock2D":
        return AttnSkipUpBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            prev_output_channel=prev_output_channel,
            temb_channels=temb_channels,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            attention_head_dim=attention_head_dim,
            resnet_time_scale_shift=resnet_time_scale_shift,
        )
    elif up_block_type == "UpDecoderBlock2D":
        return UpDecoderBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            resnet_time_scale_shift=resnet_time_scale_shift,
            temb_channels=temb_channels,
        )
    elif up_block_type == "AttnUpDecoderBlock2D":
        return AttnUpDecoderBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            attention_head_dim=attention_head_dim,
            resnet_time_scale_shift=resnet_time_scale_shift,
            temb_channels=temb_channels,
        )
    elif up_block_type == "KUpBlock2D":
        return KUpBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            temb_channels=temb_channels,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
        )
    elif up_block_type == "KCrossAttnUpBlock2D":
        return KCrossAttnUpBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            temb_channels=temb_channels,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            cross_attention_dim=cross_attention_dim,
            attention_head_dim=attention_head_dim,
        )
    raise ValueError(f"{up_block_type} does not exist.")
162,720 | from dataclasses import dataclass
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.models.embeddings import ImagePositionalEmbeddings
from diffusers.utils import BaseOutput, deprecate
from diffusers.utils.torch_utils import maybe_allow_in_graph
from diffusers.models.attention import FeedForward, AdaLayerNorm, AdaLayerNormZero, Attention
from diffusers.models.embeddings import PatchEmbed
from diffusers.models.lora import LoRACompatibleConv, LoRACompatibleLinear
from diffusers.models.modeling_utils import ModelMixin
from diffusers.utils.import_utils import is_xformers_available
from einops import rearrange, repeat
The code snippet above provides the dependencies needed to implement the `my_repeat` function. Write a Python function `def my_repeat(tensor, num_repeats)` that solves the following problem: repeat a tensor along a given dimension.
Here is the function:
def my_repeat(tensor, num_repeats):
    """Repeat a tensor along its leading (batch) dimension.

    Each entry along dim 0 is duplicated ``num_repeats`` times consecutively
    (einops ``(b v)`` grouping), for 3D ``(b, d, c)`` or 4D ``(a, b, d, c)``
    tensors.

    Args:
        tensor: a 3D or 4D tensor.
        num_repeats: number of consecutive copies per leading-dim entry.

    Returns:
        Tensor whose leading dimension is ``tensor.shape[0] * num_repeats``.

    Raises:
        ValueError: if the tensor is not 3D or 4D.  (The previous version
            silently returned ``None`` here, deferring the failure to the
            caller.)
    """
    if len(tensor.shape) == 3:
        return repeat(tensor, "b d c -> (b v) d c", v=num_repeats)
    if len(tensor.shape) == 4:
        return repeat(tensor, "a b d c -> (a v) b d c", v=num_repeats)
    raise ValueError(
        f"my_repeat expects a 3D or 4D tensor, got shape {tuple(tensor.shape)}"
    )
162,721 | import os
from omegaconf import OmegaConf
from packaging import version
from typing import Dict, Optional, Tuple, List
from dataclasses import dataclass
def prompt(question):
    """Ask a yes/no question on stdin until the user answers 'y' or 'n'.

    Args:
        question: text shown to the user (a " (y/n)" suffix is appended).

    Returns:
        True for 'y', False for 'n'.
    """
    # Loop instead of recursing (the old version called itself on every
    # invalid answer, so persistent bad input could hit the recursion limit).
    while True:
        inp = input(f"{question} (y/n)").lower().strip()
        if inp == 'y':
            return True
        if inp == 'n':
            return False
162,722 | import os
from omegaconf import OmegaConf
from packaging import version
from typing import Dict, Optional, Tuple, List
from dataclasses import dataclass
# Custom OmegaConf interpolation resolvers, usable inside YAML configs as
# e.g. ${mul:${a},${b}}.  replace=True keeps re-registration on module
# re-import from raising.
OmegaConf.register_new_resolver('calc_exp_lr_decay_rate', lambda factor, n: factor**(1./n), replace=True)  # per-step rate for total decay `factor` over n steps
OmegaConf.register_new_resolver('add', lambda a, b: a + b, replace=True)
OmegaConf.register_new_resolver('sub', lambda a, b: a - b, replace=True)
OmegaConf.register_new_resolver('mul', lambda a, b: a * b, replace=True)
OmegaConf.register_new_resolver('div', lambda a, b: a / b, replace=True)
OmegaConf.register_new_resolver('idiv', lambda a, b: a // b, replace=True)
OmegaConf.register_new_resolver('basename', lambda p: os.path.basename(p), replace=True)
class MVConfig:
    def load_config(*yaml_files, cli_args=[]):
        """Load and merge YAML config files plus CLI overrides into an MVConfig.

        Later YAML files and CLI arguments override earlier values; the merged
        config is resolved and then validated against this class's structured
        schema.

        NOTE(review): defined without ``self``/``@staticmethod`` — call it on
        the class (``MVConfig.load_config(...)``), not on an instance.
        """
        file_confs = [OmegaConf.load(path) for path in yaml_files]
        merged = OmegaConf.merge(*file_confs, OmegaConf.from_cli(cli_args))
        OmegaConf.resolve(merged)
        # Merge onto the structured schema so missing/typed fields are checked.
        return OmegaConf.merge(OmegaConf.structured(MVConfig), merged)
162,723 | import os
from omegaconf import OmegaConf
from packaging import version
from typing import Dict, Optional, Tuple, List
from dataclasses import dataclass
# Custom OmegaConf interpolation resolvers, usable inside YAML configs as
# e.g. ${mul:${a},${b}}.  replace=True keeps re-registration on module
# re-import from raising.
OmegaConf.register_new_resolver('calc_exp_lr_decay_rate', lambda factor, n: factor**(1./n), replace=True)  # per-step rate for total decay `factor` over n steps
OmegaConf.register_new_resolver('add', lambda a, b: a + b, replace=True)
OmegaConf.register_new_resolver('sub', lambda a, b: a - b, replace=True)
OmegaConf.register_new_resolver('mul', lambda a, b: a * b, replace=True)
OmegaConf.register_new_resolver('div', lambda a, b: a / b, replace=True)
OmegaConf.register_new_resolver('idiv', lambda a, b: a // b, replace=True)
OmegaConf.register_new_resolver('basename', lambda p: os.path.basename(p), replace=True)
def config_to_primitive(config, resolve=True):
    """Convert an OmegaConf config into plain Python containers (dict/list).

    Args:
        config: the OmegaConf object to convert.
        resolve: whether to resolve ${...} interpolations during conversion.
    """
    container = OmegaConf.to_container(config, resolve=resolve)
    return container
162,724 | import os
from omegaconf import OmegaConf
from packaging import version
from typing import Dict, Optional, Tuple, List
from dataclasses import dataclass
# Custom OmegaConf interpolation resolvers, usable inside YAML configs as
# e.g. ${mul:${a},${b}}.  replace=True keeps re-registration on module
# re-import from raising.
OmegaConf.register_new_resolver('calc_exp_lr_decay_rate', lambda factor, n: factor**(1./n), replace=True)  # per-step rate for total decay `factor` over n steps
OmegaConf.register_new_resolver('add', lambda a, b: a + b, replace=True)
OmegaConf.register_new_resolver('sub', lambda a, b: a - b, replace=True)
OmegaConf.register_new_resolver('mul', lambda a, b: a * b, replace=True)
OmegaConf.register_new_resolver('div', lambda a, b: a / b, replace=True)
OmegaConf.register_new_resolver('idiv', lambda a, b: a // b, replace=True)
OmegaConf.register_new_resolver('basename', lambda p: os.path.basename(p), replace=True)
def dump_config(path, config):
    """Serialize *config* to YAML at *path*, overwriting any existing file."""
    with open(path, 'w') as handle:
        OmegaConf.save(config=config, f=handle)
162,725 | import os
from omegaconf import OmegaConf
from packaging import version
from typing import Dict, Optional, Tuple, List
from dataclasses import dataclass
def get_rank():
    """Return this process's distributed rank, or 0 when none is set.

    RANK/LOCAL_RANK are consulted before SLURM_PROCID because SLURM_PROCID
    can be set even when SLURM is not managing the multiprocessing.
    """
    for key in ("RANK", "LOCAL_RANK", "SLURM_PROCID", "JSM_NAMESPACE_RANK"):
        value = os.environ.get(key)
        if value is not None:
            return int(value)
    return 0
162,726 | import os
from omegaconf import OmegaConf
from packaging import version
from typing import Dict, Optional, Tuple, List
from dataclasses import dataclass
def parse_version(ver):
    """Parse a version string into a comparable ``packaging`` Version object."""
    parsed = version.parse(ver)
    return parsed
162,727 | import torch
import numpy as np
from kornia.geometry.conversions import (
quaternion_to_axis_angle,
axis_angle_to_quaternion,
)
import pymeshlab as pml
from plyfile import PlyData, PlyElement
from .mesh import PointCloud
from shared_utils.sh_utils import SH2RGB, RGB2SH
def _base_face_areas(face_vertices_0, face_vertices_1, face_vertices_2):
    """Compute per-face triangle areas from the three vertex tensors.

    Uses half the magnitude of the cross product of two edge vectors.
    Inputs are (..., 3) tensors of matching shape; the result keeps the
    leading dimensions with a trailing size-1 component.
    """
    edge_a = face_vertices_0 - face_vertices_1
    edge_b = face_vertices_1 - face_vertices_2
    ax, ay, az = torch.split(edge_a, 1, dim=-1)
    bx, by, bz = torch.split(edge_b, 1, dim=-1)
    # Squared components of edge_a x edge_b.
    cross_sq = (
        (ay * bz - az * by) ** 2
        + (az * bx - ax * bz) ** 2
        + (ax * by - ay * bx) ** 2
    )
    return torch.sqrt(cross_sq) * 0.5
def _base_sample_points_selected_faces(face_vertices, face_features=None):
"""Base function to sample points over selected faces, sample one point per face.
The coordinates of the face vertices are interpolated to generate new samples.
Args:
face_vertices (tuple of torch.Tensor):
Coordinates of vertices, corresponding to selected faces to sample from.
A tuple of 3 entries corresponding to each of the face vertices.
Each entry is a torch.Tensor of shape :math:`(\\text{batch_size}, \\text{num_samples}, 3)`.
face_features (tuple of torch.Tensor, Optional):
Features of face vertices, corresponding to selected faces to sample from.
A tuple of 3 entries corresponding to each of the face vertices.
Each entry is a torch.Tensor of shape
:math:`(\\text{batch_size}, \\text{num_samples}, \\text{feature_dim})`.
Returns:
(torch.Tensor, torch.Tensor):
Sampled point coordinates of shape :math:`(\\text{batch_size}, \\text{num_samples}, 3)`.
Sampled points interpolated features of shape
:math:`(\\text{batch_size}, \\text{num_samples}, \\text{feature_dim})`.
If `face_vertices_features` arg is not specified, the returned interpolated features are None.
"""
face_vertices0, face_vertices1, face_vertices2 = face_vertices
sampling_shape = tuple(int(d) for d in face_vertices0.shape[:-1]) + (1,)
# u is proximity to middle point between v1 and v2 against v0.
# v is proximity to v2 against v1.
#
# The probability density for u should be f_U(u) = 2u.
# However, torch.rand use a uniform (f_X(x) = x) distribution,
# so using torch.sqrt we make a change of variable to have the desired density
# f_Y(y) = f_X(y ^ 2) * |d(y ^ 2) / dy| = 2y
u = torch.sqrt(torch.rand(sampling_shape,
device=face_vertices0.device,
dtype=face_vertices0.dtype))
v = torch.rand(sampling_shape,
device=face_vertices0.device,
dtype=face_vertices0.dtype)
w0 = 1 - u
w1 = u * (1 - v)
w2 = u * v
points = w0 * face_vertices0 + w1 * face_vertices1 + w2 * face_vertices2
features = None
if face_features is not None:
face_features0, face_features1, face_features2 = face_features
features = w0 * face_features0 + w1 * face_features1 + \
w2 * face_features2
return points, features
The provided code snippet includes necessary dependencies for implementing the `sample_points` function. Write a Python function `def sample_points(vertices, faces, num_samples, areas=None, face_features=None)` to solve the following problem:
r"""Uniformly sample points over the surface of triangle meshes. First, face on which the point is sampled is randomly selected, with the probability of selection being proportional to the area of the face. then the coordinate on the face is uniformly sampled. If ``face_features`` is defined for the mesh faces, the sampled points will be returned with interpolated features as well, otherwise, no feature interpolation will occur. Args: vertices (torch.Tensor): The vertices of the meshes, of shape :math:`(\text{batch_size}, \text{num_vertices}, 3)`. faces (torch.LongTensor): The faces of the mesh, of shape :math:`(\text{num_faces}, 3)`. num_samples (int): The number of point sampled per mesh. Also the number of faces sampled per mesh, and then sample a single point per face. areas (torch.Tensor, optional): The areas of each face, of shape :math:`(\text{batch_size}, \text{num_faces})`, can be preprocessed, for fast on-the-fly sampling, will be computed if None (default). face_features (torch.Tensor, optional): Per-vertex-per-face features, matching ``faces`` order, of shape :math:`(\text{batch_size}, \text{num_faces}, 3, \text{feature_dim})`. For example: 1. Texture uv coordinates would be of shape :math:`(\text{batch_size}, \text{num_faces}, 3, 2)`. 2. RGB color values would be of shape :math:`(\text{batch_size}, \text{num_faces}, 3, 3)`. When specified, it is used to interpolate the features for new sampled points. See also: :func:`~kaolin.ops.mesh.index_vertices_by_faces` for conversion of features defined per vertex and need to be converted to per-vertex-per-face shape of :math:`(\text{num_faces}, 3)`. Returns: (torch.Tensor, torch.LongTensor, (optional) torch.Tensor): the pointclouds of shape :math:`(\text{batch_size}, \text{num_samples}, 3)`, and the indexes of the faces selected, of shape :math:`(\text{batch_size}, \text{num_samples})`. 
If ``face_features`` arg is specified, then the interpolated features of sampled points of shape :math:`(\text{batch_size}, \text{num_samples}, \text{feature_dim})` are also returned.
Here is the function:
def sample_points(vertices, faces, num_samples, areas=None, face_features=None):
    r"""Uniformly sample points over the surface of triangle meshes.

    First, face on which the point is sampled is randomly selected,
    with the probability of selection being proportional to the area of the face.
    then the coordinate on the face is uniformly sampled.

    If ``face_features`` is defined for the mesh faces,
    the sampled points will be returned with interpolated features as well,
    otherwise, no feature interpolation will occur.

    Args:
        vertices (torch.Tensor):
            The vertices of the meshes, of shape
            :math:`(\text{batch_size}, \text{num_vertices}, 3)`.
        faces (torch.LongTensor):
            The faces of the mesh, of shape :math:`(\text{num_faces}, 3)`.
        num_samples (int):
            The number of point sampled per mesh.
            Also the number of faces sampled per mesh, and then sample a single point per face.
        areas (torch.Tensor, optional):
            The areas of each face, of shape :math:`(\text{batch_size}, \text{num_faces})`,
            can be preprocessed, for fast on-the-fly sampling,
            will be computed if None (default).
        face_features (torch.Tensor, optional):
            Per-vertex-per-face features, matching ``faces`` order,
            of shape :math:`(\text{batch_size}, \text{num_faces}, 3, \text{feature_dim})`.
            For example:

                1. Texture uv coordinates would be of shape
                   :math:`(\text{batch_size}, \text{num_faces}, 3, 2)`.
                2. RGB color values would be of shape
                   :math:`(\text{batch_size}, \text{num_faces}, 3, 3)`.

            When specified, it is used to interpolate the features for new sampled points.

    See also:
        :func:`~kaolin.ops.mesh.index_vertices_by_faces` for conversion of features defined per vertex
        and need to be converted to per-vertex-per-face shape of :math:`(\text{num_faces}, 3)`.

    Returns:
        (torch.Tensor, torch.LongTensor, (optional) torch.Tensor):
            the pointclouds of shape :math:`(\text{batch_size}, \text{num_samples}, 3)`,
            and the indexes of the faces selected,
            of shape :math:`(\text{batch_size}, \text{num_samples})`.

            If ``face_features`` arg is specified, then the interpolated features of sampled points of shape
            :math:`(\text{batch_size}, \text{num_samples}, \text{feature_dim})` are also returned.
    """
    if faces.shape[-1] != 3:
        raise NotImplementedError("sample_points is only implemented for triangle meshes")
    faces_0, faces_1, faces_2 = torch.split(faces, 1, dim=1)  # (num_faces, 3) -> tuple of (num_faces,)
    # Gather the three vertex positions of every face, keeping the batch dim.
    face_v_0 = torch.index_select(vertices, 1, faces_0.reshape(-1))  # (batch_size, num_faces, 3)
    face_v_1 = torch.index_select(vertices, 1, faces_1.reshape(-1))  # (batch_size, num_faces, 3)
    face_v_2 = torch.index_select(vertices, 1, faces_2.reshape(-1))  # (batch_size, num_faces, 3)

    if areas is None:
        # Face areas act as (unnormalized) sampling weights.
        areas = _base_face_areas(face_v_0, face_v_1, face_v_2).squeeze(-1)
    # Sample faces with probability proportional to their area.
    face_dist = torch.distributions.Categorical(areas)
    face_choices = face_dist.sample([num_samples]).transpose(0, 1)
    # Broadcast chosen face ids over the 3 coordinates for gather.
    _face_choices = face_choices.unsqueeze(-1).repeat(1, 1, 3)
    v0 = torch.gather(face_v_0, 1, _face_choices)  # (batch_size, num_samples, 3)
    v1 = torch.gather(face_v_1, 1, _face_choices)  # (batch_size, num_samples, 3)
    v2 = torch.gather(face_v_2, 1, _face_choices)  # (batch_size, num_samples, 3)
    face_vertices_choices = (v0, v1, v2)

    # If per-face features are available, interpolate them for the sampled points too.
    face_features_choices = None
    if face_features is not None:
        feat_dim = face_features.shape[-1]
        # Broadcast chosen face ids over the 3 verts and the feature dim for gather.
        _face_choices = face_choices[..., None, None].repeat(1, 1, 3, feat_dim)
        face_features_choices = torch.gather(face_features, 1, _face_choices)
        # Split (batch, num_samples, 3, feat) into a tuple of three (batch, num_samples, feat).
        face_features_choices = tuple(
            tmp_feat.squeeze(2) for tmp_feat in torch.split(face_features_choices, 1, dim=2))
    points, point_features = _base_sample_points_selected_faces(
        face_vertices_choices, face_features_choices)
    if point_features is not None:
        return points, face_choices, point_features
    else:
        return points, face_choices
If ``face_features`` arg is specified, then the interpolated features of sampled points of shape :math:`(\text{batch_size}, \text{num_samples}, \text{feature_dim})` are also returned. |
162,728 | import torch
import numpy as np
from kornia.geometry.conversions import (
quaternion_to_axis_angle,
axis_angle_to_quaternion,
)
import pymeshlab as pml
from plyfile import PlyData, PlyElement
from .mesh import PointCloud
from shared_utils.sh_utils import SH2RGB, RGB2SH
class PointCloud(NamedTuple):
    """A point-cloud record: per-point positions, colors and normals.

    Fields are numpy arrays; ``points`` and ``normals`` are (N, 3).
    NOTE(review): ``colors`` may carry extra SH-band dimensions
    (e.g. (N, C, 3) from ``ply_to_points_cloud``) — verify against callers.
    """
    # Fixed the field annotations: ``np.array`` is a factory function, not a
    # type; ``np.ndarray`` is the actual array type.
    points: np.ndarray
    colors: np.ndarray
    normals: np.ndarray
def poisson_mesh_reconstruction(points, normals=None):
    """Reconstruct a triangle mesh from a point cloud via Open3D's
    screened Poisson surface reconstruction.

    Args:
        points: [N, 3] np.ndarray of point positions.
        normals: optional [N, 3] np.ndarray; estimated from the cloud if None.

    Returns:
        (vertices, triangles): np.ndarrays of the reconstructed mesh.

    NOTE(review): the two draw_geometries() calls open blocking GUI windows
    and will hang headless runs — confirm whether they are intentional.
    """
    import open3d as o3d

    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(points)

    # statistical outlier removal; `ind` indexes the surviving points
    pcd, ind = pcd.remove_statistical_outlier(nb_neighbors=20, std_ratio=10)

    # normals: estimate if none given, otherwise keep only the survivors'
    if normals is None:
        pcd.estimate_normals()
    else:
        pcd.normals = o3d.utility.Vector3dVector(normals[ind])

    # visualize
    o3d.visualization.draw_geometries([pcd], point_show_normal=False)

    mesh, densities = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(
        pcd, depth=9
    )
    # drop the lowest-support 10% of vertices (Poisson artifacts in sparse areas)
    vertices_to_remove = densities < np.quantile(densities, 0.1)
    mesh.remove_vertices_by_mask(vertices_to_remove)

    # visualize
    o3d.visualization.draw_geometries([mesh])

    vertices = np.asarray(mesh.vertices)
    triangles = np.asarray(mesh.triangles)

    print(
        f"[INFO] poisson mesh reconstruction: {points.shape} --> {vertices.shape} / {triangles.shape}"
    )

    return vertices, triangles
162,729 | import torch
import numpy as np
from kornia.geometry.conversions import (
quaternion_to_axis_angle,
axis_angle_to_quaternion,
)
import pymeshlab as pml
from plyfile import PlyData, PlyElement
from .mesh import PointCloud
from shared_utils.sh_utils import SH2RGB, RGB2SH
def decimate_mesh(
    verts, faces, target, backend="pymeshlab", remesh=False, optimalplacement=True
):
    """Reduce a triangle mesh to roughly ``target`` faces.

    Args:
        verts: [V, 3] vertex positions.
        faces: [F, 3] triangle indices.
        target: desired face count after decimation.
        backend: "pymeshlab" (default, quadric edge collapse) or "pyfqmr".
        remesh: pymeshlab backend only — run isotropic remeshing afterwards.
        optimalplacement: default is True, but for flat mesh must turn False
            to prevent spike artifect.

    Returns:
        (verts, faces) of the decimated mesh as numpy arrays.
    """
    _ori_vert_shape = verts.shape
    _ori_face_shape = faces.shape

    if backend == "pyfqmr":
        import pyfqmr

        solver = pyfqmr.Simplify()
        solver.setMesh(verts, faces)
        solver.simplify_mesh(target_count=target, preserve_border=False, verbose=False)
        verts, faces, normals = solver.getMesh()
    else:
        m = pml.Mesh(verts, faces)
        ms = pml.MeshSet()
        ms.add_mesh(m, "mesh")  # will copy!

        # filters
        # ms.meshing_decimation_clustering(threshold=pml.Percentage(1))
        ms.meshing_decimation_quadric_edge_collapse(
            targetfacenum=int(target), optimalplacement=optimalplacement
        )

        if remesh:
            # ms.apply_coord_taubin_smoothing()
            ms.meshing_isotropic_explicit_remeshing(
                iterations=3, targetlen=pml.Percentage(1)
            )

        # extract mesh
        m = ms.current_mesh()
        verts = m.vertex_matrix()
        faces = m.face_matrix()

    print(
        f"[INFO] mesh decimation: {_ori_vert_shape} --> {verts.shape}, {_ori_face_shape} --> {faces.shape}"
    )

    return verts, faces
162,730 | import torch
import numpy as np
from kornia.geometry.conversions import (
quaternion_to_axis_angle,
axis_angle_to_quaternion,
)
import pymeshlab as pml
from plyfile import PlyData, PlyElement
from .mesh import PointCloud
from shared_utils.sh_utils import SH2RGB, RGB2SH
def clean_mesh(
    verts,
    faces,
    v_pct=1,
    min_f=64,
    min_d=20,
    repair=True,
    remesh=True,
    remesh_size=0.01,
):
    """Clean up a triangle mesh with pymeshlab: merge near-duplicate verts,
    drop degenerate faces and tiny disconnected components, optionally repair
    non-manifold geometry and run isotropic remeshing.

    Args:
        verts: [N, 3] vertex positions.
        faces: [N, 3] triangle indices.
        v_pct: merge vertices closer than this percentage of the bbox diagonal (0 disables).
        min_f: drop connected components with fewer faces than this (0 disables).
        min_d: drop components whose diameter is below this bbox-diagonal percentage (0 disables).
        repair: fix non-manifold edges/vertices.
        remesh: run isotropic explicit remeshing with target edge length ``remesh_size``.

    Returns:
        (verts, faces) of the cleaned mesh as numpy arrays.
    """
    # verts: [N, 3]
    # faces: [N, 3]
    _ori_vert_shape = verts.shape
    _ori_face_shape = faces.shape

    m = pml.Mesh(verts, faces)
    ms = pml.MeshSet()
    ms.add_mesh(m, "mesh")  # will copy!

    # filters
    ms.meshing_remove_unreferenced_vertices()  # verts not refed by any faces

    if v_pct > 0:
        ms.meshing_merge_close_vertices(
            threshold=pml.Percentage(v_pct)
        )  # 1/10000 of bounding box diagonal

    ms.meshing_remove_duplicate_faces()  # faces defined by the same verts
    ms.meshing_remove_null_faces()  # faces with area == 0

    if min_d > 0:
        ms.meshing_remove_connected_component_by_diameter(
            mincomponentdiag=pml.Percentage(min_d)
        )

    if min_f > 0:
        ms.meshing_remove_connected_component_by_face_number(mincomponentsize=min_f)

    if repair:
        # ms.meshing_remove_t_vertices(method=0, threshold=40, repeat=True)
        ms.meshing_repair_non_manifold_edges(method=0)
        ms.meshing_repair_non_manifold_vertices(vertdispratio=0)

    if remesh:
        # ms.apply_coord_taubin_smoothing()
        ms.meshing_isotropic_explicit_remeshing(
            iterations=3, targetlen=pml.AbsoluteValue(remesh_size)
        )

    # extract mesh
    m = ms.current_mesh()
    verts = m.vertex_matrix()
    faces = m.face_matrix()

    print(
        f"[INFO] mesh cleaning: {_ori_vert_shape} --> {verts.shape}, {_ori_face_shape} --> {faces.shape}"
    )

    return verts, faces
162,731 | import torch
import numpy as np
from kornia.geometry.conversions import (
quaternion_to_axis_angle,
axis_angle_to_quaternion,
)
import pymeshlab as pml
from plyfile import PlyData, PlyElement
from .mesh import PointCloud
from shared_utils.sh_utils import SH2RGB, RGB2SH
def read_gs_ply(plydata):
class PointCloud(NamedTuple):
def SH2RGB(sh):
def ply_to_points_cloud(plydata):
    """Convert parsed Gaussian-splat ply data into a PointCloud.

    Colors are recovered from the spherical-harmonic coefficients (DC band
    concatenated with the higher-order bands); normals are zero-filled.
    """
    xyz, features_dc, features_extra, opacities, scales, rots = read_gs_ply(plydata)
    # (P, 3, C) -> (P, C, 3) so the DC band and the extra SH bands can be
    # concatenated along the band dimension (dim 1).
    dc_bands = features_dc.transpose(0, 2, 1)
    extra_bands = features_extra.transpose(0, 2, 1)
    sh_coeffs = np.concatenate((dc_bands, extra_bands), axis=1)
    return PointCloud(points=xyz, colors=SH2RGB(sh_coeffs), normals=np.zeros_like(xyz))
162,732 | import torch
import numpy as np
from kornia.geometry.conversions import (
quaternion_to_axis_angle,
axis_angle_to_quaternion,
)
import pymeshlab as pml
from plyfile import PlyData, PlyElement
from .mesh import PointCloud
from shared_utils.sh_utils import SH2RGB, RGB2SH
The provided code snippet includes necessary dependencies for implementing the `get_target_axis_and_scale` function. Write a Python function `def get_target_axis_and_scale(axis_string, scale_value=1.0)` to solve the following problem:
The coordinate system inverts when: 1. any of the axes is inverted, or 2. two of the axes are switched. If the coordinate system inverts twice in a row, it ends up not inverted.
Here is the function:
def get_target_axis_and_scale(axis_string, scale_value=1.0):
    """Map an axis spec like ["+x", "-z", "+y"] to permutation/scale arrays.

    The coordinate system's handedness inverts when:
      1. any single axis is negated, or
      2. exactly two axes are switched.
    If it inverts twice in a row it ends up not inverted, so callers should
    test ``coordinate_invert_count % 2``.

    Returns:
        (target_axis, target_scale, coordinate_invert_count): axis indices,
        signed per-axis scales, and the number of handedness inversions.
    """
    names = ["x", "y", "z"]
    target_axis = []
    target_scale = []
    coordinate_invert_count = 0
    swapped = 0
    for position in range(len(names)):
        sign, letter = axis_string[position][0], axis_string[position][1]
        if sign == "-":
            target_scale.append(-scale_value)
            coordinate_invert_count += 1
        else:
            target_scale.append(scale_value)
        mapped = names.index(letter)
        if mapped != position:
            swapped += 1
        target_axis.append(mapped)
    if swapped == 2:
        coordinate_invert_count += 1
    return target_axis, target_scale, coordinate_invert_count
162,733 | import torch
import numpy as np
from kornia.geometry.conversions import (
quaternion_to_axis_angle,
axis_angle_to_quaternion,
)
import pymeshlab as pml
from plyfile import PlyData, PlyElement
from .mesh import PointCloud
from shared_utils.sh_utils import SH2RGB, RGB2SH
def construct_list_of_gs_attributes(features_dc, features_rest, scaling, rotation):
    """Build the ordered ply property-name list for a Gaussian-splat vertex.

    Order: position, normal, DC SH coeffs, higher-order SH coeffs, opacity,
    per-axis scales, rotation quaternion components — matching the column
    order used by ``write_gs_ply``.
    """
    names = ['x', 'y', 'z', 'nx', 'ny', 'nz']
    n_dc = features_dc.shape[1] * features_dc.shape[2]
    n_rest = features_rest.shape[1] * features_rest.shape[2]
    names += ['f_dc_{}'.format(i) for i in range(n_dc)]
    names += ['f_rest_{}'.format(i) for i in range(n_rest)]
    names.append('opacity')
    names += ['scale_{}'.format(i) for i in range(scaling.shape[1])]
    names += ['rot_{}'.format(i) for i in range(rotation.shape[1])]
    return names
def write_gs_ply(xyz, normals, f_dc, f_rest, opacities, scale, rotation, list_of_attributes):
    """Pack per-point Gaussian-splat arrays into a single-element PlyData.

    All inputs are 2-D arrays with one row per point; ``list_of_attributes``
    must name the columns in the exact concatenation order below.
    """
    dtype_full = [(name, 'f4') for name in list_of_attributes]
    packed = np.concatenate((xyz, normals, f_dc, f_rest, opacities, scale, rotation), axis=1)
    vertex_data = np.empty(xyz.shape[0], dtype=dtype_full)
    # structured-array rows must be assigned as tuples
    vertex_data[:] = [tuple(row) for row in packed]
    return PlyData([PlyElement.describe(vertex_data, 'vertex')])
def read_gs_ply(plydata):
    """Unpack a Gaussian-splat PlyData into raw per-point numpy arrays.

    Args:
        plydata: a plyfile PlyData whose first element carries the standard
            Gaussian-splat properties (x/y/z, opacity, f_dc_*, f_rest_*,
            scale_*, rot_*).

    Returns:
        (xyz, features_dc, features_extra, opacities, scales, rots):
        xyz (P, 3); features_dc (P, 3, 1); features_extra
        (P, 3, (max_sh_degree + 1)**2 - 1); opacities (P, 1); scales and rots
        shaped by however many scale_/rot properties the file declares.
    """
    xyz = np.stack((np.asarray(plydata.elements[0]["x"]),
                    np.asarray(plydata.elements[0]["y"]),
                    np.asarray(plydata.elements[0]["z"])),  axis=1)
    opacities = np.asarray(plydata.elements[0]["opacity"])[..., np.newaxis]

    # DC spherical-harmonic band: one coefficient per RGB channel
    features_dc = np.zeros((xyz.shape[0], 3, 1))
    features_dc[:, 0, 0] = np.asarray(plydata.elements[0]["f_dc_0"])
    features_dc[:, 1, 0] = np.asarray(plydata.elements[0]["f_dc_1"])
    features_dc[:, 2, 0] = np.asarray(plydata.elements[0]["f_dc_2"])

    # higher-order SH bands; the helper (defined elsewhere in this module)
    # infers the degree from how many f_rest_* properties are present
    max_sh_degree, extra_f_names = calculate_max_sh_degree_from_gs_ply(plydata)
    features_extra = np.zeros((xyz.shape[0], len(extra_f_names)))
    for idx, attr_name in enumerate(extra_f_names):
        features_extra[:, idx] = np.asarray(plydata.elements[0][attr_name])
    # Reshape (P,F*SH_coeffs) to (P, F, SH_coeffs except DC)
    features_extra = features_extra.reshape((features_extra.shape[0], 3, (max_sh_degree + 1) ** 2 - 1))

    scale_names = [p.name for p in plydata.elements[0].properties if p.name.startswith("scale_")]
    scales = np.zeros((xyz.shape[0], len(scale_names)))
    for idx, attr_name in enumerate(scale_names):
        scales[:, idx] = np.asarray(plydata.elements[0][attr_name])

    rot_names = [p.name for p in plydata.elements[0].properties if p.name.startswith("rot")]
    rots = np.zeros((xyz.shape[0], len(rot_names)))
    for idx, attr_name in enumerate(rot_names):
        rots[:, idx] = np.asarray(plydata.elements[0][attr_name])

    return xyz, features_dc, features_extra, opacities, scales, rots
def switch_vector_axis(vector3_tensor, target_axis):
    """Permute the xyz columns of an (N, 3) tensor in place and return it.

    ``target_axis`` gives, for each output column, which input column to
    take: e.g. with target_axis = (2, 0, 1), [[1, 2, 3]] becomes [[3, 1, 2]].
    """
    # advanced indexing on the right produces a copy, so the in-place
    # column assignment is safe from aliasing
    permuted = vector3_tensor[:, target_axis]
    vector3_tensor[:, [0, 1, 2]] = permuted
    return vector3_tensor
The provided code snippet includes necessary dependencies for implementing the `switch_ply_axis_and_scale` function. Write a Python function `def switch_ply_axis_and_scale(plydata, target_axis, target_scale, coordinate_invert_count)` to solve the following problem:
Args: target_axis (array): shape (3) target_scale (array): shape (3)
Here is the function:
def switch_ply_axis_and_scale(plydata, target_axis, target_scale, coordinate_invert_count):
    """
    Remap a Gaussian-splat ply to a new axis order / per-axis scale and
    return the rewritten PlyData.

    Args:
        target_axis (array): shape (3)
        target_scale (array): shape (3)
        coordinate_invert_count (int): handedness inversions from
            get_target_axis_and_scale; odd counts flip the rotations.

    NOTE(review): requires CUDA — tensors are moved to the GPU for the
    permutation; confirm this is acceptable for all callers.
    """
    xyz, features_dc, features_extra, opacities, scales, rots = read_gs_ply(plydata)
    normals = np.zeros_like(xyz)
    # flatten SH bands back to 2-D for write_gs_ply
    features_dc_2d = features_dc.reshape(features_dc.shape[0], features_dc.shape[1]*features_dc.shape[2])
    features_extra_2d = features_extra.reshape(features_extra.shape[0], features_extra.shape[1]*features_extra.shape[2])

    target_scale = torch.tensor(target_scale).float().cuda()
    xyz = switch_vector_axis(torch.tensor(xyz).float().cuda() * target_scale, target_axis).detach().cpu().numpy()
    # per-axis log-scales are permuted but NOT multiplied by target_scale
    scales = switch_vector_axis(torch.tensor(scales).float().cuda(), target_axis).detach().cpu().numpy()

    # change rotation representation from quaternion (w, x, y, z) to axis angle vector (x, y, z) to make swich axis easier
    rots_axis_angle = quaternion_to_axis_angle(torch.tensor(rots).float().cuda())
    rots_axis_angle = switch_vector_axis(rots_axis_angle * target_scale, target_axis)
    """
    Since axis–angle vector is composed of axis (unit vector/direction) and clockwise radians angle (vector magnitude), 
    so in order to invert the sign of angle when coordinate system inverts, we also need to invert the direction of axis–angle vector
    """
    if coordinate_invert_count % 2 != 0:
        rots_axis_angle = -rots_axis_angle

    rots = axis_angle_to_quaternion(rots_axis_angle).detach().cpu().numpy()

    return write_gs_ply(xyz, normals, features_dc_2d, features_extra_2d, opacities, scales, rots, construct_list_of_gs_attributes(features_dc, features_extra, scales, rots))
162,734 | import torch
import numpy as np
from kornia.geometry.conversions import (
quaternion_to_axis_angle,
axis_angle_to_quaternion,
)
import pymeshlab as pml
from plyfile import PlyData, PlyElement
from .mesh import PointCloud
from shared_utils.sh_utils import SH2RGB, RGB2SH
def switch_vector_axis(vector3_tensor, target_axis):
    """Reorder the three columns of an (N, 3) tensor in place.

    Each output column i receives input column target_axis[i]; the mutated
    tensor is also returned for chaining.

    Example: [[1, 2, 3]] with target_axis (2, 0, 1) -> [[3, 1, 2]].
    """
    vector3_tensor[:, [0, 1, 2]] = vector3_tensor[:, target_axis]
    return vector3_tensor
The provided code snippet includes necessary dependencies for implementing the `switch_mesh_axis_and_scale` function. Write a Python function `def switch_mesh_axis_and_scale(mesh, target_axis, target_scale, flip_normal=False)` to solve the following problem:
Args: target_axis (array): shape (3) target_scale (array): shape (3)
Here is the function:
def switch_mesh_axis_and_scale(mesh, target_axis, target_scale, flip_normal=False):
    """Permute and scale a mesh's vertex positions and normals in place.

    Args:
        target_axis (array): shape (3) permutation of axis indices.
        target_scale (array): shape (3) per-axis scales (negatives mirror).
        flip_normal (bool): additionally negate all vertex normals.

    NOTE(review): tensors are moved to CUDA; confirm GPU availability.
    """
    scale = torch.tensor(target_scale).float().cuda()
    mesh.v = switch_vector_axis(mesh.v * scale, target_axis)
    mesh.vn = switch_vector_axis(mesh.vn * scale, target_axis)
    if flip_normal:
        mesh.vn *= -1
    return mesh
162,735 | from ast import Str
import logging
import math
import os
import sys
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Callable, Optional
import json
import shutil
from flax import training
import numpy as np
import datasets
from datasets import load_dataset
from tqdm import tqdm
import jax
import jax.profiler
import jax.numpy as jnp
import optax
import transformers
import flax
from flax import jax_utils, traverse_util
from flax.jax_utils import unreplicate
from flax.training import train_state
from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key
from flax.training.checkpoints import save_checkpoint, restore_checkpoint
from flax.serialization import to_bytes, from_bytes
from transformers import (
CONFIG_MAPPING,
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
AutoConfig,
AutoTokenizer,
FlaxAutoModelForCausalLM,
HfArgumentParser,
TrainingArguments,
is_tensorboard_available,
)
from transformers.testing_utils import CaptureLogger
from importlib.util import find_spec
from utils import PrefetchDataloader, make_batch
def write_train_metric(summary_writer, train_metrics, train_time, step):
    """Log accumulated training metrics (and wall-clock time) to TensorBoard."""
    summary_writer.scalar("train_time", train_time, step)

    stacked = get_metrics(train_metrics)
    for key, values in stacked.items():
        # back-fill one scalar per accumulated micro-step so the
        # TensorBoard x-axis stays one point per global step
        for offset, value in enumerate(values):
            summary_writer.scalar(f"train_{key}", value, step - len(values) + offset + 1)
162,736 | from ast import Str
import logging
import math
import os
import sys
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Callable, Optional
import json
import shutil
from flax import training
import numpy as np
import datasets
from datasets import load_dataset
from tqdm import tqdm
import jax
import jax.profiler
import jax.numpy as jnp
import optax
import transformers
import flax
from flax import jax_utils, traverse_util
from flax.jax_utils import unreplicate
from flax.training import train_state
from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key
from flax.training.checkpoints import save_checkpoint, restore_checkpoint
from flax.serialization import to_bytes, from_bytes
from transformers import (
CONFIG_MAPPING,
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
AutoConfig,
AutoTokenizer,
FlaxAutoModelForCausalLM,
HfArgumentParser,
TrainingArguments,
is_tensorboard_available,
)
from transformers.testing_utils import CaptureLogger
from importlib.util import find_spec
from utils import PrefetchDataloader, make_batch
def write_eval_metric(summary_writer, eval_metrics, step):
    """Log every evaluation metric to TensorBoard under an ``eval_`` prefix."""
    for name, value in eval_metrics.items():
        summary_writer.scalar(f"eval_{name}", value, step)
162,737 | from ast import Str
import logging
import math
import os
import sys
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Callable, Optional
import json
import shutil
from flax import training
import numpy as np
import datasets
from datasets import load_dataset
from tqdm import tqdm
import jax
import jax.profiler
import jax.numpy as jnp
import optax
import transformers
import flax
from flax import jax_utils, traverse_util
from flax.jax_utils import unreplicate
from flax.training import train_state
from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key
from flax.training.checkpoints import save_checkpoint, restore_checkpoint
from flax.serialization import to_bytes, from_bytes
from transformers import (
CONFIG_MAPPING,
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
AutoConfig,
AutoTokenizer,
FlaxAutoModelForCausalLM,
HfArgumentParser,
TrainingArguments,
is_tensorboard_available,
)
from transformers.testing_utils import CaptureLogger
from importlib.util import find_spec
from utils import PrefetchDataloader, make_batch
The provided code snippet includes necessary dependencies for implementing the `create_learning_rate_fn` function. Write a Python function `def create_learning_rate_fn( num_train_steps: int, train_batch_size: int, num_warmup_steps: int, learning_rate: float ) -> Callable[[int], jnp.array]` to solve the following problem:
Returns a linear warmup, linear decay learning rate function.
Here is the function:
def create_learning_rate_fn(
    num_train_steps: int, train_batch_size: int, num_warmup_steps: int, learning_rate: float
) -> Callable[[int], jnp.array]:
    """Returns a linear warmup, linear decay learning rate function.

    NOTE(review): ``train_batch_size`` is unused in the body; kept only for
    signature compatibility with callers.
    """
    warmup = optax.linear_schedule(
        init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps
    )
    decay = optax.linear_schedule(
        init_value=learning_rate, end_value=0, transition_steps=num_train_steps - num_warmup_steps
    )
    return optax.join_schedules(schedules=[warmup, decay], boundaries=[num_warmup_steps])
162,738 | from ast import Str
import logging
import math
import os
import sys
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Callable, Optional
import json
import shutil
from flax import training
import numpy as np
import datasets
from datasets import load_dataset
from tqdm import tqdm
import jax
import jax.profiler
import jax.numpy as jnp
import optax
import transformers
import flax
from flax import jax_utils, traverse_util
from flax.jax_utils import unreplicate
from flax.training import train_state
from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key
from flax.training.checkpoints import save_checkpoint, restore_checkpoint
from flax.serialization import to_bytes, from_bytes
from transformers import (
CONFIG_MAPPING,
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
AutoConfig,
AutoTokenizer,
FlaxAutoModelForCausalLM,
HfArgumentParser,
TrainingArguments,
is_tensorboard_available,
)
from transformers.testing_utils import CaptureLogger
from importlib.util import find_spec
from utils import PrefetchDataloader, make_batch
def gpt3_schedule(warmup_steps,
                  total_steps,
                  peak_lr,
                  end_lr):
    """Build the GPT-3 learning-rate schedule: linear warmup to ``peak_lr``
    over ``warmup_steps``, then a half-cosine anneal down to ``end_lr`` over
    ``total_steps``.
    """
    def schedule(step):
        warmup_frac = jnp.clip(step, 0, warmup_steps) / warmup_steps
        anneal_frac = jnp.clip(step - warmup_steps, 0, total_steps) / total_steps
        cosine_drop = (peak_lr - end_lr) * (1 - jnp.cos(jnp.pi * anneal_frac)) / 2
        return warmup_frac * peak_lr - cosine_drop

    return schedule
162,739 | from ast import Str
import logging
import math
import os
import sys
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Callable, Optional
import json
import shutil
from flax import training
import numpy as np
import datasets
from datasets import load_dataset
from tqdm import tqdm
import jax
import jax.profiler
import jax.numpy as jnp
import optax
import transformers
import flax
from flax import jax_utils, traverse_util
from flax.jax_utils import unreplicate
from flax.training import train_state
from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key
from flax.training.checkpoints import save_checkpoint, restore_checkpoint
from flax.serialization import to_bytes, from_bytes
from transformers import (
CONFIG_MAPPING,
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
AutoConfig,
AutoTokenizer,
FlaxAutoModelForCausalLM,
HfArgumentParser,
TrainingArguments,
is_tensorboard_available,
)
from transformers.testing_utils import CaptureLogger
from importlib.util import find_spec
from utils import PrefetchDataloader, make_batch
logger = logging.getLogger(__name__)
def mb_item(x):
    """Unwrap a 0-d array/DeviceArray to a plain Python scalar; pass anything
    without an ``.item()`` method through unchanged."""
    if hasattr(x, "item"):
        return x.item()
    return x
The provided code snippet includes necessary dependencies for implementing the `save_model_checkpoint` function. Write a Python function `def save_model_checkpoint(model, save_dir, state, with_opt:bool=True, push_to_hub:bool=False)` to solve the following problem:
If `push_to_hub` is True, will save to `save_dir`. Otherwise will save to `save_dir/ckpt-{step}`.
Here is the function:
def save_model_checkpoint(model, save_dir, state, with_opt:bool=True, push_to_hub:bool=False):
    """
    Save model weights (and optionally optimizer state) from a replicated
    train state.

    If `push_to_hub` is True, will save to `save_dir`. Otherwise will save to `save_dir/ckpt-{step}`.

    Args:
        model: a Flax/HF model exposing ``save_pretrained``.
        save_dir: target directory (or hub repo working dir).
        state: replicated flax TrainState; unreplicated before saving.
        with_opt: also serialize ``opt_state`` (msgpack) and the step counter
            (JSON) alongside the weights, for exact training resumption.
        push_to_hub: forward the save as a hub commit instead of a local ckpt dir.
    """
    # pull a single copy of params/opt_state off the devices
    state = jax_utils.unreplicate(state)
    logger.info(f"SAVING CHECKPOINT IN {save_dir}...")
    if not push_to_hub:
        # state.step has already been incremented past the last completed step
        save_dir = f"{save_dir}/ckpt-{mb_item(state.step)-1}"
    model.save_pretrained(
        save_dir,
        params=state.params,
        push_to_hub=push_to_hub,
        commit_message=f"Saving weights and logs at step {mb_item(state.step)-1}",
    )
    if with_opt:
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
    logger.info("checkpoint saved")
162,740 | from ast import Str
import logging
import math
import os
import sys
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Callable, Optional
import json
import shutil
from flax import training
import numpy as np
import datasets
from datasets import load_dataset
from tqdm import tqdm
import jax
import jax.profiler
import jax.numpy as jnp
import optax
import transformers
import flax
from flax import jax_utils, traverse_util
from flax.jax_utils import unreplicate
from flax.training import train_state
from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key
from flax.training.checkpoints import save_checkpoint, restore_checkpoint
from flax.serialization import to_bytes, from_bytes
from transformers import (
CONFIG_MAPPING,
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
AutoConfig,
AutoTokenizer,
FlaxAutoModelForCausalLM,
HfArgumentParser,
TrainingArguments,
is_tensorboard_available,
)
from transformers.testing_utils import CaptureLogger
from importlib.util import find_spec
from utils import PrefetchDataloader, make_batch
def _zeros_tree_like(inp_tree):
def fake_update(state):
    """Run the inner optimizer once with all-zero updates.

    Refreshes `inner_opt_state` without changing the params, leaving the
    MultiSteps bookkeeping fields untouched.
    """
    zero_updates = _zeros_tree_like(state.params)
    old = state.opt_state
    _, refreshed_inner = state.tx.inner_opt.update(zero_updates, old.inner_opt_state, state.params)
    return state.replace(
        opt_state=optax.MultiStepsState(
            mini_step=old.mini_step,
            gradient_step=old.gradient_step,
            inner_opt_state=refreshed_inner,
            acc_grads=old.acc_grads,
        )
    )
162,741 | from ast import Str
import logging
import math
import os
import sys
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Callable, Optional
import json
import shutil
from flax import training
import numpy as np
import datasets
from datasets import load_dataset
from tqdm import tqdm
import jax
import jax.profiler
import jax.numpy as jnp
import optax
import transformers
import flax
from flax import jax_utils, traverse_util
from flax.jax_utils import unreplicate
from flax.training import train_state
from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key
from flax.training.checkpoints import save_checkpoint, restore_checkpoint
from flax.serialization import to_bytes, from_bytes
from transformers import (
CONFIG_MAPPING,
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
AutoConfig,
AutoTokenizer,
FlaxAutoModelForCausalLM,
HfArgumentParser,
TrainingArguments,
is_tensorboard_available,
)
from transformers.testing_utils import CaptureLogger
from importlib.util import find_spec
from utils import PrefetchDataloader, make_batch
logger = logging.getLogger(__name__)
def reinstantiate_states(opt_state):
    """Rebuild each optax state namedtuple from the installed optax module.

    After msgpack deserialization the namedtuple classes can differ from the
    live optax classes; recreating them by name avoids type conflicts.
    """
    rebuilt = []
    for entry in opt_state:
        if isinstance(entry, list):
            rebuilt.append(reinstantiate_states(entry))
            continue
        state_cls = getattr(optax, type(entry).__name__)
        rebuilt.append(state_cls(**{field: getattr(entry, field) for field in entry._fields}))
    return rebuilt
def restore_model_checkpoint(save_dir, state):
    """Restore params, optimizer state and step counter from `save_dir` into `state`.

    Expects the files written by `save_model_checkpoint`: flax_model.msgpack,
    opt_state.msgpack and training_state.json.
    """
    logger.info(f"RESTORING CHECKPOINT FROM {save_dir}...")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())
    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())
    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]
    logger.info("checkpoint restored")
    # reinstantiate inner opt state to avoid type conflict
    if hasattr(opt_state, "inner_opt_state"):
        # fixed message typo ("ofmultisteps") and use the module logger
        # instead of a bare print, consistent with the rest of this file
        logger.info("restoring state of multisteps optimizer")
        inner_opt_state = reinstantiate_states(opt_state.inner_opt_state)
        ms_state_dict = {k: getattr(state.opt_state, k) for k in state.opt_state._fields}
        ms_state_dict["inner_opt_state"] = inner_opt_state
        opt_state = optax.MultiStepsState(**ms_state_dict)
    return state.replace(step=step, params=params, opt_state=opt_state)
162,742 | from ast import Str
import logging
import math
import os
import sys
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Callable, Optional
import json
import shutil
from flax import training
import numpy as np
import datasets
from datasets import load_dataset
from tqdm import tqdm
import jax
import jax.profiler
import jax.numpy as jnp
import optax
import transformers
import flax
from flax import jax_utils, traverse_util
from flax.jax_utils import unreplicate
from flax.training import train_state
from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key
from flax.training.checkpoints import save_checkpoint, restore_checkpoint
from flax.serialization import to_bytes, from_bytes
from transformers import (
CONFIG_MAPPING,
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
AutoConfig,
AutoTokenizer,
FlaxAutoModelForCausalLM,
HfArgumentParser,
TrainingArguments,
is_tensorboard_available,
)
from transformers.testing_utils import CaptureLogger
from importlib.util import find_spec
from utils import PrefetchDataloader, make_batch
logger = logging.getLogger(__name__)
The provided code snippet includes necessary dependencies for implementing the `rotate_checkpoints` function. Write a Python function `def rotate_checkpoints(ckpt_dir:str, save_total_limit:int)` to solve the following problem:
Removes older checkpoints so that `save_total_limit` checkpoints are kept
Here is the function:
def rotate_checkpoints(ckpt_dir:str, save_total_limit:int):
    """Removes older checkpoints so that `save_total_limit` checkpoints are kept.

    Args:
        ckpt_dir: directory containing `ckpt-<step>` subdirectories.
        save_total_limit: number of most recent checkpoints to keep; None or
            values <= 0 disable rotation entirely.
    """
    # Guard: the original slice `[:-save_total_limit]` silently kept everything
    # for limit == 0 and raised TypeError for None — make both explicit no-ops.
    if not save_total_limit or save_total_limit <= 0:
        return
    # TODO: what to remove is decided using step number only, we might want to improve that
    ckpts = [str(x) for x in Path(ckpt_dir).glob("ckpt-*")]
    # sort checkpoints by step (the numeric suffix after the last '-')
    ckpts_sorted = sorted(ckpts, key=lambda x: int(x.split('-')[-1]))
    for ckpt in ckpts_sorted[:-save_total_limit]:
        logger.info(f"Deleting older checkpoint [{ckpt}] due to save_total_limit ({save_total_limit})")
        shutil.rmtree(ckpt)
162,743 | import logging
import math
import os
import sys
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Callable, Optional
import json
import shutil
import datasets
from datasets import Dataset, load_dataset
from tqdm import tqdm
import jax
import jax.profiler
import jax.numpy as jnp
import optax
import transformers
from flax import jax_utils, traverse_util
from flax.jax_utils import unreplicate
from flax.training import train_state
from flax.training.checkpoints import save_checkpoint, restore_checkpoint
from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key
from flax.serialization import to_bytes, from_bytes
from transformers import (
CONFIG_MAPPING,
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
AutoConfig,
AutoTokenizer,
FlaxAutoModelForCausalLM,
HfArgumentParser,
TrainingArguments,
is_tensorboard_available,
IntervalStrategy
)
from transformers.testing_utils import CaptureLogger
from importlib.util import find_spec
The provided code snippet includes necessary dependencies for implementing the `data_loader` function. Write a Python function `def data_loader(rng: jax.random.PRNGKey, dataset: Dataset, batch_size: int, shuffle: bool = False)` to solve the following problem:
Returns batches of size `batch_size` from truncated `dataset`, sharded over all local devices. Shuffle batches if `shuffle` is `True`.
Here is the function:
def data_loader(rng: jax.random.PRNGKey, dataset: Dataset, batch_size: int, shuffle: bool = False):
    """
    Returns batches of size `batch_size` from truncated `dataset`, sharded over all local devices.
    Shuffle batches if `shuffle` is `True`.
    """
    steps_per_epoch = len(dataset) // batch_size
    if shuffle:
        order = jax.random.permutation(rng, len(dataset))
    else:
        order = jnp.arange(len(dataset))
    # Drop the trailing incomplete batch, then view as (steps, batch_size).
    order = order[: steps_per_epoch * batch_size]
    order = order.reshape((steps_per_epoch, batch_size))
    for batch_indices in order:
        raw = dataset[batch_indices]
        yield shard({k: jnp.array(v) for k, v in raw.items()})
162,744 | import logging
import math
import os
import sys
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Callable, Optional
import json
import shutil
import datasets
from datasets import Dataset, load_dataset
from tqdm import tqdm
import jax
import jax.profiler
import jax.numpy as jnp
import optax
import transformers
from flax import jax_utils, traverse_util
from flax.jax_utils import unreplicate
from flax.training import train_state
from flax.training.checkpoints import save_checkpoint, restore_checkpoint
from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key
from flax.serialization import to_bytes, from_bytes
from transformers import (
CONFIG_MAPPING,
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
AutoConfig,
AutoTokenizer,
FlaxAutoModelForCausalLM,
HfArgumentParser,
TrainingArguments,
is_tensorboard_available,
IntervalStrategy
)
from transformers.testing_utils import CaptureLogger
from importlib.util import find_spec
def write_train_metric(summary_writer, train_metrics, train_time, step):
    """Log accumulated per-step training metrics (and wall time) to TensorBoard."""
    summary_writer.scalar("train_time", train_time, step)
    stacked = get_metrics(train_metrics)
    for name, values in stacked.items():
        # entry j of `values` belongs to step (step - len(values) + j + 1)
        for offset, value in enumerate(values):
            summary_writer.scalar(f"train_{name}", value, step - len(values) + offset + 1)
162,745 | import logging
import math
import os
import sys
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Callable, Optional
import json
import shutil
import datasets
from datasets import Dataset, load_dataset
from tqdm import tqdm
import jax
import jax.profiler
import jax.numpy as jnp
import optax
import transformers
from flax import jax_utils, traverse_util
from flax.jax_utils import unreplicate
from flax.training import train_state
from flax.training.checkpoints import save_checkpoint, restore_checkpoint
from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key
from flax.serialization import to_bytes, from_bytes
from transformers import (
CONFIG_MAPPING,
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
AutoConfig,
AutoTokenizer,
FlaxAutoModelForCausalLM,
HfArgumentParser,
TrainingArguments,
is_tensorboard_available,
IntervalStrategy
)
from transformers.testing_utils import CaptureLogger
from importlib.util import find_spec
def write_eval_metric(summary_writer, eval_metrics, step):
    """Log every evaluation metric to TensorBoard under an `eval_` prefix."""
    for name, value in eval_metrics.items():
        summary_writer.scalar(f"eval_{name}", value, step)
162,746 | import logging
import math
import os
import sys
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Callable, Optional
import json
import shutil
import datasets
from datasets import Dataset, load_dataset
from tqdm import tqdm
import jax
import jax.profiler
import jax.numpy as jnp
import optax
import transformers
from flax import jax_utils, traverse_util
from flax.jax_utils import unreplicate
from flax.training import train_state
from flax.training.checkpoints import save_checkpoint, restore_checkpoint
from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key
from flax.serialization import to_bytes, from_bytes
from transformers import (
CONFIG_MAPPING,
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
AutoConfig,
AutoTokenizer,
FlaxAutoModelForCausalLM,
HfArgumentParser,
TrainingArguments,
is_tensorboard_available,
IntervalStrategy
)
from transformers.testing_utils import CaptureLogger
from importlib.util import find_spec
The provided code snippet includes necessary dependencies for implementing the `create_learning_rate_fn` function. Write a Python function `def create_learning_rate_fn( train_ds_size: int, train_batch_size: int, num_train_epochs: int, num_warmup_steps: int, learning_rate: float ) -> Callable[[int], jnp.array]` to solve the following problem:
Returns a linear warmup, linear_decay learning rate function.
Here is the function:
def create_learning_rate_fn(
    train_ds_size: int, train_batch_size: int, num_train_epochs: int, num_warmup_steps: int, learning_rate: float
) -> Callable[[int], jnp.array]:
    """Returns a linear warmup, linear_decay learning rate function."""
    total_steps = (train_ds_size // train_batch_size) * num_train_epochs
    warmup = optax.linear_schedule(
        init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps
    )
    decay = optax.linear_schedule(
        init_value=learning_rate, end_value=0, transition_steps=total_steps - num_warmup_steps
    )
    return optax.join_schedules(schedules=[warmup, decay], boundaries=[num_warmup_steps])
162,747 | import logging
import math
import os
import sys
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Callable, Optional
import json
import shutil
import datasets
from datasets import Dataset, load_dataset
from tqdm import tqdm
import jax
import jax.profiler
import jax.numpy as jnp
import optax
import transformers
from flax import jax_utils, traverse_util
from flax.jax_utils import unreplicate
from flax.training import train_state
from flax.training.checkpoints import save_checkpoint, restore_checkpoint
from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key
from flax.serialization import to_bytes, from_bytes
from transformers import (
CONFIG_MAPPING,
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
AutoConfig,
AutoTokenizer,
FlaxAutoModelForCausalLM,
HfArgumentParser,
TrainingArguments,
is_tensorboard_available,
IntervalStrategy
)
from transformers.testing_utils import CaptureLogger
from importlib.util import find_spec
logger = logging.getLogger(__name__)
def mb_item(x):
    """Return a plain Python scalar for 0-d array-likes; pass anything else through."""
    if hasattr(x, "item"):
        return x.item()
    return x
The provided code snippet includes necessary dependencies for implementing the `save_model_checkpoint` function. Write a Python function `def save_model_checkpoint(model, save_dir, state, with_opt:bool=True, push_to_hub:bool=False)` to solve the following problem:
If `push_to_hub` is True, will save to `save_dir`. Otherwise will save to `save_dir/ckpt-{step}`.
Here is the function:
def save_model_checkpoint(model, save_dir, state, with_opt:bool=True, push_to_hub:bool=False):
    """
    If `push_to_hub` is True, will save to `save_dir`. Otherwise will save to `save_dir/ckpt-{step}`.

    Args:
        model: Flax model; its `save_pretrained` writes weights/config.
        save_dir: target directory (or hub working dir when pushing).
        state: replicated TrainState; unreplicated here before saving.
        with_opt: also serialize optimizer state and the step counter.
        push_to_hub: push the checkpoint to the HF hub instead of a step subdir.
    """
    state = jax_utils.unreplicate(state)
    # state.step points at the *next* step, hence the -1 in dir name / message.
    step = mb_item(state.step) - 1
    logger.info(f"SAVING CHECKPOINT IN {save_dir}...")
    if not push_to_hub:
        save_dir = f"{save_dir}/ckpt-{step}"
    model.save_pretrained(
        save_dir,
        params=state.params,
        push_to_hub=push_to_hub,
        commit_message=f"Saving weights and logs at step {step}",
    )
    if with_opt:
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            # mb_item (not .item()) so a plain-int step also works — consistent
            # with its use elsewhere in this file
            json.dump({"step": mb_item(state.step)}, f)
    logger.info("checkpoint saved")
162,748 | import logging
import math
import os
import sys
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Callable, Optional
import json
import shutil
import datasets
from datasets import Dataset, load_dataset
from tqdm import tqdm
import jax
import jax.profiler
import jax.numpy as jnp
import optax
import transformers
from flax import jax_utils, traverse_util
from flax.jax_utils import unreplicate
from flax.training import train_state
from flax.training.checkpoints import save_checkpoint, restore_checkpoint
from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key
from flax.serialization import to_bytes, from_bytes
from transformers import (
CONFIG_MAPPING,
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
AutoConfig,
AutoTokenizer,
FlaxAutoModelForCausalLM,
HfArgumentParser,
TrainingArguments,
is_tensorboard_available,
IntervalStrategy
)
from transformers.testing_utils import CaptureLogger
from importlib.util import find_spec
def _zeros_tree_like(inp_tree):
def fake_update(state):
    """Run the inner optimizer once with all-zero updates.

    Refreshes `inner_opt_state` without changing the params, leaving the
    MultiSteps bookkeeping fields untouched.
    """
    zero_updates = _zeros_tree_like(state.params)
    old = state.opt_state
    _, refreshed_inner = state.tx.inner_opt.update(zero_updates, old.inner_opt_state, state.params)
    return state.replace(
        opt_state=optax.MultiStepsState(
            mini_step=old.mini_step,
            gradient_step=old.gradient_step,
            inner_opt_state=refreshed_inner,
            acc_grads=old.acc_grads,
        )
    )
162,749 | import logging
import math
import os
import sys
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Callable, Optional
import json
import shutil
import datasets
from datasets import Dataset, load_dataset
from tqdm import tqdm
import jax
import jax.profiler
import jax.numpy as jnp
import optax
import transformers
from flax import jax_utils, traverse_util
from flax.jax_utils import unreplicate
from flax.training import train_state
from flax.training.checkpoints import save_checkpoint, restore_checkpoint
from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key
from flax.serialization import to_bytes, from_bytes
from transformers import (
CONFIG_MAPPING,
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
AutoConfig,
AutoTokenizer,
FlaxAutoModelForCausalLM,
HfArgumentParser,
TrainingArguments,
is_tensorboard_available,
IntervalStrategy
)
from transformers.testing_utils import CaptureLogger
from importlib.util import find_spec
logger = logging.getLogger(__name__)
def reinstantiate_states(opt_state):
    """Rebuild each optax state namedtuple from the installed optax module.

    After msgpack deserialization the namedtuple classes can differ from the
    live optax classes; recreating them by name avoids type conflicts.
    """
    rebuilt = []
    for entry in opt_state:
        if isinstance(entry, list):
            rebuilt.append(reinstantiate_states(entry))
            continue
        state_cls = getattr(optax, type(entry).__name__)
        rebuilt.append(state_cls(**{field: getattr(entry, field) for field in entry._fields}))
    return rebuilt
def restore_model_checkpoint(save_dir, state):
    """Restore params, optimizer state and step counter from `save_dir` into `state`.

    Expects the files written by `save_model_checkpoint`: flax_model.msgpack,
    opt_state.msgpack and training_state.json.
    """
    logger.info(f"RESTORING CHECKPOINT FROM {save_dir}...")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())
    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())
    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]
    logger.info("checkpoint restored")
    # reinstantiate inner opt state to avoid type conflict
    if hasattr(opt_state, "inner_opt_state"):
        # use the module logger instead of a bare print, consistent with
        # the rest of this file
        logger.info("restoring state of multisteps optimizer")
        inner_opt_state = reinstantiate_states(opt_state.inner_opt_state)
        ms_state_dict = {k: getattr(state.opt_state, k) for k in state.opt_state._fields}
        ms_state_dict["inner_opt_state"] = inner_opt_state
        opt_state = optax.MultiStepsState(**ms_state_dict)
    return state.replace(step=step, params=params, opt_state=opt_state)
162,750 | import logging
import math
import os
import sys
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Callable, Optional
import json
import shutil
import datasets
from datasets import Dataset, load_dataset
from tqdm import tqdm
import jax
import jax.profiler
import jax.numpy as jnp
import optax
import transformers
from flax import jax_utils, traverse_util
from flax.jax_utils import unreplicate
from flax.training import train_state
from flax.training.checkpoints import save_checkpoint, restore_checkpoint
from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key
from flax.serialization import to_bytes, from_bytes
from transformers import (
CONFIG_MAPPING,
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
AutoConfig,
AutoTokenizer,
FlaxAutoModelForCausalLM,
HfArgumentParser,
TrainingArguments,
is_tensorboard_available,
IntervalStrategy
)
from transformers.testing_utils import CaptureLogger
from importlib.util import find_spec
logger = logging.getLogger(__name__)
The provided code snippet includes necessary dependencies for implementing the `rotate_checkpoints` function. Write a Python function `def rotate_checkpoints(ckpt_dir:str, save_total_limit:int)` to solve the following problem:
Removes older checkpoints so that `save_total_limit` checkpoints are kept
Here is the function:
def rotate_checkpoints(ckpt_dir:str, save_total_limit:int):
    """Removes older checkpoints so that `save_total_limit` checkpoints are kept.

    Args:
        ckpt_dir: directory containing `ckpt-<step>` subdirectories.
        save_total_limit: number of most recent checkpoints to keep; None or
            values <= 0 disable rotation entirely.
    """
    # Guard: the original slice `[:-save_total_limit]` silently kept everything
    # for limit == 0 and raised TypeError for None — make both explicit no-ops.
    if not save_total_limit or save_total_limit <= 0:
        return
    # TODO: what to remove is decided using step number only, we might want to improve that
    ckpts = [str(x) for x in Path(ckpt_dir).glob("ckpt-*")]
    # sort checkpoints by step (the numeric suffix after the last '-')
    ckpts_sorted = sorted(ckpts, key=lambda x: int(x.split('-')[-1]))
    for ckpt in ckpts_sorted[:-save_total_limit]:
        logger.info(f"Deleting older checkpoint [{ckpt}] due to save_total_limit ({save_total_limit})")
        shutil.rmtree(ckpt)
162,751 | import numpy as np
import threading
import queue
import multiprocessing
from collections import defaultdict
import jax
import jax.numpy as jnp
def make_batch(samples):
    """Convert each value to a jnp array and add `labels` as a copy of `input_ids`."""
    batch = {}
    for key, value in samples.items():
        batch[key] = jnp.array(value)
    batch['labels'] = batch['input_ids'].copy()
    return batch
162,752 | import json
from fastcore.script import *
from transformers import AutoModelForCausalLM, AutoTokenizer, FlaxAutoModelForCausalLM
def fix_model_embds(original_model, new_model, tokenizer):
def add_new_tokens_to_model(model_name, new_tokens):
    """Load `model_name` with `new_tokens` as additional special tokens, resize
    the embedding matrix, and repair the embeddings via `fix_model_embds`."""
    tokenizer = AutoTokenizer.from_pretrained(
        model_name, additional_special_tokens=new_tokens
    )
    reference_model = AutoModelForCausalLM.from_pretrained(model_name)
    resized_model = AutoModelForCausalLM.from_pretrained(model_name)
    resized_model.resize_token_embeddings(len(tokenizer.vocab))
    fix_model_embds(reference_model, resized_model, tokenizer)
    return resized_model, tokenizer
162,758 | from ast import Str
import logging
import math
import os
import sys
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Callable, Optional
import json
import shutil
from flax import training
import numpy as np
import datasets
from datasets import load_dataset
from tqdm import tqdm
import jax
import jax.profiler
import jax.numpy as jnp
import optax
import transformers
import flax
from flax import jax_utils, traverse_util
from flax.jax_utils import unreplicate
from flax.training import train_state
from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key
from flax.training.checkpoints import save_checkpoint, restore_checkpoint
from flax.serialization import to_bytes, from_bytes
from transformers import (
CONFIG_MAPPING,
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
AutoConfig,
AutoTokenizer,
FlaxAutoModelForCausalLM,
HfArgumentParser,
TrainingArguments,
is_tensorboard_available,
)
from transformers.testing_utils import CaptureLogger
from importlib.util import find_spec
from utils import PrefetchDataloader, make_batch
def _zeros_tree_like(inp_tree):
return jax.tree_map(jnp.zeros_like, inp_tree)
def fake_update(state):
    """Run the inner optimizer once with all-zero updates.

    Refreshes `inner_opt_state` without changing the params, leaving the
    MultiSteps bookkeeping fields untouched.
    """
    zero_updates = _zeros_tree_like(state.params)
    old = state.opt_state
    _, refreshed_inner = state.tx.inner_opt.update(zero_updates, old.inner_opt_state, state.params)
    return state.replace(
        opt_state=optax.MultiStepsState(
            mini_step=old.mini_step,
            gradient_step=old.gradient_step,
            inner_opt_state=refreshed_inner,
            acc_grads=old.acc_grads,
        )
    )
162,761 | from ast import Str
import logging
import math
import os
import sys
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Callable, Optional
import json
import shutil
from collections import defaultdict
from flax import training
import numpy as np
import datasets
from datasets import Dataset, load_dataset
from tqdm import tqdm
import jax
import jax.profiler
import jax.numpy as jnp
import optax
import transformers
from flax import jax_utils, traverse_util
from flax.jax_utils import unreplicate
from flax.training import train_state
from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key
from flax.training.checkpoints import save_checkpoint, restore_checkpoint
from flax.serialization import to_bytes, from_bytes
from transformers import (
CONFIG_MAPPING,
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
AutoConfig,
AutoTokenizer,
FlaxAutoModelForCausalLM,
HfArgumentParser,
TrainingArguments,
is_tensorboard_available,
)
from transformers.testing_utils import CaptureLogger
from importlib.util import find_spec
from utils import PrefetchDataloader, make_batch
def generate_batch_splits(samples_idx: jnp.ndarray, batch_size: int) -> jnp.ndarray:
    """Split `samples_idx` into full batches of `batch_size`, dropping the remainder.

    Args:
        samples_idx: 1-D array of sample indices.
        batch_size: number of indices per batch.

    Returns:
        List of arrays, each of length `batch_size`; empty list if there are
        fewer than `batch_size` indices.
    """
    num_samples = len(samples_idx)
    num_batches = num_samples // batch_size
    # Guard: the original fell through to np.split(x, 0), which raises when
    # the input holds fewer indices than one batch.
    if num_batches == 0:
        return []
    leftover = num_samples % batch_size
    if leftover != 0:
        samples_idx = samples_idx[:-leftover]
    return np.split(samples_idx, num_batches)
162,762 | from ast import Str
import logging
import math
import os
import sys
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Callable, Optional
import json
import shutil
from collections import defaultdict
from flax import training
import numpy as np
import datasets
from datasets import Dataset, load_dataset
from tqdm import tqdm
import jax
import jax.profiler
import jax.numpy as jnp
import optax
import transformers
from flax import jax_utils, traverse_util
from flax.jax_utils import unreplicate
from flax.training import train_state
from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key
from flax.training.checkpoints import save_checkpoint, restore_checkpoint
from flax.serialization import to_bytes, from_bytes
from transformers import (
CONFIG_MAPPING,
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
AutoConfig,
AutoTokenizer,
FlaxAutoModelForCausalLM,
HfArgumentParser,
TrainingArguments,
is_tensorboard_available,
)
from transformers.testing_utils import CaptureLogger
from importlib.util import find_spec
from utils import PrefetchDataloader, make_batch
The provided code snippet includes necessary dependencies for implementing the `advance_iter_and_group_samples` function. Write a Python function `def advance_iter_and_group_samples(train_iterator, num_samples, max_seq_length)` to solve the following problem:
The training iterator is advanced so that after groupifying the samples, `num_samples` of length `max_seq_length` are returned.
Here is the function:
def advance_iter_and_group_samples(train_iterator, num_samples, max_seq_length):
    """
    The training iterator is advanced so that after groupifying the samples,
    `num_samples` of length `max_seq_length` are returned.

    Args:
        train_iterator: iterator yielding dicts of token lists (must contain "input_ids").
        num_samples: number of fixed-length sequences to produce.
        max_seq_length: length of each produced sequence.

    Returns:
        Dict mapping each key to `num_samples` lists of length `max_seq_length`.
    """
    num_total_tokens = max_seq_length * num_samples
    samples = defaultdict(list)

    collected = 0
    while collected < num_total_tokens:
        tokenized_samples = next(train_iterator)
        collected += len(tokenized_samples["input_ids"])
        # Extend the defaultdict in place. The old dict-comprehension rebuild
        # replaced it with a plain dict, so a key first appearing mid-stream
        # raised KeyError and keys missing from one sample were dropped.
        for key, tokens in tokenized_samples.items():
            samples[key].extend(tokens)

    # Concatenated tokens are split to lists of length `max_seq_length`.
    # Note that the remainder of % max_seq_length is thrown away.
    def group_texts(examples):
        return {
            k: [t[i : i + max_seq_length] for i in range(0, num_total_tokens, max_seq_length)]
            for k, t in examples.items()
        }

    return group_texts(samples)
162,763 | from ast import Str
import logging
import math
import os
import sys
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Callable, Optional
import json
import shutil
from collections import defaultdict
from flax import training
import numpy as np
import datasets
from datasets import Dataset, load_dataset
from tqdm import tqdm
import jax
import jax.profiler
import jax.numpy as jnp
import optax
import transformers
from flax import jax_utils, traverse_util
from flax.jax_utils import unreplicate
from flax.training import train_state
from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key
from flax.training.checkpoints import save_checkpoint, restore_checkpoint
from flax.serialization import to_bytes, from_bytes
from transformers import (
CONFIG_MAPPING,
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
AutoConfig,
AutoTokenizer,
FlaxAutoModelForCausalLM,
HfArgumentParser,
TrainingArguments,
is_tensorboard_available,
)
from transformers.testing_utils import CaptureLogger
from importlib.util import find_spec
from utils import PrefetchDataloader, make_batch
The provided code snippet includes necessary dependencies for implementing the `data_loader` function. Write a Python function `def data_loader(rng: jax.random.PRNGKey, dataset: Dataset, batch_size: int, shuffle: bool = False)` to solve the following problem:
Returns batches of size `batch_size` from truncated `dataset`, sharded over all local devices. Shuffle batches if `shuffle` is `True`.
Here is the function:
def data_loader(rng: jax.random.PRNGKey, dataset: Dataset, batch_size: int, shuffle: bool = False):
    """
    Returns batches of size `batch_size` from truncated `dataset`, sharded over all local devices.
    Shuffle batches if `shuffle` is `True`.
    """
    steps_per_epoch = len(dataset) // batch_size
    if shuffle:
        order = jax.random.permutation(rng, len(dataset))
    else:
        order = jnp.arange(len(dataset))
    # Drop the trailing incomplete batch, then view as (steps, batch_size).
    order = order[: steps_per_epoch * batch_size]
    order = order.reshape((steps_per_epoch, batch_size))
    for batch_indices in order:
        raw = dataset[batch_indices]
        yield shard({k: jnp.array(v) for k, v in raw.items()})
162,764 | from ast import Str
import logging
import math
import os
import sys
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Callable, Optional
import json
import shutil
from collections import defaultdict
from flax import training
import numpy as np
import datasets
from datasets import Dataset, load_dataset
from tqdm import tqdm
import jax
import jax.profiler
import jax.numpy as jnp
import optax
import transformers
from flax import jax_utils, traverse_util
from flax.jax_utils import unreplicate
from flax.training import train_state
from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key
from flax.training.checkpoints import save_checkpoint, restore_checkpoint
from flax.serialization import to_bytes, from_bytes
from transformers import (
CONFIG_MAPPING,
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
AutoConfig,
AutoTokenizer,
FlaxAutoModelForCausalLM,
HfArgumentParser,
TrainingArguments,
is_tensorboard_available,
)
from transformers.testing_utils import CaptureLogger
from importlib.util import find_spec
from utils import PrefetchDataloader, make_batch
def write_train_metric(summary_writer, train_metrics, train_time, step):
    """Log accumulated per-step training metrics (and wall time) to TensorBoard."""
    summary_writer.scalar("train_time", train_time, step)
    stacked = get_metrics(train_metrics)
    for name, values in stacked.items():
        # entry j of `values` belongs to step (step - len(values) + j + 1)
        for offset, value in enumerate(values):
            summary_writer.scalar(f"train_{name}", value, step - len(values) + offset + 1)
162,765 | from ast import Str
import logging
import math
import os
import sys
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Callable, Optional
import json
import shutil
from collections import defaultdict
from flax import training
import numpy as np
import datasets
from datasets import Dataset, load_dataset
from tqdm import tqdm
import jax
import jax.profiler
import jax.numpy as jnp
import optax
import transformers
from flax import jax_utils, traverse_util
from flax.jax_utils import unreplicate
from flax.training import train_state
from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key
from flax.training.checkpoints import save_checkpoint, restore_checkpoint
from flax.serialization import to_bytes, from_bytes
from transformers import (
CONFIG_MAPPING,
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
AutoConfig,
AutoTokenizer,
FlaxAutoModelForCausalLM,
HfArgumentParser,
TrainingArguments,
is_tensorboard_available,
)
from transformers.testing_utils import CaptureLogger
from importlib.util import find_spec
from utils import PrefetchDataloader, make_batch
def write_eval_metric(summary_writer, eval_metrics, step):
    """Log every evaluation metric to TensorBoard under an `eval_` tag at `step`."""
    for name, metric_value in eval_metrics.items():
        tag = f"eval_{name}"
        summary_writer.scalar(tag, metric_value, step)
from ast import Str
import logging
import math
import os
import sys
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Callable, Optional
import json
import shutil
from collections import defaultdict
from flax import training
import numpy as np
import datasets
from datasets import Dataset, load_dataset
from tqdm import tqdm
import jax
import jax.profiler
import jax.numpy as jnp
import optax
import transformers
from flax import jax_utils, traverse_util
from flax.jax_utils import unreplicate
from flax.training import train_state
from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key
from flax.training.checkpoints import save_checkpoint, restore_checkpoint
from flax.serialization import to_bytes, from_bytes
from transformers import (
CONFIG_MAPPING,
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
AutoConfig,
AutoTokenizer,
FlaxAutoModelForCausalLM,
HfArgumentParser,
TrainingArguments,
is_tensorboard_available,
)
from transformers.testing_utils import CaptureLogger
from importlib.util import find_spec
from utils import PrefetchDataloader, make_batch
The provided code snippet includes necessary dependencies for implementing the `create_learning_rate_fn` function. Write a Python function `def create_learning_rate_fn( num_train_steps: int, train_batch_size: int, num_warmup_steps: int, learning_rate: float ) -> Callable[[int], jnp.array]` to solve the following problem:
Returns a linear warmup, linear_decay learning rate function.
Here is the function:
def create_learning_rate_fn(
    num_train_steps: int, train_batch_size: int, num_warmup_steps: int, learning_rate: float
) -> Callable[[int], jnp.array]:
    """Returns a linear warmup, linear_decay learning rate function.

    Warms up linearly from 0 to `learning_rate` over `num_warmup_steps`, then
    decays linearly to 0 over the remaining `num_train_steps - num_warmup_steps`.
    NOTE(review): `train_batch_size` is not referenced in this body — presumably
    kept for signature compatibility with callers; confirm before removing.
    """
    warmup = optax.linear_schedule(
        init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps
    )
    decay = optax.linear_schedule(
        init_value=learning_rate,
        end_value=0,
        transition_steps=num_train_steps - num_warmup_steps,
    )
    return optax.join_schedules(schedules=[warmup, decay], boundaries=[num_warmup_steps])
from ast import Str
import logging
import math
import os
import sys
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Callable, Optional
import json
import shutil
from collections import defaultdict
from flax import training
import numpy as np
import datasets
from datasets import Dataset, load_dataset
from tqdm import tqdm
import jax
import jax.profiler
import jax.numpy as jnp
import optax
import transformers
from flax import jax_utils, traverse_util
from flax.jax_utils import unreplicate
from flax.training import train_state
from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key
from flax.training.checkpoints import save_checkpoint, restore_checkpoint
from flax.serialization import to_bytes, from_bytes
from transformers import (
CONFIG_MAPPING,
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
AutoConfig,
AutoTokenizer,
FlaxAutoModelForCausalLM,
HfArgumentParser,
TrainingArguments,
is_tensorboard_available,
)
from transformers.testing_utils import CaptureLogger
from importlib.util import find_spec
from utils import PrefetchDataloader, make_batch
def gpt3_schedule(warmup_steps,
                  total_steps,
                  peak_lr,
                  end_lr):
    """Build a GPT-3-style schedule: linear warmup to `peak_lr`, then cosine anneal to `end_lr`.

    NOTE: the anneal fraction is normalized by `total_steps` (not
    `total_steps - warmup_steps`), so the schedule only reaches `end_lr`
    at step `warmup_steps + total_steps`.
    """
    def sch(step):
        warmup_frac = jnp.clip(step, 0, warmup_steps) / warmup_steps
        anneal_frac = jnp.clip(step - warmup_steps, 0, total_steps) / total_steps
        cosine_term = (1 - jnp.cos(jnp.pi * anneal_frac)) / 2
        return warmup_frac * peak_lr - (peak_lr - end_lr) * cosine_term
    return sch
from ast import Str
import logging
import math
import os
import sys
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Callable, Optional
import json
import shutil
from collections import defaultdict
from flax import training
import numpy as np
import datasets
from datasets import Dataset, load_dataset
from tqdm import tqdm
import jax
import jax.profiler
import jax.numpy as jnp
import optax
import transformers
from flax import jax_utils, traverse_util
from flax.jax_utils import unreplicate
from flax.training import train_state
from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key
from flax.training.checkpoints import save_checkpoint, restore_checkpoint
from flax.serialization import to_bytes, from_bytes
from transformers import (
CONFIG_MAPPING,
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
AutoConfig,
AutoTokenizer,
FlaxAutoModelForCausalLM,
HfArgumentParser,
TrainingArguments,
is_tensorboard_available,
)
from transformers.testing_utils import CaptureLogger
from importlib.util import find_spec
from utils import PrefetchDataloader, make_batch
logger = logging.getLogger(__name__)
def mb_item(x):
    """Unwrap a zero-dim array / device scalar via `.item()`; return `x` unchanged otherwise."""
    extract = getattr(x, "item", None)
    return x if extract is None else extract()
The provided code snippet includes necessary dependencies for implementing the `save_model_checkpoint` function. Write a Python function `def save_model_checkpoint(model, save_dir, state, with_opt:bool=True, push_to_hub:bool=False)` to solve the following problem:
If `push_to_hub` is True, will save to `save_dir`. Otherwise will save to `save_dir/ckpt-{step}`.
Here is the function:
def save_model_checkpoint(model, save_dir, state, with_opt:bool=True, push_to_hub:bool=False):
    """
    If `push_to_hub` is True, will save to `save_dir`. Otherwise will save to `save_dir/ckpt-{step}`.

    Persists model weights via `model.save_pretrained`; when `with_opt` is True,
    also writes the serialized optimizer state (`opt_state.msgpack`) and the
    current step (`training_state.json`) alongside the weights.
    """
    # De-replicate first so we serialize a single copy of params/opt_state.
    state = jax_utils.unreplicate(state)
    step = mb_item(state.step) - 1
    logger.info(f"SAVING CHECKPOINT IN {save_dir}...")
    if not push_to_hub:
        save_dir = f"{save_dir}/ckpt-{step}"
    model.save_pretrained(
        save_dir,
        params=state.params,
        push_to_hub=push_to_hub,
        commit_message=f"Saving weights and logs at step {step}",
    )
    if with_opt:
        opt_path = os.path.join(save_dir, "opt_state.msgpack")
        with open(opt_path, "wb") as f:
            f.write(to_bytes(state.opt_state))
        meta_path = os.path.join(save_dir, "training_state.json")
        with open(meta_path, "w") as f:
            json.dump({"step": state.step.item()}, f)
    logger.info("checkpoint saved")
from ast import Str
import logging
import math
import os
import sys
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Callable, Optional
import json
import shutil
from collections import defaultdict
from flax import training
import numpy as np
import datasets
from datasets import Dataset, load_dataset
from tqdm import tqdm
import jax
import jax.profiler
import jax.numpy as jnp
import optax
import transformers
from flax import jax_utils, traverse_util
from flax.jax_utils import unreplicate
from flax.training import train_state
from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key
from flax.training.checkpoints import save_checkpoint, restore_checkpoint
from flax.serialization import to_bytes, from_bytes
from transformers import (
CONFIG_MAPPING,
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
AutoConfig,
AutoTokenizer,
FlaxAutoModelForCausalLM,
HfArgumentParser,
TrainingArguments,
is_tensorboard_available,
)
from transformers.testing_utils import CaptureLogger
from importlib.util import find_spec
from utils import PrefetchDataloader, make_batch
logger = logging.getLogger(__name__)
def restore_model_checkpoint(save_dir, state):
    """Restore params, optimizer state, and step counter from a checkpoint directory.

    Reads the three artifacts written by `save_model_checkpoint` and returns
    `(updated_state, step)` with the deserialized values spliced into `state`.
    """
    logger.info(f"RESTORING CHECKPOINT FROM {save_dir}...")
    params_path = os.path.join(save_dir, "flax_model.msgpack")
    with open(params_path, "rb") as f:
        # from_bytes needs the existing pytree as a template for deserialization
        params = from_bytes(state.params, f.read())
    opt_path = os.path.join(save_dir, "opt_state.msgpack")
    with open(opt_path, "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())
    meta_path = os.path.join(save_dir, "training_state.json")
    with open(meta_path, "r") as f:
        step = json.load(f)["step"]
    logger.info("checkpoint restored")
    return state.replace(step=step, params=params, opt_state=opt_state), step
from ast import Str
import logging
import math
import os
import sys
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Callable, Optional
import json
import shutil
from collections import defaultdict
from flax import training
import numpy as np
import datasets
from datasets import Dataset, load_dataset
from tqdm import tqdm
import jax
import jax.profiler
import jax.numpy as jnp
import optax
import transformers
from flax import jax_utils, traverse_util
from flax.jax_utils import unreplicate
from flax.training import train_state
from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key
from flax.training.checkpoints import save_checkpoint, restore_checkpoint
from flax.serialization import to_bytes, from_bytes
from transformers import (
CONFIG_MAPPING,
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
AutoConfig,
AutoTokenizer,
FlaxAutoModelForCausalLM,
HfArgumentParser,
TrainingArguments,
is_tensorboard_available,
)
from transformers.testing_utils import CaptureLogger
from importlib.util import find_spec
from utils import PrefetchDataloader, make_batch
logger = logging.getLogger(__name__)
The provided code snippet includes necessary dependencies for implementing the `rotate_checkpoints` function. Write a Python function `def rotate_checkpoints(ckpt_dir:str, save_total_limit:int)` to solve the following problem:
Removes older checkpoints so that `save_total_limit` checkpoints are kept
Here is the function:
def rotate_checkpoints(ckpt_dir:str, save_total_limit:int):
    "Removes older checkpoints so that `save_total_limit` checkpoints are kept"
    # TODO: what to remove is decided using step number only, we might want to improve that
    def step_of(path: str) -> int:
        return int(path.split("-")[-1])

    all_ckpts = sorted((str(p) for p in Path(ckpt_dir).glob("ckpt-*")), key=step_of)
    # note: with save_total_limit == 0 the slice [:-0] is empty, so nothing is deleted
    for stale in all_ckpts[:-save_total_limit]:
        logger.info(f"Deleting older checkpoint [{stale}] due to save_total_limit ({save_total_limit})")
        shutil.rmtree(stale)
import logging
import math
import os
import sys
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Callable, Optional
import datasets
import numpy as np
from datasets import Dataset, load_dataset
from tqdm import tqdm
import jax
import jax.numpy as jnp
import optax
import transformers
from flax.core.frozen_dict import freeze, unfreeze
from flax.training.common_utils import onehot, stack_forest
from jax.experimental.maps import mesh
from jax.experimental.pjit import pjit
from partitions import set_partitions
from transformers import (
CONFIG_MAPPING,
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
AutoConfig,
AutoTokenizer,
FlaxAutoModelForCausalLM,
HfArgumentParser,
TrainingArguments,
is_tensorboard_available,
)
from transformers.testing_utils import CaptureLogger
The provided code snippet includes necessary dependencies for implementing the `data_loader` function. Write a Python function `def data_loader(rng: jax.random.PRNGKey, dataset: Dataset, batch_size: int, shuffle: bool = False)` to solve the following problem:
Returns batches of size `batch_size` from truncated `dataset`, sharded over all local devices. Shuffle batches if `shuffle` is `True`.
Here is the function:
def data_loader(rng: jax.random.PRNGKey, dataset: Dataset, batch_size: int, shuffle: bool = False):
    """
    Returns batches of size `batch_size` from truncated `dataset`, sharded over all local devices.
    Shuffle batches if `shuffle` is `True`.
    """
    n_batches = len(dataset) // batch_size
    if shuffle:
        order = jax.random.permutation(rng, len(dataset))
    else:
        order = jnp.arange(len(dataset))
    # Drop the trailing partial batch, then group indices one row per batch.
    order = order[: n_batches * batch_size]
    order = order.reshape((n_batches, batch_size))
    for batch_indices in order:
        examples = dataset[batch_indices]
        yield {k: jnp.array(v) for k, v in examples.items()}
import logging
import math
import os
import sys
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Callable, Optional
import datasets
import numpy as np
from datasets import Dataset, load_dataset
from tqdm import tqdm
import jax
import jax.numpy as jnp
import optax
import transformers
from flax.core.frozen_dict import freeze, unfreeze
from flax.training.common_utils import onehot, stack_forest
from jax.experimental.maps import mesh
from jax.experimental.pjit import pjit
from partitions import set_partitions
from transformers import (
CONFIG_MAPPING,
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
AutoConfig,
AutoTokenizer,
FlaxAutoModelForCausalLM,
HfArgumentParser,
TrainingArguments,
is_tensorboard_available,
)
from transformers.testing_utils import CaptureLogger
def write_train_metric(summary_writer, train_metrics, train_time, step):
    """Log accumulated training metrics plus wall-clock train time to TensorBoard.

    `train_metrics` is a list of per-step metric dicts; `stack_forest` stacks
    them so each key maps to an array of per-step values, logged at the step
    each value was produced (counting backwards from `step`).
    """
    summary_writer.scalar("train_time", train_time, step)
    stacked = stack_forest(train_metrics)
    for name, values in stacked.items():
        tag = f"train_{name}"
        first_step = step - len(values) + 1
        for offset, value in enumerate(values):
            summary_writer.scalar(tag, value, first_step + offset)
import logging
import math
import os
import sys
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Callable, Optional
import datasets
import numpy as np
from datasets import Dataset, load_dataset
from tqdm import tqdm
import jax
import jax.numpy as jnp
import optax
import transformers
from flax.core.frozen_dict import freeze, unfreeze
from flax.training.common_utils import onehot, stack_forest
from jax.experimental.maps import mesh
from jax.experimental.pjit import pjit
from partitions import set_partitions
from transformers import (
CONFIG_MAPPING,
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
AutoConfig,
AutoTokenizer,
FlaxAutoModelForCausalLM,
HfArgumentParser,
TrainingArguments,
is_tensorboard_available,
)
from transformers.testing_utils import CaptureLogger
def write_eval_metric(summary_writer, eval_metrics, step):
    """Log every evaluation metric to TensorBoard under an `eval_` tag at `step`."""
    for name, metric_value in eval_metrics.items():
        tag = f"eval_{name}"
        summary_writer.scalar(tag, metric_value, step)
import logging
import math
import os
import sys
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Callable, Optional
import datasets
import numpy as np
from datasets import Dataset, load_dataset
from tqdm import tqdm
import jax
import jax.numpy as jnp
import optax
import transformers
from flax.core.frozen_dict import freeze, unfreeze
from flax.training.common_utils import onehot, stack_forest
from jax.experimental.maps import mesh
from jax.experimental.pjit import pjit
from partitions import set_partitions
from transformers import (
CONFIG_MAPPING,
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
AutoConfig,
AutoTokenizer,
FlaxAutoModelForCausalLM,
HfArgumentParser,
TrainingArguments,
is_tensorboard_available,
)
from transformers.testing_utils import CaptureLogger
The provided code snippet includes necessary dependencies for implementing the `create_learning_rate_fn` function. Write a Python function `def create_learning_rate_fn( train_ds_size: int, train_batch_size: int, num_train_epochs: int, num_warmup_steps: int, learning_rate: float ) -> Callable[[int], jnp.array]` to solve the following problem:
Returns a linear warmup, linear_decay learning rate function.
Here is the function:
def create_learning_rate_fn(
    train_ds_size: int, train_batch_size: int, num_train_epochs: int, num_warmup_steps: int, learning_rate: float
) -> Callable[[int], jnp.array]:
    """Returns a linear warmup, linear_decay learning rate function.

    Total step count is derived from the dataset size: partial batches are
    dropped (`train_ds_size // train_batch_size` steps per epoch).
    """
    total_steps = (train_ds_size // train_batch_size) * num_train_epochs
    warmup = optax.linear_schedule(
        init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps
    )
    decay = optax.linear_schedule(
        init_value=learning_rate, end_value=0, transition_steps=total_steps - num_warmup_steps
    )
    return optax.join_schedules(schedules=[warmup, decay], boundaries=[num_warmup_steps])
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.