id
int64
0
190k
prompt
stringlengths
21
13.4M
docstring
stringlengths
1
12k
23,026
import torch from kaolin import _C import wisp._C as wisp_C import kaolin.ops.spc as spc_ops class GridInterpolate(torch.autograd.Function): def forward(ctx, coords, feats): feats_out = wisp_C.ops.grid_interpolate_cuda(coords.float().contiguous(), feats.contiguous()).contiguous() ctx.save_for_backward(coords) ctx.feature_dim = feats.shape[-1] return feats_out def backward(ctx, grad_output): coords = ctx.saved_tensors[0] feature_dim = ctx.feature_dim grad_feats = wisp_C.ops.grid_interpolate_backward_cuda( coords.float().contiguous(), grad_output.contiguous(), feature_dim) return (None, grad_feats) def grid_interpolate(coords, feats): return GridInterpolate.apply(coords.contiguous(), feats.contiguous())
null
23,027
import torch from kaolin import _C import wisp._C as wisp_C import kaolin.ops.spc as spc_ops The provided code snippet includes necessary dependencies for implementing the `hashgrid_query_fwd` function. Write a Python function `def hashgrid_query_fwd(coords, resolutions, codebook_bitwidth, lod_idx, codebook, probe_bitwidth=0)` to solve the following problem: Non-differentiable version of hashgrid query. No assumptions on the typing of the codebook. Here is the function: def hashgrid_query_fwd(coords, resolutions, codebook_bitwidth, lod_idx, codebook, probe_bitwidth=0): """Non-differentiable version of hashgrid query. No assumptions on the typing of the codebook. """ batch, dim = coords.shape assert(coords.shape[-1] in [2, 3]) feats_out = wisp_C.ops.hashgrid_query_cuda(coords.float().contiguous(), codebook, resolutions, codebook_bitwidth, probe_bitwidth).contiguous() feature_dim = codebook[0].shape[1] * len(resolutions) return feats_out.reshape(batch, 8, feature_dim*(2**probe_bitwidth))
Non-differentiable version of hashgrid query. No assumptions on the typing of the codebook.
23,028
import torch from kaolin import _C import wisp._C as wisp_C import kaolin.ops.spc as spc_ops class HashGridQuery(torch.autograd.Function): def forward(ctx, coords, resolutions, codebook_bitwidth, probe_bitwidth, lod_idx, *codebook): if codebook[0].shape[-1] % 2 == 1: raise Exception("The codebook feature dimension needs to be a multiple of 2.") # TODO(ttakikawa): Make the kernel use the LOD feats_out = wisp_C.ops.hashgrid_query_cuda(coords.float().contiguous(), codebook, resolutions, codebook_bitwidth, probe_bitwidth).contiguous() ctx.save_for_backward(coords) ctx.resolutions = resolutions ctx.num_lods = len(resolutions) ctx.codebook_shapes = [_c.shape for _c in codebook] ctx.codebook_size = 2**codebook_bitwidth ctx.codebook_bitwidth = codebook_bitwidth ctx.feature_dim = codebook[0].shape[-1] ctx.probe_bitwidth = probe_bitwidth return feats_out def backward(ctx, grad_output): coords = ctx.saved_tensors[0] resolutions = ctx.resolutions codebook_size = ctx.codebook_size feature_dim = ctx.feature_dim codebook_shapes = ctx.codebook_shapes codebook_bitwidth = ctx.codebook_bitwidth probe_bitwidth = ctx.probe_bitwidth grad_codebook = wisp_C.ops.hashgrid_query_backward_cuda( coords.float().contiguous(), grad_output.contiguous(), resolutions, [c_[0] for c_ in codebook_shapes], codebook_bitwidth, feature_dim, probe_bitwidth) return (None, None, None, None, None, *grad_codebook) The provided code snippet includes necessary dependencies for implementing the `hashgrid_query` function. Write a Python function `def hashgrid_query(coords, resolutions, codebook_bitwidth, lod_idx, codebook, probe_bitwidth=0)` to solve the following problem: A hash-grid query, accelerated with CUDA. Args: coords (torch.FloatTensor): 3D coordinates of shape [batch, 3] resolutions (torch.LongTensor): the resolution of the grid per level of shape [num_lods] codebook_bitwidth (int): The bitwidth of the codebook. The codebook will have 2^bw entries. lod_idx (int): The LOD to aggregate to. 
codebook (torch.ModuleList[torch.FloatTensor]): A list of codebooks of shapes [codebook_size, feature_dim]. Returns: (torch.FloatTensor): Features of shape [batch, 8, feature_dim] Here is the function: def hashgrid_query(coords, resolutions, codebook_bitwidth, lod_idx, codebook, probe_bitwidth=0): """A hash-grid query, accelerated with CUDA. Args: coords (torch.FloatTensor): 3D coordinates of shape [batch, 3] resolutions (torch.LongTensor): the resolution of the grid per level of shape [num_lods] codebook_bitwidth (int): The bitwidth of the codebook. The codebook will have 2^bw entries. lod_idx (int): The LOD to aggregate to. codebook (torch.ModuleList[torch.FloatTensor]): A list of codebooks of shapes [codebook_size, feature_dim]. Returns: (torch.FloatTensor): Features of shape [batch, 8, feature_dim] """ batch, dim = coords.shape assert(coords.shape[-1] in [2, 3]) feats = HashGridQuery.apply(coords.contiguous(), resolutions, codebook_bitwidth, probe_bitwidth, lod_idx, *[_c for _c in codebook]) feature_dim = codebook[0].shape[1] * len(resolutions) return feats.reshape(batch, 8, feature_dim*(2**probe_bitwidth))
A hash-grid query, accelerated with CUDA. Args: coords (torch.FloatTensor): 3D coordinates of shape [batch, 3] resolutions (torch.LongTensor): the resolution of the grid per level of shape [num_lods] codebook_bitwidth (int): The bitwidth of the codebook. The codebook will have 2^bw entries. lod_idx (int): The LOD to aggregate to. codebook (torch.ModuleList[torch.FloatTensor]): A list of codebooks of shapes [codebook_size, feature_dim]. Returns: (torch.FloatTensor): Features of shape [batch, 8, feature_dim]
23,029
import torch import torch.nn.functional as F from scipy.ndimage import gaussian_filter from wisp.core import RenderBuffer, Rays The provided code snippet includes necessary dependencies for implementing the `pointlight_shadow_shader` function. Write a Python function `def pointlight_shadow_shader(rb: RenderBuffer, rays: Rays, pipeline, point_light=[1.5, 4.5, 1.5], min_y=-2.0) -> RenderBuffer` to solve the following problem: Apply shadow rays with one secondary ray towards the pointlight. Args: rb (wisp.core.RenderBuffer): The RenderBuffer. rays (wisp.core.Rays): The rays object. pipeline (wisp.core.Pipeline): The neural field. point_light (list[3] of float): Position of the point light. min_y (float): The location of the xz plane. Returns: (wisp.core.RenderBuffer): The output RenderBuffer. Here is the function: def pointlight_shadow_shader(rb: RenderBuffer, rays: Rays, pipeline, point_light=[1.5, 4.5, 1.5], min_y=-2.0) -> RenderBuffer: """Apply shadow rays with one secondary ray towards the pointlight. Args: rb (wisp.core.RenderBuffer): The RenderBuffer. rays (wisp.core.Rays): The rays object. pipeline (wisp.core.Pipeline): The neural field. point_light (list[3] of float): Position of the point light. min_y (float): The location of the xz plane. Returns: (wisp.core.RenderBuffer): The output RenderBuffer. 
""" rb.shadow = torch.zeros_like(rb.depth)[:, 0].bool().to(rays.origins.device) with torch.no_grad(): plane_hit = torch.zeros_like(rb.depth)[:, 0].bool().to(rays.origins.device) rate = -rays.dirs[:, 1] # check negative sign probably lol plane_hit[torch.abs(rate) < 0.00001] = False delta = rays.origins[:, 1] - min_y plane_t = delta / rate plane_hit[(plane_t > 0) & (plane_t < 500)] = True plane_hit = plane_hit & (plane_t < rb.depth[..., 0]) rb.hit = rb.hit & ~plane_hit rb.depth[plane_hit] = plane_t[plane_hit].unsqueeze(1) rb.xyz[plane_hit] = rays.origins[plane_hit] + rays.dirs[plane_hit] * plane_t[plane_hit].unsqueeze(1) rb.normal[plane_hit] = 0 rb.normal[plane_hit, 1] = 1 # x is shadow ray origin light_o = torch.FloatTensor([[point_light]]).to(rays.origins.device) shadow_ray_o = rb.xyz + 0.01 * rb.normal shadow_ray_d = torch.zeros_like(rb.xyz).normal_(0.0, 0.01) + \ light_o - shadow_ray_o shadow_ray_d = F.normalize(shadow_ray_d, dim=1)[0] light_hit = ((shadow_ray_d * rb.normal).sum(-1) > 0.0) shadow_rays = Rays(origins=shadow_ray_o, dirs=shadow_ray_d, dist_min=0, dist_max=rays.dist_max) rb.shadow = pipeline.tracer(pipeline.nef, rays=shadow_rays).hit # rb.shadow[~plane_hit] = 0.0 rb.shadow[~light_hit] = 0.0 # rb.hit = rb.hit | plane_hit shadow_map = torch.clamp((1.0 - rb.shadow.float()) + 0.7, 0.0, 1.0).cpu().numpy()[..., 0] shadow_map = torch.from_numpy(gaussian_filter(shadow_map, sigma=2)).unsqueeze(-1) rb.rgb[..., :3] *= shadow_map.cuda() return rb
Apply shadow rays with one secondary ray towards the pointlight. Args: rb (wisp.core.RenderBuffer): The RenderBuffer. rays (wisp.core.Rays): The rays object. pipeline (wisp.core.Pipeline): The neural field. point_light (list[3] of float): Position of the point light. min_y (float): The location of the xz plane. Returns: (wisp.core.RenderBuffer): The output RenderBuffer.
23,030
import os import numpy as np import torch from scipy.interpolate import RegularGridInterpolator from PIL import Image from wisp.core import RenderBuffer, Rays from wisp.ops.geometric import spherical_envmap def matcap_sampler(path, interpolate=True): """Fetches MatCap texture & converts to a interpolation function (if needed). TODO(ttakikawa): Replace this with something GPU compatible. Args: path (str): path to MatCap texture interpolate (bool): perform interpolation (default: True) Returns: (np.array) or (scipy.interpolate.Interpolator) - The matcap texture - A SciPy interpolator function to be used for CPU texture fetch. """ matcap = np.array(Image.open(path)).transpose(1, 0, 2) if interpolate: return RegularGridInterpolator((np.linspace(0, 1, matcap.shape[0]), np.linspace(0, 1, matcap.shape[1])), matcap) else: return matcap def spherical_envmap(ray_dir, normal): """Computes matcap UV-coordinates from the ray direction and normal. Args: ray_dir (torch.Tensor): incoming ray direction of shape [...., 3] normal (torch.Tensor): surface normal of shape [..., 3] Returns: (torch.FloatTensor): UV coordinates of shape [..., 2] """ # Input should be size [...,3] # Returns [N,2] # Might want to make this [...,2] # TODO(ttakikawa): Probably should implement all this on GPU ray_dir_screen = ray_dir.clone() ray_dir_screen[..., 2] *= -1 ray_dir_normal_dot = torch.sum(normal * ray_dir_screen, dim=-1, keepdim=True) r = ray_dir_screen - 2.0 * ray_dir_normal_dot * normal r[..., 2] -= 1.0 m = 2.0 * torch.sqrt(torch.sum(r ** 2, dim=-1, keepdim=True)) vN = (r[..., :2] / m) + 0.5 vN = 1.0 - vN vN = vN[..., :2].reshape(-1, 2) vN = torch.clip(vN, 0.0, 1.0) vN[torch.isnan(vN)] = 0 return vN The provided code snippet includes necessary dependencies for implementing the `matcap_shader` function. Write a Python function `def matcap_shader(rb: RenderBuffer, rays: Rays, matcap_path, mm=None) -> RenderBuffer` to solve the following problem: Apply matcap shading. 
Args: rb (wisp.core.RenderBuffer): The RenderBuffer. rays (wisp.core.Rays): The rays object. matcap_path (str): Path to a matcap. mm (torch.FloatTensor): A 3x3 rotation matrix. Returns: (wisp.core.RenderBuffer): The output RenderBuffer. Here is the function: def matcap_shader(rb: RenderBuffer, rays: Rays, matcap_path, mm=None) -> RenderBuffer: """Apply matcap shading. Args: rb (wisp.core.RenderBuffer): The RenderBuffer. rays (wisp.core.Rays): The rays object. matcap_path (str): Path to a matcap. mm (torch.FloatTensor): A 3x3 rotation matrix. Returns: (wisp.core.RenderBuffer): The output RenderBuffer. """ if not os.path.exists(matcap_path): raise Exception(f"The path [{matcap_path}] does not exist. Check your working directory or use an absolute path to the matcap with --matcap-path") # TODO: Write a GPU version of the sampler... matcap = matcap_sampler(matcap_path) matcap_normal = rb.normal.clone() matcap_view = rays.dirs.clone() if mm is not None: mm = mm.to(matcap_normal.device) #matcap_normal = torch.mm(matcap_normal.reshape(-1, 3), mm.transpose(1,0)) #matcap_normal = matcap_normal.reshape(self.width, self.height, 3) shape = matcap_view.shape matcap_view = torch.mm(matcap_view.reshape(-1, 3), mm.transpose(1,0)) matcap_view = matcap_view.reshape(*shape) vN = spherical_envmap(matcap_view, matcap_normal).cpu().numpy() rb.rgb = torch.FloatTensor(matcap(vN)[...,:3].reshape(*matcap_view.shape)).to(matcap_normal.device) / 255.0 return rb
Apply matcap shading. Args: rb (wisp.core.RenderBuffer): The RenderBuffer. rays (wisp.core.Rays): The rays object. matcap_path (str): Path to a matcap. mm (torch.FloatTensor): A 3x3 rotation matrix. Returns: (wisp.core.RenderBuffer): The output RenderBuffer.
23,031
import torch import numpy as np from .barycentric_coordinates import barycentric_coordinates from .closest_point import closest_point from .sample_tex import sample_tex def barycentric_coordinates( points : torch.Tensor, A : torch.Tensor, B : torch.Tensor, C : torch.Tensor): """ Return barycentric coordinates for a given set of points and triangle vertices Args: points (torch.FloatTensor): [N, 3] A (torch.FloatTensor): [N, 3] vertex0 B (torch.FloatTensor): [N, 3] vertex1 C (torch.FloatTensor): [N, 3] vertex2 Returns: (torch.FloatTensor): barycentric coordinates of [N, 2] """ v0 = B-A v1 = C-A v2 = points-A d00 = (v0*v0).sum(dim=-1) d01 = (v0*v1).sum(dim=-1) d11 = (v1*v1).sum(dim=-1) d20 = (v2*v0).sum(dim=-1) d21 = (v2*v1).sum(dim=-1) denom = d00*d11 - d01*d01 L = torch.zeros(points.shape[0], 3, device=points.device) # Warning: This clipping may cause undesired behaviour L[...,1] = torch.clip((d11*d20 - d01*d21)/denom, 0.0, 1.0) L[...,2] = torch.clip((d00*d21 - d01*d20)/denom, 0.0, 1.0) L[...,0] = torch.clip(1.0 - (L[...,1] + L[...,2]), 0.0, 1.0) return L def closest_point( V : torch.Tensor, F : torch.Tensor, points : torch.Tensor, split_size : int = 10**6): """Computes points on mesh which is closest to the input points Args: V (torch.FloatTensor): [#V, 3] array of vertices F (torch.LongTensor): [#F, 3] array of indices points (torch.FloatTensor): [N, 3] array of points to sample split_size (int): The batch at which the SDF will be computed. The kernel will break for too large batches; when in doubt use the default. Returns: (torch.FloatTensor): [N,] array of computed SDF values. (torch.FloatTensor): [N, 3] array of closest points (torch.FloatTensor): [N,] array of closest triangle indices """ # If not using double, accumulated error can be large and degrade model performance. 
V = V.double() points = points.double() mesh = V[F] _points = torch.split(points, split_size) split_len = len(_points) dists = [] points = [] triangles_idx = [] for split_idx, _p in enumerate(_points): print(f"Processing closest_point()... this may take up few minutes. [{split_idx + 1}/{split_len}]") # gets sdf and triangle closest to the point _p out = _C.external.mesh_to_sdf_triangle_cuda(_p.cuda().contiguous(), mesh.cuda().contiguous())[0] out_len = out.shape[0] half_len = int(out_len / 2) dist = out[:half_len] # distance to closest triangle (= sdf) hit_tidx = out[half_len:].type(torch.long) # closest triangle index # calculate (point on the triangle) which is closest to point _p hit_pts = closest_point_on_triangle(mesh.index_select(dim=0, index=hit_tidx).cuda().contiguous(), _p.cuda().contiguous()) dists.append(dist) points.append(hit_pts) triangles_idx.append(hit_tidx) return torch.cat(dists), torch.cat(points), torch.cat(triangles_idx) def sample_tex( Tp : torch.Tensor, # points [N ,2] TM : torch.Tensor, # material indices [N] materials): """Sample from a texture. Args: Tp (torch.FloatTensor): 2D coordinates to sample of shape [N, 2] TM (torch.LongTensor): Indices of the material to sample of shape [N] materials (list of material): Materials Returns: (torch.FloatTensor): RGB samples of shape [N, 3] """ max_idx = TM.max() assert(max_idx > -1 and "No materials detected! 
Check the material definiton on your mesh.") rgb = torch.zeros(Tp.shape[0], 3, device=Tp.device) Tp = (Tp * 2.0) - 1.0 # The y axis is flipped from what UV maps generally expects vs in PyTorch Tp[...,1] *= -1 for i in range(max_idx+1): mask = (TM == i) if mask.sum() == 0: continue if 'diffuse_texname' not in materials[i]: if 'diffuse' in materials[i]: rgb[mask] = materials[i]['diffuse'].to(Tp.device) continue map = materials[i]['diffuse_texname'][...,:3].permute(2, 0, 1)[None].to(Tp.device) grid = Tp[mask] grid = grid.reshape(1, grid.shape[0], 1, grid.shape[1]) _rgb = F.grid_sample(map, grid, mode='bilinear', padding_mode='reflection', align_corners=True) _rgb = _rgb[0,:,:,0].permute(1,0) rgb[mask] = _rgb return rgb The provided code snippet includes necessary dependencies for implementing the `closest_tex` function. Write a Python function `def closest_tex( V : torch.Tensor, F : torch.Tensor, TV : torch.Tensor, TF : torch.Tensor, materials, points : torch.Tensor)` to solve the following problem: Returns the closest texture for a set of points. V (torch.FloatTensor): mesh vertices of shape [V, 3] F (torch.LongTensor): mesh face indices of shape [F, 3] TV (torch.FloatTensor): TF (torch.FloatTensor): materials: points (torch.FloatTensor): sample locations of shape [N, 3] Returns: (torch.FloatTensor): texture samples of shape [N, 3] Here is the function: def closest_tex( V : torch.Tensor, F : torch.Tensor, TV : torch.Tensor, TF : torch.Tensor, materials, points : torch.Tensor): """Returns the closest texture for a set of points. 
V (torch.FloatTensor): mesh vertices of shape [V, 3] F (torch.LongTensor): mesh face indices of shape [F, 3] TV (torch.FloatTensor): TF (torch.FloatTensor): materials: points (torch.FloatTensor): sample locations of shape [N, 3] Returns: (torch.FloatTensor): texture samples of shape [N, 3] """ TV = TV.to(V.device) TF = TF.to(V.device) points = points.to(V.device) dist, hit_pts, hit_tidx = closest_point(V, F, points) hit_F = F[hit_tidx] hit_V = V[hit_F] BC = barycentric_coordinates(hit_pts.cuda(), hit_V[:,0], hit_V[:,1], hit_V[:,2]) hit_TF = TF[hit_tidx] hit_TM = hit_TF[...,3] hit_TF = hit_TF[...,:3] if TV.shape[0] > 0: hit_TV = TV[hit_TF] hit_Tp = (hit_TV * BC.unsqueeze(-1)).sum(1) else: hit_Tp = BC rgb = sample_tex(hit_Tp, hit_TM, materials) return rgb, hit_pts, dist
Returns the closest texture for a set of points. V (torch.FloatTensor): mesh vertices of shape [V, 3] F (torch.LongTensor): mesh face indices of shape [F, 3] TV (torch.FloatTensor): TF (torch.FloatTensor): materials: points (torch.FloatTensor): sample locations of shape [N, 3] Returns: (torch.FloatTensor): texture samples of shape [N, 3]
23,032
import os import sys import numpy as np import tinyobjloader import torch from PIL import Image import logging as log texopts = [ 'ambient_texname', 'diffuse_texname', 'specular_texname', 'specular_highlight_texname', 'bump_texname', 'displacement_texname', 'alpha_texname', 'reflection_texname', 'roughness_texname', 'metallic_texname', 'sheen_texname', 'emissive_texname', 'normal_texname' ] def load_mat(fname : str): """Loads material. """ img = torch.FloatTensor(np.array(Image.open(fname))) img = img / 255.0 return img The provided code snippet includes necessary dependencies for implementing the `load_obj` function. Write a Python function `def load_obj( fname : str, load_materials : bool = False)` to solve the following problem: Load .obj file using TinyOBJ and extract info. This is more robust since it can triangulate polygon meshes with up to 255 sides per face. Args: fname (str): path to Wavefront .obj file Here is the function: def load_obj( fname : str, load_materials : bool = False): """Load .obj file using TinyOBJ and extract info. This is more robust since it can triangulate polygon meshes with up to 255 sides per face. 
Args: fname (str): path to Wavefront .obj file """ assert os.path.exists(fname), \ 'Invalid file path and/or format, must be an existing Wavefront .obj' reader = tinyobjloader.ObjReader() config = tinyobjloader.ObjReaderConfig() config.triangulate = True # Ensure we don't have any polygons reader.ParseFromFile(fname, config) # Get vertices attrib = reader.GetAttrib() vertices = torch.FloatTensor(attrib.vertices).reshape(-1, 3) # Get triangle face indices shapes = reader.GetShapes() faces = [] for shape in shapes: faces += [idx.vertex_index for idx in shape.mesh.indices] faces = torch.LongTensor(faces).reshape(-1, 3) mats = {} if load_materials: # Load per-faced texture coordinate indices texf = [] matf = [] for shape in shapes: texf += [idx.texcoord_index for idx in shape.mesh.indices] matf.extend(shape.mesh.material_ids) # texf stores [tex_idx0, tex_idx1, tex_idx2, mat_idx] texf = torch.LongTensor(texf).reshape(-1, 3) matf = torch.LongTensor(matf).reshape(-1, 1) texf = torch.cat([texf, matf], dim=-1) # Load texcoords texv = torch.FloatTensor(attrib.texcoords).reshape(-1, 2) # Load texture maps parent_path = os.path.dirname(fname) materials = reader.GetMaterials() for i, material in enumerate(materials): mats[i] = {} diffuse = getattr(material, 'diffuse') if diffuse != '': mats[i]['diffuse'] = torch.FloatTensor(diffuse) for texopt in texopts: mat_path = getattr(material, texopt) if mat_path != '': img = load_mat(os.path.join(parent_path, mat_path)) mats[i][texopt] = img #mats[i][texopt.split('_')[0]] = img return vertices, faces, texv, texf, mats return vertices, faces
Load .obj file using TinyOBJ and extract info. This is more robust since it can triangulate polygon meshes with up to 255 sides per face. Args: fname (str): path to Wavefront .obj file
23,033
import torch The provided code snippet includes necessary dependencies for implementing the `normalize` function. Write a Python function `def normalize( V : torch.Tensor, F : torch.Tensor, mode : str)` to solve the following problem: Normalizes a mesh. Args: V (torch.FloatTensor): Vertices of shape [V, 3] F (torch.LongTensor): Faces of shape [F, 3] mode (str): Different methods of normalization. Returns: (torch.FloatTensor, torch.LongTensor): - Normalized Vertices - Faces Here is the function: def normalize( V : torch.Tensor, F : torch.Tensor, mode : str): """Normalizes a mesh. Args: V (torch.FloatTensor): Vertices of shape [V, 3] F (torch.LongTensor): Faces of shape [F, 3] mode (str): Different methods of normalization. Returns: (torch.FloatTensor, torch.LongTensor): - Normalized Vertices - Faces """ if mode == 'sphere': V_max, _ = torch.max(V, dim=0) V_min, _ = torch.min(V, dim=0) V_center = (V_max + V_min) / 2. V = V - V_center # Find the max distance to origin max_dist = torch.sqrt(torch.max(torch.sum(V**2, dim=-1))) V_scale = 1. / max_dist V *= V_scale return V, F elif mode == 'aabb': V_min, _ = torch.min(V, dim=0) V = V - V_min max_dist = torch.max(V) V *= 1.0 / max_dist V = V * 2.0 - 1.0 return V, F elif mode == 'planar': V_min, _ = torch.min(V, dim=0) V = V - V_min x_max = torch.max(V[...,0]) z_max = torch.max(V[...,2]) V[...,0] *= 1.0 / x_max V[...,2] *= 1.0 / z_max max_dist = torch.max(V) V[...,1] *= 1.0 / max_dist #V *= 1.0 / max_dist V = V * 2.0 - 1.0 y_min = torch.min(V[...,1]) V[...,1] -= y_min return V, F elif mode == 'none': return V, F
Normalizes a mesh. Args: V (torch.FloatTensor): Vertices of shape [V, 3] F (torch.LongTensor): Faces of shape [F, 3] mode (str): Different methods of normalization. Returns: (torch.FloatTensor, torch.LongTensor): - Normalized Vertices - Faces
23,034
import math import contextlib import os import sys import torch import numpy as np import wisp._C as _C The provided code snippet includes necessary dependencies for implementing the `compute_sdf` function. Write a Python function `def compute_sdf( V : torch.Tensor, F : torch.Tensor, points : torch.Tensor, split_size : int = 10**6)` to solve the following problem: Computes SDF given point samples and a mesh. Args: V (torch.FloatTensor): #V, 3 array of vertices F (torch.LongTensor): #F, 3 array of indices points (torch.FloatTensor): [N, 3] array of points to sample split_size (int): The batch at which the SDF will be computed. The kernel will break for too large batches; when in doubt use the default. Returns: (torch.FloatTensor): [N, 1] array of computed SDF values. Here is the function: def compute_sdf( V : torch.Tensor, F : torch.Tensor, points : torch.Tensor, split_size : int = 10**6): """Computes SDF given point samples and a mesh. Args: V (torch.FloatTensor): #V, 3 array of vertices F (torch.LongTensor): #F, 3 array of indices points (torch.FloatTensor): [N, 3] array of points to sample split_size (int): The batch at which the SDF will be computed. The kernel will break for too large batches; when in doubt use the default. Returns: (torch.FloatTensor): [N, 1] array of computed SDF values. """ mesh = V[F] _points = torch.split(points, split_size) sdfs = [] for _p in _points: sdfs.append(_C.external.mesh_to_sdf_cuda(_p.cuda().contiguous(), mesh.cuda().contiguous())[0]) return torch.cat(sdfs)[...,None]
Computes SDF given point samples and a mesh. Args: V (torch.FloatTensor): #V, 3 array of vertices F (torch.LongTensor): #F, 3 array of indices points (torch.FloatTensor): [N, 3] array of points to sample split_size (int): The batch at which the SDF will be computed. The kernel will break for too large batches; when in doubt use the default. Returns: (torch.FloatTensor): [N, 1] array of computed SDF values.
23,035
import torch from .sample_near_surface import sample_near_surface from .sample_surface import sample_surface from .sample_uniform import sample_uniform from .area_weighted_distribution import area_weighted_distribution def sample_near_surface( V : torch.Tensor, F : torch.Tensor, num_samples: int, variance : float = 0.01, distrib=None): """Sample points near the mesh surface. Args: V (torch.Tensor): #V, 3 array of vertices F (torch.Tensor): #F, 3 array of indices num_samples (int): number of surface samples distrib: distribution to use. By default, area-weighted distribution is used Returns: (torch.FloatTensor): samples of shape [num_samples, 3] """ if distrib is None: distrib = area_weighted_distribution(V, F) samples = sample_surface(V, F, num_samples, distrib)[0] samples += torch.randn_like(samples) * variance return samples def sample_surface( V : torch.Tensor, F : torch.Tensor, num_samples : int, distrib = None): """Sample points and their normals on mesh surface. Args: V (torch.Tensor): #V, 3 array of vertices F (torch.Tensor): #F, 3 array of indices num_samples (int): number of surface samples distrib: distribution to use. By default, area-weighted distribution is used Returns: (torch.FloatTensor): samples of shape [num_samples, 3] """ if distrib is None: distrib = area_weighted_distribution(V, F) # Select faces & sample their surface fidx, normals = random_face(V, F, num_samples, distrib) f = V[fidx] u = torch.sqrt(torch.rand(num_samples)).to(V.device).unsqueeze(-1) v = torch.rand(num_samples).to(V.device).unsqueeze(-1) samples = (1 - u) * f[:,0,:] + (u * (1 - v)) * f[:,1,:] + u * v * f[:,2,:] return samples, normals def sample_uniform(num_samples : int): """Sample uniformly in [-1,1] bounding volume. 
Args: num_samples(int) : number of points to sample Returns: (torch.FloatTensor): samples of shape [num_samples, 3] """ return torch.rand(num_samples, 3) * 2.0 - 1.0 def area_weighted_distribution( V : torch.Tensor, F : torch.Tensor, normals : torch.Tensor = None): """Construct discrete area weighted distribution over triangle mesh. Args: V (torch.Tensor): #V, 3 array of vertices F (torch.Tensor): #F, 3 array of indices normals (torch.Tensor): normals (if precomputed) eps (float): epsilon Returns: (torch.distributions): Distribution to be used """ if normals is None: normals = per_face_normals(V, F) areas = torch.norm(normals, p=2, dim=1) * 0.5 areas /= torch.sum(areas) + 1e-10 # Discrete PDF over triangles return torch.distributions.Categorical(areas.view(-1)) The provided code snippet includes necessary dependencies for implementing the `point_sample` function. Write a Python function `def point_sample( V : torch.Tensor, F : torch.Tensor, techniques : list, num_samples : int)` to solve the following problem: Sample points from a mesh. Args: V (torch.Tensor): #V, 3 array of vertices F (torch.Tensor): #F, 3 array of indices techniques (list[str]): list of techniques to sample with num_samples (int): points to sample per technique Returns: (torch.FloatTensor): Samples of shape [len(techniques)*num_samples, 3] Here is the function: def point_sample( V : torch.Tensor, F : torch.Tensor, techniques : list, num_samples : int): """Sample points from a mesh. 
Args: V (torch.Tensor): #V, 3 array of vertices F (torch.Tensor): #F, 3 array of indices techniques (list[str]): list of techniques to sample with num_samples (int): points to sample per technique Returns: (torch.FloatTensor): Samples of shape [len(techniques)*num_samples, 3] """ if 'trace' in techniques or 'near' in techniques: # Precompute face distribution distrib = area_weighted_distribution(V, F) samples = [] for technique in techniques: if technique =='trace': samples.append(sample_surface(V, F, num_samples, distrib=distrib)[0]) elif technique == 'near': samples.append(sample_near_surface(V, F, num_samples, distrib=distrib)) elif technique == 'rand': samples.append(sample_uniform(num_samples).to(V.device)) samples = torch.cat(samples, dim=0) return samples
Sample points from a mesh. Args: V (torch.Tensor): #V, 3 array of vertices F (torch.Tensor): #F, 3 array of indices techniques (list[str]): list of techniques to sample with num_samples (int): points to sample per technique Returns: (torch.FloatTensor): Samples of shape [len(techniques)*num_samples, 3]
23,036
import cv2 import torch The provided code snippet includes necessary dependencies for implementing the `srgb_to_linear` function. Write a Python function `def srgb_to_linear(img)` to solve the following problem: Converts from SRGB to Linear colorspace. Args: img (torch.FloatTensor): SRGB image. Returns: (torch.FloatTensor): Linear image. Here is the function: def srgb_to_linear(img): """Converts from SRGB to Linear colorspace. Args: img (torch.FloatTensor): SRGB image. Returns: (torch.FloatTensor): Linear image. """ limit = 0.04045 return torch.where(img > limit, torch.power((img + 0.055) / 1.055, 2.4), img / 12.92)
Converts from SRGB to Linear colorspace. Args: img (torch.FloatTensor): SRGB image. Returns: (torch.FloatTensor): Linear image.
23,037
import cv2 import torch The provided code snippet includes necessary dependencies for implementing the `linear_to_srgb` function. Write a Python function `def linear_to_srgb(img)` to solve the following problem: Converts from Linear to SRGB colorspace. Args: img (torch.FloatTensor): Linear image. Returns: (torch.FloatTensor): SRGB image. Here is the function: def linear_to_srgb(img): """Converts from Linear to SRGB colorspace. Args: img (torch.FloatTensor): Linear image. Returns: (torch.FloatTensor): SRGB image. """ limit = 0.0031308 img = torch.where(img > limit, 1.055 * (img ** (1.0 / 2.4)) - 0.055, 12.92 * img) img[img > 1] = 1 return img
Converts from Linear to SRGB colorspace. Args: img (torch.FloatTensor): Linear image. Returns: (torch.FloatTensor): SRGB image.
23,038
import cv2 import torch The provided code snippet includes necessary dependencies for implementing the `resize_mip` function. Write a Python function `def resize_mip(img, mip, interpolation=cv2.INTER_LINEAR)` to solve the following problem: Resize image with cv2. Args: img (torch.FloatTensor): Image of shape [H, W, 3] mip (int): Rescaling factor. Will rescale by 2**mip. interpolation: Interpolation modes used by `cv2.resize`. Returns: (torch.FloatTensor): Rescaled image of shape [H/(2**mip), W/(2**mip), 3] Here is the function: def resize_mip(img, mip, interpolation=cv2.INTER_LINEAR): """Resize image with cv2. Args: img (torch.FloatTensor): Image of shape [H, W, 3] mip (int): Rescaling factor. Will rescale by 2**mip. interpolation: Interpolation modes used by `cv2.resize`. Returns: (torch.FloatTensor): Rescaled image of shape [H/(2**mip), W/(2**mip), 3] """ resize_factor = 2**mip # WARNING: cv2 expects (w,h) for the shape. God knows why :) shape = (int(img.shape[1] // resize_factor), int(img.shape[0] // resize_factor)) img = cv2.resize(img, dsize=shape, interpolation=interpolation) return img
Resize image with cv2. Args: img (torch.FloatTensor): Image of shape [H, W, 3] mip (int): Rescaling factor. Will rescale by 2**mip. interpolation: Interpolation modes used by `cv2.resize`. Returns: (torch.FloatTensor): Rescaled image of shape [H/(2**mip), W/(2**mip), 3]
23,039
import os import glob import numpy as np import torch import torchvision The provided code snippet includes necessary dependencies for implementing the `write_exr` function. Write a Python function `def write_exr(path, data)` to solve the following problem: Writes an EXR image to some path. Data is a dict of form { "default" = rgb_array, "depth" = depth_array } Args: path (str): Path to save the EXR data (dict): Dictionary of EXR buffers. Returns: (void): Writes to path. Here is the function: def write_exr(path, data): """Writes an EXR image to some path. Data is a dict of form { "default" = rgb_array, "depth" = depth_array } Args: path (str): Path to save the EXR data (dict): Dictionary of EXR buffers. Returns: (void): Writes to path. """ try: import pyexr except: raise Exception( "Module pyexr is not available. To install, run `pip install pyexr`. " "You will likely also need `libopenexr`, which through apt you can install with " "`apt-get install libopenexr-dev` and on Windows you can install with " "`pipwin install openexr`.") pyexr.write(path, data, channel_names={'normal': ['X', 'Y', 'Z'], 'x': ['X', 'Y', 'Z'], 'view': ['X', 'Y', 'Z']}, precision=pyexr.HALF)
Writes an EXR image to some path. Data is a dict of form { "default" = rgb_array, "depth" = depth_array } Args: path (str): Path to save the EXR data (dict): Dictionary of EXR buffers. Returns: (void): Writes to path.
23,040
import os import glob import numpy as np import torch import torchvision def hwc_to_chw(img): """Converts [H,W,C] to [C,H,W] for TensorBoard output. Args: img (torch.Tensor): [H,W,C] image. Returns: (torch.Tensor): [C,H,W] image. """ return img.permute(2, 0, 1) The provided code snippet includes necessary dependencies for implementing the `write_png` function. Write a Python function `def write_png(path, data)` to solve the following problem: Writes an PNG image to some path. Args: path (str): Path to save the PNG. data (np.array): HWC image. Returns: (void): Writes to path. Here is the function: def write_png(path, data): """Writes an PNG image to some path. Args: path (str): Path to save the PNG. data (np.array): HWC image. Returns: (void): Writes to path. """ torchvision.io.write_png(hwc_to_chw(data), path)
Writes an PNG image to some path. Args: path (str): Path to save the PNG. data (np.array): HWC image. Returns: (void): Writes to path.
23,041
import os import glob import numpy as np import torch import torchvision The provided code snippet includes necessary dependencies for implementing the `glob_imgs` function. Write a Python function `def glob_imgs(path, exts=['*.png', '*.PNG', '*.jpg', '*.jpeg', '*.JPG', '*.JPEG'])` to solve the following problem: Utility to find images in some path. Args: path (str): Path to search images in. exts (list of str): List of extensions to try. Returns: (list of str): List of paths that were found. Here is the function: def glob_imgs(path, exts=['*.png', '*.PNG', '*.jpg', '*.jpeg', '*.JPG', '*.JPEG']): """Utility to find images in some path. Args: path (str): Path to search images in. exts (list of str): List of extensions to try. Returns: (list of str): List of paths that were found. """ imgs = [] for ext in exts: imgs.extend(glob.glob(os.path.join(path, ext))) return imgs
Utility to find images in some path. Args: path (str): Path to search images in. exts (list of str): List of extensions to try. Returns: (list of str): List of paths that were found.
23,042
import os import glob import numpy as np import torch import torchvision def chw_to_hwc(img): """Converts [C,H,W] to [H,W,C]. Args: img (torch.Tensor): [C,H,W] image. Returns: (torch.Tensor): [H,W,C] image. """ return img.permute(1, 2, 0) The provided code snippet includes necessary dependencies for implementing the `load_rgb` function. Write a Python function `def load_rgb(path, normalize=True)` to solve the following problem: Loads an image. Args: path (str): Path to the image. noramlize (bool): If True, will return [0,1] floating point values. Otherwise returns [0,255] ints. Returns: (np.array): Image as an array of shape [H,W,C] Here is the function: def load_rgb(path, normalize=True): """Loads an image. Args: path (str): Path to the image. noramlize (bool): If True, will return [0,1] floating point values. Otherwise returns [0,255] ints. Returns: (np.array): Image as an array of shape [H,W,C] """ img = torchvision.io.read_image(path) if normalize: img = img.float() / 255.0 return np.array(chw_to_hwc(img))
Loads an image. Args: path (str): Path to the image. noramlize (bool): If True, will return [0,1] floating point values. Otherwise returns [0,255] ints. Returns: (np.array): Image as an array of shape [H,W,C]
23,043
import skimage import skimage.metrics import numpy as np import torch The provided code snippet includes necessary dependencies for implementing the `psnr` function. Write a Python function `def psnr(rgb, gts)` to solve the following problem: Calculate the PSNR metric. Assumes the RGB image is in [0,1] Args: rgb (torch.FloatTensor): Image tensor of shape [H,W,3] gts (torch.FloatTensor): Image tensor of shape [H,W,3] Returns: (float): The PSNR score Here is the function: def psnr(rgb, gts): """Calculate the PSNR metric. Assumes the RGB image is in [0,1] Args: rgb (torch.FloatTensor): Image tensor of shape [H,W,3] gts (torch.FloatTensor): Image tensor of shape [H,W,3] Returns: (float): The PSNR score """ assert (rgb.max() <= 1.05 and rgb.min() >= -0.05) assert (gts.max() <= 1.05 and gts.min() >= -0.05) assert (rgb.shape[-1] == 3) assert (gts.shape[-1] == 3) mse = torch.mean((rgb[..., :3] - gts[..., :3]) ** 2).item() return 10 * np.log10(1.0 / mse)
Calculate the PSNR metric. Assumes the RGB image is in [0,1] Args: rgb (torch.FloatTensor): Image tensor of shape [H,W,3] gts (torch.FloatTensor): Image tensor of shape [H,W,3] Returns: (float): The PSNR score
23,044
import skimage import skimage.metrics import numpy as np import torch The provided code snippet includes necessary dependencies for implementing the `lpips` function. Write a Python function `def lpips(rgb, gts, lpips_model=None)` to solve the following problem: Calculate the LPIPS metric. Assumes the RGB image is in [0,1] Args: rgb (torch.FloatTensor): Image tensor of shape [H,W,3] gts (torch.FloatTensor): Image tensor of shape [H,W,3] Returns: (float): The LPIPS score Here is the function: def lpips(rgb, gts, lpips_model=None): """Calculate the LPIPS metric. Assumes the RGB image is in [0,1] Args: rgb (torch.FloatTensor): Image tensor of shape [H,W,3] gts (torch.FloatTensor): Image tensor of shape [H,W,3] Returns: (float): The LPIPS score """ try: from lpips import LPIPS except: raise Exception( "Module lpips not available. To install, run `pip install lpips`") assert (rgb.max() <= 1.05 and rgb.min() >= -0.05) assert (gts.max() <= 1.05 and gts.min() >= -0.05) assert (rgb.shape[-1] == 3) assert (gts.shape[-1] == 3) if lpips_model is None: lpips_model = LPIPS(net='vgg').cuda() return lpips_model( (2.0 * rgb[..., :3] - 1.0).permute(2, 0, 1), (2.0 * gts[..., :3] - 1.0).permute(2, 0, 1)).mean().item()
Calculate the LPIPS metric. Assumes the RGB image is in [0,1] Args: rgb (torch.FloatTensor): Image tensor of shape [H,W,3] gts (torch.FloatTensor): Image tensor of shape [H,W,3] Returns: (float): The LPIPS score
23,045
import skimage import skimage.metrics import numpy as np import torch The provided code snippet includes necessary dependencies for implementing the `ssim` function. Write a Python function `def ssim(rgb, gts)` to solve the following problem: Calculate the SSIM metric. Assumes the RGB image is in [0,1] Args: rgb (torch.FloatTensor): Image tensor of shape [H,W,3] gts (torch.FloatTensor): Image tensor of shape [H,W,3] Returns: (float): The SSIM score Here is the function: def ssim(rgb, gts): """Calculate the SSIM metric. Assumes the RGB image is in [0,1] Args: rgb (torch.FloatTensor): Image tensor of shape [H,W,3] gts (torch.FloatTensor): Image tensor of shape [H,W,3] Returns: (float): The SSIM score """ assert (rgb.max() <= 1.05 and rgb.min() >= -0.05) assert (gts.max() <= 1.05 and gts.min() >= -0.05) return skimage.metrics.structural_similarity( rgb[..., :3].cpu().numpy(), gts[..., :3].cpu().numpy(), data_range=1, gaussian_weights=True, sigma=1.5, channel_axis=-1)
Calculate the SSIM metric. Assumes the RGB image is in [0,1] Args: rgb (torch.FloatTensor): Image tensor of shape [H,W,3] gts (torch.FloatTensor): Image tensor of shape [H,W,3] Returns: (float): The SSIM score
23,046
import numpy as np import torch import wisp._C as _C The provided code snippet includes necessary dependencies for implementing the `find_depth_bound` function. Write a Python function `def find_depth_bound(query, nug_depth, info, curr_idxes=None)` to solve the following problem: r"""Associate query points to the closest depth bound in-order. TODO: Document the input. Here is the function: def find_depth_bound(query, nug_depth, info, curr_idxes=None): r"""Associate query points to the closest depth bound in-order. TODO: Document the input. """ if curr_idxes is None: curr_idxes = torch.nonzero(info).contiguous() return _C.render.find_depth_bound_cuda(query.contiguous(), curr_idxes.contiguous(), nug_depth.contiguous())
r"""Associate query points to the closest depth bound in-order. TODO: Document the input.
23,047
import numpy as np import torch import wisp._C as _C The provided code snippet includes necessary dependencies for implementing the `sample_unif_sphere` function. Write a Python function `def sample_unif_sphere(n)` to solve the following problem: Sample uniformly random points on a sphere. Args: n (int): Number of samples. Returns: (np.array): Positions of shape [n, 3] Here is the function: def sample_unif_sphere(n): """Sample uniformly random points on a sphere. Args: n (int): Number of samples. Returns: (np.array): Positions of shape [n, 3] """ u = np.random.rand(2, n) z = 1 - 2*u[0,:] r = np.sqrt(1. - z * z) phi = 2 * np.pi * u[1,:] xyz = np.array([r * np.cos(phi), r * np.sin(phi), z]).transpose() return xyz
Sample uniformly random points on a sphere. Args: n (int): Number of samples. Returns: (np.array): Positions of shape [n, 3]
23,048
import numpy as np import torch import wisp._C as _C The provided code snippet includes necessary dependencies for implementing the `sample_fib_sphere` function. Write a Python function `def sample_fib_sphere(n)` to solve the following problem: Evenly distributed points on sphere using Fibonnaci sequence. From <http://extremelearning.com.au/evenly-distributing-points-on-a-sphere> WARNING: Order is not randomized. Args: n (int): Number of samples. Returns: (np.array): Positions of shape [n, 3] Here is the function: def sample_fib_sphere(n): """ Evenly distributed points on sphere using Fibonnaci sequence. From <http://extremelearning.com.au/evenly-distributing-points-on-a-sphere> WARNING: Order is not randomized. Args: n (int): Number of samples. Returns: (np.array): Positions of shape [n, 3] """ i = np.arange(0, n, dtype=float) + 0.5 phi = np.arccos(1 - 2*i/n) golden_ratio = (1 + 5**0.5)/2 theta = 2. * np.pi * i / golden_ratio xyz = np.array([np.cos(theta) * np.sin(phi), np.sin(theta) * np.sin(phi), np.cos(phi)]).transpose() return xyz
Evenly distributed points on sphere using Fibonnaci sequence. From <http://extremelearning.com.au/evenly-distributing-points-on-a-sphere> WARNING: Order is not randomized. Args: n (int): Number of samples. Returns: (np.array): Positions of shape [n, 3]
23,049
import numpy as np import torch import wisp._C as _C def normalized_grid(height, width, jitter=False, device='cuda', use_aspect=True): """Returns grid[x,y] -> coordinates for a normalized window. This is generally confusing and terrible, but in the [XYZ] 3D space, the width generally corresponds to the XZ axis and the height corresponds to the Y axis. So in the normalized [XY] space, width also corresponds to the X and the height corresponds to Y. However, PyTorch follows HW ordering. So that means, given a [HW] image, the [H+1, W] coordinate will see an increase in the Y axis (the 2nd axis) of the actual global coordinates. Args: height (int): grid height width (int): grid width jitter (bool): If True, will jitter the coordinates. device (str): Device to allocate the grid on. use_aspect (bool): If True, will scale the coords by the aspect ratio. Returns: (torch.FloatTensor): Coords tensor of shape [H, W, 2] """ window_x = torch.linspace(-1, 1, steps=width, device=device) window_y = torch.linspace(1, -1, steps=height, device=device) if jitter: window_x += (2.0 * torch.rand(*window_x.shape, device=device) - 1.0) * (1. / width) window_y += (2.0 * torch.rand(*window_y.shape, device=device) - 1.0) * (1. / height) if use_aspect: if width > height: window_x = window_x * (width / height) elif height > width: window_y = window_y * (height / width) coord = torch.stack(torch.meshgrid(window_x, window_y)).permute(2, 1, 0) # torch>=1.10: indexing='ij' (default) return coord The provided code snippet includes necessary dependencies for implementing the `normalized_slice` function. Write a Python function `def normalized_slice(height, width, dim=0, depth=0.0, device='cuda')` to solve the following problem: Returns a set of 3D coordinates for a slicing plane. Args: height (int): Grid height. width (int): Grid width. dim (int): Dimension to slice along. depth (float): The depth (from the 0 on the axis) for which the slicing will happen. 
device (str): Device to allocate the grid on. Returns: (torch.FloatTensor): Coords tensor of shape [height, width, 3]. Here is the function: def normalized_slice(height, width, dim=0, depth=0.0, device='cuda'): """Returns a set of 3D coordinates for a slicing plane. Args: height (int): Grid height. width (int): Grid width. dim (int): Dimension to slice along. depth (float): The depth (from the 0 on the axis) for which the slicing will happen. device (str): Device to allocate the grid on. Returns: (torch.FloatTensor): Coords tensor of shape [height, width, 3]. """ window = normalized_grid(height, width, device) depth_pts = torch.ones(height, width, 1, device=device) * depth if dim == 0: pts = torch.cat([depth_pts, window[..., 0:1], window[..., 1:2]], dim=-1) elif dim == 1: pts = torch.cat([window[..., 0:1], depth_pts, window[..., 1:2]], dim=-1) elif dim == 2: pts = torch.cat([window[..., 0:1], window[..., 1:2], depth_pts], dim=-1) else: assert (False, "dim is invalid!") pts[..., 1] *= -1 return pts
Returns a set of 3D coordinates for a slicing plane. Args: height (int): Grid height. width (int): Grid width. dim (int): Dimension to slice along. depth (float): The depth (from the 0 on the axis) for which the slicing will happen. device (str): Device to allocate the grid on. Returns: (torch.FloatTensor): Coords tensor of shape [height, width, 3].
23,050
import numpy as np import torch import wisp._C as _C The provided code snippet includes necessary dependencies for implementing the `spherical_envmap_numpy` function. Write a Python function `def spherical_envmap_numpy(ray_dir, normal)` to solve the following problem: Computes matcap UV-coordinates from the ray direction and normal. Args: ray_dir (torch.Tensor): incoming ray direction of shape [...., 3] normal (torch.Tensor): surface normal of shape [..., 3] Returns: (torch.FloatTensor): UV coordinates of shape [..., 2] Here is the function: def spherical_envmap_numpy(ray_dir, normal): """Computes matcap UV-coordinates from the ray direction and normal. Args: ray_dir (torch.Tensor): incoming ray direction of shape [...., 3] normal (torch.Tensor): surface normal of shape [..., 3] Returns: (torch.FloatTensor): UV coordinates of shape [..., 2] """ ray_dir_screen = ray_dir * np.array([1, 1, -1]) # Calculate reflection ray_dir_normal_dot = np.sum(normal * ray_dir_screen, axis=-1)[..., np.newaxis] r = ray_dir_screen - 2.0 * ray_dir_normal_dot * normal m = 2.0 * np.sqrt(r[..., 0] ** 2 + r[..., 1] ** 2 + (r[..., 2] - 1) ** 2) vN = (r[..., :2] / m[..., np.newaxis]) + 0.5 vN = 1.0 - vN vN = vN[..., :2].reshape(-1, 2) vN = np.clip(vN, 0, 1) vN[np.isnan(vN)] = 0 return vN
Computes matcap UV-coordinates from the ray direction and normal. Args: ray_dir (torch.Tensor): incoming ray direction of shape [...., 3] normal (torch.Tensor): surface normal of shape [..., 3] Returns: (torch.FloatTensor): UV coordinates of shape [..., 2]
23,051
import torch The provided code snippet includes necessary dependencies for implementing the `normalize_pointcloud` function. Write a Python function `def normalize_pointcloud(coords, return_scale=False)` to solve the following problem: Normalizes pointcloud to an AABB within [-1, 1]. Args: coords (torch.FloatTensor): 3D coordinates of shape [N, 3] return_scale (bool): If True, will return the center of the cloud and the scaling factor. Returns: (torch.FloatTensor) or (torch.FloatTensor, torch.FloatTensor, float): - Normalized 3D coordinates of shape [N, 3] - Center of the cloud of shape [3] - Scaling factor (floating point value) Here is the function: def normalize_pointcloud(coords, return_scale=False): """Normalizes pointcloud to an AABB within [-1, 1]. Args: coords (torch.FloatTensor): 3D coordinates of shape [N, 3] return_scale (bool): If True, will return the center of the cloud and the scaling factor. Returns: (torch.FloatTensor) or (torch.FloatTensor, torch.FloatTensor, float): - Normalized 3D coordinates of shape [N, 3] - Center of the cloud of shape [3] - Scaling factor (floating point value) """ coords_max, _ = torch.max(coords, dim=0) coords_min, _ = torch.min(coords, dim=0) coords_center = (coords_max + coords_min) / 2.0 # AABB normalize coords = coords - coords_center max_dist = torch.max(coords) coords_scale = 1.0 / max_dist coords *= coords_scale if return_scale: return coords, coords_center, coords_scale else: return coords
Normalizes pointcloud to an AABB within [-1, 1]. Args: coords (torch.FloatTensor): 3D coordinates of shape [N, 3] return_scale (bool): If True, will return the center of the cloud and the scaling factor. Returns: (torch.FloatTensor) or (torch.FloatTensor, torch.FloatTensor, float): - Normalized 3D coordinates of shape [N, 3] - Center of the cloud of shape [3] - Scaling factor (floating point value)
23,052
import torch The provided code snippet includes necessary dependencies for implementing the `create_pointcloud_from_images` function. Write a Python function `def create_pointcloud_from_images(rgbs, masks, rays, depths)` to solve the following problem: Given depth images, will create a RGB pointcloud. TODO (ttakikawa): Probably make the input a tensor not a list... Args: rgbs (list of torch.FloatTensor): List of RGB tensors of shape [H, W, 3]. masks (list of torch.FloatTensor): List of mask tensors of shape [H, W, 1]. rays (list of wisp.core.Rays): List of rays.origins and rays.dirs of shape [H, W, 3]. depths (list of torch.FloatTensor): List of depth tensors of shape [H, W, 1]. Returns: (torch.FloatTensor, torch.FloatTensor): - 3D coordinates of shape [N*H*W, 3] - colors of shape [N*H*W, 3] Here is the function: def create_pointcloud_from_images(rgbs, masks, rays, depths): """Given depth images, will create a RGB pointcloud. TODO (ttakikawa): Probably make the input a tensor not a list... Args: rgbs (list of torch.FloatTensor): List of RGB tensors of shape [H, W, 3]. masks (list of torch.FloatTensor): List of mask tensors of shape [H, W, 1]. rays (list of wisp.core.Rays): List of rays.origins and rays.dirs of shape [H, W, 3]. depths (list of torch.FloatTensor): List of depth tensors of shape [H, W, 1]. Returns: (torch.FloatTensor, torch.FloatTensor): - 3D coordinates of shape [N*H*W, 3] - colors of shape [N*H*W, 3] """ cloud_coords = [] cloud_colors = [] for i in range(len(rgbs)): mask = masks[i].bool() h, w = mask.shape[:2] mask = mask.reshape(h, w) depth = depths[i].reshape(h, w, 1) assert(len(mask.shape) == 2 and "Mask shape is not correct... it should be [H,W], check size here") coords = rays[i].origins[mask] + rays[i].dirs[mask] * depth[mask] colors = rgbs[i][mask] cloud_coords.append(coords.reshape(-1, 3)) cloud_colors.append(colors[...,:3].reshape(-1, 3)) return torch.cat(cloud_coords, dim=0), torch.cat(cloud_colors, dim=0)
Given depth images, will create a RGB pointcloud. TODO (ttakikawa): Probably make the input a tensor not a list... Args: rgbs (list of torch.FloatTensor): List of RGB tensors of shape [H, W, 3]. masks (list of torch.FloatTensor): List of mask tensors of shape [H, W, 1]. rays (list of wisp.core.Rays): List of rays.origins and rays.dirs of shape [H, W, 3]. depths (list of torch.FloatTensor): List of depth tensors of shape [H, W, 1]. Returns: (torch.FloatTensor, torch.FloatTensor): - 3D coordinates of shape [N*H*W, 3] - colors of shape [N*H*W, 3]
23,053
import torch def compute_sdf_iou(pred, gts): """Compute intersection over union for SDFs. Args: pred (torch.FloatTensor): Predicted signed distances gts (torch.FloatTensor): Groundtruth signed distances Returns: (float): The IOU score between 0 and 100. """ inside_pred = (pred < 0).byte() inside_gts = (gts < 0).byte() area_union = torch.sum((inside_pred | inside_gts).float()).item() area_intersect = torch.sum((inside_pred & inside_gts).float()).item() iou = area_intersect / area_union return 100.0 * iou The provided code snippet includes necessary dependencies for implementing the `compute_sparse_sdf_iou` function. Write a Python function `def compute_sparse_sdf_iou(nef, coords, gts, lod_idx=0)` to solve the following problem: Given a sparse SDF neural field, coordinates, and ground truth SDF, will calculate the narrowband IOU. In the case where the point does not exist in the bounds of the octree, will simply calculate those as intersections. Inputs: nef (wisp.models.NeuralFields) : The neural field. Assumed to be sparse. coords (torch.FloatTensor) : 3D coordinates of shape [N, 3] gts (torch.FloatTensor) : Ground truth SDF of shape [N, 1] lod_idx : level of detail (if specified) Returns: (float): The calculated IOU Here is the function: def compute_sparse_sdf_iou(nef, coords, gts, lod_idx=0): """Given a sparse SDF neural field, coordinates, and ground truth SDF, will calculate the narrowband IOU. In the case where the point does not exist in the bounds of the octree, will simply calculate those as intersections. Inputs: nef (wisp.models.NeuralFields) : The neural field. Assumed to be sparse. 
coords (torch.FloatTensor) : 3D coordinates of shape [N, 3] gts (torch.FloatTensor) : Ground truth SDF of shape [N, 1] lod_idx : level of detail (if specified) Returns: (float): The calculated IOU """ if nef.grid is None: raise Exception(f"{nef.__class__.__name__} is incompatible with this function.") pred = torch.zeros([coords.shape[0], 1]) query_results = nef.grid.query(gts, lod_idx=lod_idx) pidx = query_results.pidx mask = (pidx != -1) pred[mask] = nef(coords=gts[mask], pidx=pidx[mask], lod_idx=lod_idx, channels="sdf").cpu() pred[~mask] = gts[~mask] return compute_sdf_iou(pred, gts)
Given a sparse SDF neural field, coordinates, and ground truth SDF, will calculate the narrowband IOU. In the case where the point does not exist in the bounds of the octree, will simply calculate those as intersections. Inputs: nef (wisp.models.NeuralFields) : The neural field. Assumed to be sparse. coords (torch.FloatTensor) : 3D coordinates of shape [N, 3] gts (torch.FloatTensor) : Ground truth SDF of shape [N, 1] lod_idx : level of detail (if specified) Returns: (float): The calculated IOU
23,054
import torch The provided code snippet includes necessary dependencies for implementing the `sample_spc` function. Write a Python function `def sample_spc( corners : torch.Tensor, level : int, num_samples : int)` to solve the following problem: Sample uniformly in [-1,1] bounding volume within SPC voxels Args: corners (tensor) : set of corners to sample from level (int) : level to sample from num_samples (int) : number of points to sample Returns: (torch.FloatTensor): samples of shape [num_samples, 3] Here is the function: def sample_spc( corners : torch.Tensor, level : int, num_samples : int): """Sample uniformly in [-1,1] bounding volume within SPC voxels Args: corners (tensor) : set of corners to sample from level (int) : level to sample from num_samples (int) : number of points to sample Returns: (torch.FloatTensor): samples of shape [num_samples, 3] """ res = 2.0**level samples = torch.rand(corners.shape[0], num_samples, 3, device=corners.device) samples = corners[...,None,:3] + samples samples = samples.reshape(-1, 3) samples /= res return samples * 2.0 - 1.0
Sample uniformly in [-1,1] bounding volume within SPC voxels Args: corners (tensor) : set of corners to sample from level (int) : level to sample from num_samples (int) : number of points to sample Returns: (torch.FloatTensor): samples of shape [num_samples, 3]
23,055
import torch The provided code snippet includes necessary dependencies for implementing the `sample_from_depth_intervals` function. Write a Python function `def sample_from_depth_intervals(depth_intervals, num_samples)` to solve the following problem: Convert depth intervals to samples. SPC raytrace will return a [num_nuggets, 2] array where the first element is the entry depth and the second element is the exit depth. This function will convert them into a [num_nuggets, num_samples, 3] array of samples. Args: depth_intervals (torch.FloatTensor): intervals of shape [num_nuggets, 2] num_samples (int): sample size Returns: (torch.FloatTensor): Samples of shape [num_nuggets, num_samples, 3] Here is the function: def sample_from_depth_intervals(depth_intervals, num_samples): """Convert depth intervals to samples. SPC raytrace will return a [num_nuggets, 2] array where the first element is the entry depth and the second element is the exit depth. This function will convert them into a [num_nuggets, num_samples, 3] array of samples. Args: depth_intervals (torch.FloatTensor): intervals of shape [num_nuggets, 2] num_samples (int): sample size Returns: (torch.FloatTensor): Samples of shape [num_nuggets, num_samples, 3] """ device = depth_intervals.device steps = torch.arange(num_samples, device=device)[None].float().repeat([depth_intervals.shape[0], 1]) steps += torch.rand_like(steps) steps *= (1.0 / num_samples) samples = depth_intervals[..., 0:1] + (depth_intervals[..., 1:2] - depth_intervals[..., 0:1]) * steps return samples
Convert depth intervals to samples. SPC raytrace will return a [num_nuggets, 2] array where the first element is the entry depth and the second element is the exit depth. This function will convert them into a [num_nuggets, num_samples, 3] array of samples. Args: depth_intervals (torch.FloatTensor): intervals of shape [num_nuggets, 2] num_samples (int): sample size Returns: (torch.FloatTensor): Samples of shape [num_nuggets, num_samples, 3]
23,056
import torch The provided code snippet includes necessary dependencies for implementing the `expand_pack_boundary` function. Write a Python function `def expand_pack_boundary(pack_boundary, num_samples)` to solve the following problem: Expands the pack boundaries according to the number of samples. Args: pack_boundary (torch.BoolTensor): pack boundaries [N] num_samples (int): Number of samples Returns: (torch.BoolTensor): pack boundaries of shape [N*num_samples] Here is the function: def expand_pack_boundary(pack_boundary, num_samples): """Expands the pack boundaries according to the number of samples. Args: pack_boundary (torch.BoolTensor): pack boundaries [N] num_samples (int): Number of samples Returns: (torch.BoolTensor): pack boundaries of shape [N*num_samples] """ bigpack_boundary = torch.zeros(pack_boundary.shape[0]*num_samples, device=pack_boundary.device).bool() bigpack_boundary[pack_boundary.nonzero().long() * num_samples] = True bigpack_boundary = bigpack_boundary.int() return bigpack_boundary
Expands the pack boundaries according to the number of samples. Args: pack_boundary (torch.BoolTensor): pack boundaries [N] num_samples (int): Number of samples Returns: (torch.BoolTensor): pack boundaries of shape [N*num_samples]
23,057
import torch import numpy as np import kaolin.ops.spc as spc_ops The provided code snippet includes necessary dependencies for implementing the `create_dense_octree` function. Write a Python function `def create_dense_octree(level)` to solve the following problem: Creates a dense SPC model Args: level (int): The level at which the octree will be initialized to. Returns: (torch.ByteTensor): the octree tensor Here is the function: def create_dense_octree(level): """Creates a dense SPC model Args: level (int): The level at which the octree will be initialized to. Returns: (torch.ByteTensor): the octree tensor """ coords = np.arange(2**level) points = np.array(np.meshgrid(coords, coords, coords, indexing='xy')) points = points.transpose(3,2,1,0).reshape(-1, 3) points = torch.from_numpy(points).short().cuda() octree = spc_ops.unbatched_points_to_octree(points, level) return octree
Creates a dense SPC model Args: level (int): The level at which the octree will be initialized to. Returns: (torch.ByteTensor): the octree tensor
23,058
import torch import numpy as np import kaolin.ops.spc as spc_ops The provided code snippet includes necessary dependencies for implementing the `make_trilinear_spc` function. Write a Python function `def make_trilinear_spc(points, pyramid)` to solve the following problem: Builds a trilinear spc from a regular spc. Args: points (torch.ShortTensor): The point_hierarchy. pyramid (torch.LongTensor): The pyramid tensor. Returns: (torch.ShortTensor, torch.LongTensor, torch.LongTensor, torch.LongTensor) - The dual point_hierarchy. - The dual pyramid. - The trinkets. - The parent pointers. Here is the function: def make_trilinear_spc(points, pyramid): """Builds a trilinear spc from a regular spc. Args: points (torch.ShortTensor): The point_hierarchy. pyramid (torch.LongTensor): The pyramid tensor. Returns: (torch.ShortTensor, torch.LongTensor, torch.LongTensor, torch.LongTensor) - The dual point_hierarchy. - The dual pyramid. - The trinkets. - The parent pointers. """ points_dual, pyramid_dual = spc_ops.unbatched_make_dual(points, pyramid) trinkets, parents = spc_ops.unbatched_make_trinkets(points, pyramid, points_dual, pyramid_dual) return points_dual, pyramid_dual, trinkets, parents
Builds a trilinear spc from a regular spc. Args: points (torch.ShortTensor): The point_hierarchy. pyramid (torch.LongTensor): The pyramid tensor. Returns: (torch.ShortTensor, torch.LongTensor, torch.LongTensor, torch.LongTensor) - The dual point_hierarchy. - The dual pyramid. - The trinkets. - The parent pointers.
23,059
import torch import kaolin.ops.spc as spc_ops import wisp.ops.mesh as mesh_ops from wisp.ops.spc.processing import dilate_points def dilate_points(points, level): """Dilates the octree points. Args: points (torch.ShortTensor): The SPC points from some level level (int): The level from which the points come from Returns: (torch.ShortTensor): The dilated points """ _x = torch.ShortTensor([[1 ,0 ,0]]).to(points.device) _y = torch.ShortTensor([[0 ,1 ,0]]).to(points.device) _z = torch.ShortTensor([[0 ,0 ,1]]).to(points.device) points = torch.cat([ points + _x, points - _x, points + _y, points - _y, points + _z, points - _z, points + _x + _y, points + _x - _y, points + _x + _z, points + _x - _z, points + _y + _x, points + _y - _x, points + _y + _z, points + _y - _z, points + _z + _x, points + _z - _x, points + _z + _y, points + _z - _y, points + _x + _y + _z, points + _x + _y - _z, points + _x - _y + _z, points + _x - _y - _z, points - _x + _y + _z, points - _x + _y - _z, points - _x - _y + _z, points - _x - _y - _z, ], dim=0) points = torch.clip(points, 0, 2** level - 1) unique, unique_keys, unique_counts = torch.unique(points.contiguous(), dim=0, return_inverse=True, return_counts=True) morton, keys = torch.sort(spc_ops.points_to_morton(unique.contiguous()).contiguous()) points = spc_ops.morton_to_points(morton.contiguous()) return points The provided code snippet includes necessary dependencies for implementing the `pointcloud_to_octree` function. Write a Python function `def pointcloud_to_octree(pointcloud, level, attributes=None, dilate=0)` to solve the following problem: Converts floating point coordinates to an octree. Args: pointcloud (torch.FloatTensor): 3D coordinates in [-1, 1] of shape [N, 3] level (int): Depth of the octreee attributes (torch.FloatTensor): Attributes of shape [N, F]. Will be averaged within voxels. dilate (int): Dilates the octree if specified. 
Returns: (torch.ByteTensor) or (torch.ByteTensor, torch.FloatTensor) - octree tensor - the averaged attributes Here is the function: def pointcloud_to_octree(pointcloud, level, attributes=None, dilate=0): """Converts floating point coordinates to an octree. Args: pointcloud (torch.FloatTensor): 3D coordinates in [-1, 1] of shape [N, 3] level (int): Depth of the octreee attributes (torch.FloatTensor): Attributes of shape [N, F]. Will be averaged within voxels. dilate (int): Dilates the octree if specified. Returns: (torch.ByteTensor) or (torch.ByteTensor, torch.FloatTensor) - octree tensor - the averaged attributes """ points = spc_ops.quantize_points(pointcloud.contiguous().cuda(), level) for i in range(dilate): points = dilate_points(points, level) unique, unique_keys, unique_counts = torch.unique(points.contiguous(), dim=0, return_inverse=True, return_counts=True) morton, keys = torch.sort(spc_ops.points_to_morton(unique.contiguous()).contiguous()) points = spc_ops.morton_to_points(morton.contiguous()) octree = spc_ops.unbatched_points_to_octree(points, level, sorted=True) if attributes is not None: att = torch.zeros_like(unique).float() att = att.index_add_(0, unique_keys, attributes) / unique_counts[... ,None].float() att = att[keys] return octree, att return octree
Converts floating point coordinates to an octree. Args: pointcloud (torch.FloatTensor): 3D coordinates in [-1, 1] of shape [N, 3] level (int): Depth of the octreee attributes (torch.FloatTensor): Attributes of shape [N, F]. Will be averaged within voxels. dilate (int): Dilates the octree if specified. Returns: (torch.ByteTensor) or (torch.ByteTensor, torch.FloatTensor) - octree tensor - the averaged attributes
23,060
import torch import kaolin.ops.spc as spc_ops import wisp.ops.mesh as mesh_ops from wisp.ops.spc.processing import dilate_points def mesh_to_spc(vertices, faces, level, num_samples=100000000): """Construct SPC from a mesh. Args: vertices (torch.FloatTensor): Vertices of shape [V, 3] faces (torch.LongTensor): Face indices of shape [F, 3] level (int): The level of the octree num_samples (int): The number of samples to be generated on the mesh surface. Returns: (torch.ByteTensor, torch.ShortTensor, torch.LongTensor, torch.BoolTensor): - the octree tensor - point hierarchy - pyramid - prefix """ octree = mesh_to_octree(vertices, faces, level, num_samples) points, pyramid, prefix = octree_to_spc(octree) return octree, points, pyramid, prefix The provided code snippet includes necessary dependencies for implementing the `mesh_to_trilinear_spc` function. Write a Python function `def mesh_to_trilinear_spc(vertices, faces, level)` to solve the following problem: Builds a trilinear spc from a regular spc. Args: vertices (torch.FloatTensor): Vertices of shape [V, 3] faces (torch.LongTensor): Face indices of shape [F, 3] level (int): The level of the octree Returns: (torch.ByteTensor, torch.ShortTensor, torch.LongTensor, torch.BoolTensor, torch.ShortTensor, torch.LongTensor, torch.LongTensor, torch.LongTensor) - the octree tensor - point hierarchy - pyramid - prefix - The dual point_hierarchy. - The dual pyramid. - The trinkets. - The parent pointers. Here is the function: def mesh_to_trilinear_spc(vertices, faces, level): """Builds a trilinear spc from a regular spc. Args: vertices (torch.FloatTensor): Vertices of shape [V, 3] faces (torch.LongTensor): Face indices of shape [F, 3] level (int): The level of the octree Returns: (torch.ByteTensor, torch.ShortTensor, torch.LongTensor, torch.BoolTensor, torch.ShortTensor, torch.LongTensor, torch.LongTensor, torch.LongTensor) - the octree tensor - point hierarchy - pyramid - prefix - The dual point_hierarchy. - The dual pyramid. 
- The trinkets. - The parent pointers. """ """Builds a trilinear spc from a mesh""" octree, points, pyramid, prefix = mesh_to_spc(vertices, faces, level) points_dual, pyramid_dual, trinkets, parents = build_trilinear_spc(points, pyramid) return octree, points, pyramid, prefix, points_dual, pyramid_dual, trinkets, parents
Builds a trilinear spc from a regular spc. Args: vertices (torch.FloatTensor): Vertices of shape [V, 3] faces (torch.LongTensor): Face indices of shape [F, 3] level (int): The level of the octree Returns: (torch.ByteTensor, torch.ShortTensor, torch.LongTensor, torch.BoolTensor, torch.ShortTensor, torch.LongTensor, torch.LongTensor, torch.LongTensor) - the octree tensor - point hierarchy - pyramid - prefix - The dual point_hierarchy. - The dual pyramid. - The trinkets. - The parent pointers.
23,061
import torch The provided code snippet includes necessary dependencies for implementing the `total_variation` function. Write a Python function `def total_variation(pidx, trinkets, features, level)` to solve the following problem: Calculates total variation for the voxels specified by the pidx. Args: pidx : int tensor of size [N] specifying the point indices to calculate TV on. trinkets : the trinkets. features : the features for the given level. (assumes the correct level is given) level : int specifying the level of spc Returns: (torch.FloatTensor): Total variation of shape [1] Here is the function: def total_variation(pidx, trinkets, features, level): """Calculates total variation for the voxels specified by the pidx. Args: pidx : int tensor of size [N] specifying the point indices to calculate TV on. trinkets : the trinkets. features : the features for the given level. (assumes the correct level is given) level : int specifying the level of spc Returns: (torch.FloatTensor): Total variation of shape [1] """ resolution = 2**level # N, 8, F tensor of features feats = features[trinkets.index_select(0, pidx).long()] # N, F diff_x = ((torch.abs(feats[:, [4,5,6,7]] - feats[:, [0,1,2,3]]) / resolution)**2).sum((1,2)) diff_y = ((torch.abs(feats[:, [2,3,6,7]] - feats[:, [0,1,4,5]]) / resolution)**2).sum((1,2)) diff_z = ((torch.abs(feats[:, [1,3,5,7]] - feats[:, [0,2,4,6]]) / resolution)**2).sum((1,2)) return diff_x + diff_y + diff_z
Calculates total variation for the voxels specified by the pidx. Args: pidx : int tensor of size [N] specifying the point indices to calculate TV on. trinkets : the trinkets. features : the features for the given level. (assumes the correct level is given) level : int specifying the level of spc Returns: (torch.FloatTensor): Total variation of shape [1]
23,062
from typing import Union, Type, TYPE_CHECKING, List, Callable, Any, Optional import dataclasses The provided code snippet includes necessary dependencies for implementing the `autoconfig` function. Write a Python function `def autoconfig(*classes_and_callables: Type, exclude: List[Callable] = None) -> Any` to solve the following problem: Generates a list of Config dataclasses for each of the classes or functions (i.e. specific constructors). The class constructors / callables must be type annotated for this function to succeed. Otherwise, see configure(). Specifically, this function will: 1. Inspect the given classes in classes_and_callables and extract all constructors found. 2. Add specific functions from classes_and_callables to the list of configs to generate. 3. Build a config dataclass for each constructor variant, using hydra_zen. The dataclass fields should mirror the constructor's arguments. Types not supported by the config are recorded an internal field and removed. 4. Annotate each config with a CLI subcommand. Later, these subcommands are used by tyro to choose which config class to load, among this generated set. 5. If more than one config class is generated, this function returns a Union of configs. The configuration options are determined by the contents of the union. A constructor in wisp is assumed to be simplistic, and defined as either: (1) __init__ explicitly defined within the class body. (2) A @classmethod that is annotated as returning the same class type, see example below. Other cases, i.e. __new__ keyword, are not explicitly supported. Role of this function in config lifecycle: - Instances of these dataclasses can be populated by calling parse_config(), the CLI / reading config yaml contents. - Config dataclasses can be directly converted to actual instances with instantiate(). Usage example: ``` class Foo: def __init__(): ... @classmethod def from_X() -> Foo class Bar: def __init__(): ... 
@classmethod def from_X() -> Bar @classmethod def from_Y() -> Bar FooBarConfig = autoconfig(Foo, Bar.from_Y) # Typedef: Union[ConfigFoo, ConfigFooFromX, ConfigBarFromY]. cfg = parse_config(FooBarConfig) # Parse args from the CLI and yaml, # e.g. tyro.cli is used to read from CLI one of these types FooOrBarInstance = instantiate(cfg) # Creates Foo or Bar instance. # e.g. hydra_zen.instantiate is used to build the object # from the config ``` A convenient pattern is to use autoconfig within a larger dataclass config. The actual config class used can be chosen by the CLI, or config yaml, using abbreviated subcommand names, for example: ``` @dataclass class AppConfig: foobar: autoconfig(Foo, Bar.from_Y) # type: Union[ConfigFoo, ConfigFooFromX, ConfigBarFromY] cfg = parse_config(AppConfig) # This will call tyro.cli(AppConfig) ``` cfg can be any of the config types: ConfigFoo, ConfigFooFromX, ConfigBarFromY. To choose them via CLI, one can specify: foobar:Foo, foobar:Foo.from-x, foobar:Bar.from-y Here underscores in constructor names are replaced with dashes. Specific constructor arguments can be passed as --arg1 --arg2. To choose them via a config yaml, one can specify: ``` foobar: constructor: 'Foo.from_x' ... args1: value # args that Foo.from_x takes arg2: value ... ``` Tip: Running the script with --help will trigger tyro to print the available configs. Args: *classes_and_callables (Type): Variable list of types for configuration. Supports the following: - classes: Wisp will scan the class for constructions methods, and generate a config dataclass for each. - constructors / functions: Specific construction methods can also be stated. exclude (List[Callable]): If specified, particular classes or constructors will be omitted from the returned configs. exclude can be used to filter specific constructors when generating configs for entire class types. 
Returns: - (Type) If classes_and_callables yield a single target constructor, returns a single dynamic dataclass config type for this target constructor. - (typing.Union[Type, *] If classes_and_callables yield multiple target constructors, returns a union of dynamic dataclass config types for each target constructor. Here is the function: def autoconfig(*classes_and_callables: Type, exclude: List[Callable] = None) -> Any: """ Generates a list of Config dataclasses for each of the classes or functions (i.e. specific constructors). The class constructors / callables must be type annotated for this function to succeed. Otherwise, see configure(). Specifically, this function will: 1. Inspect the given classes in classes_and_callables and extract all constructors found. 2. Add specific functions from classes_and_callables to the list of configs to generate. 3. Build a config dataclass for each constructor variant, using hydra_zen. The dataclass fields should mirror the constructor's arguments. Types not supported by the config are recorded an internal field and removed. 4. Annotate each config with a CLI subcommand. Later, these subcommands are used by tyro to choose which config class to load, among this generated set. 5. If more than one config class is generated, this function returns a Union of configs. The configuration options are determined by the contents of the union. A constructor in wisp is assumed to be simplistic, and defined as either: (1) __init__ explicitly defined within the class body. (2) A @classmethod that is annotated as returning the same class type, see example below. Other cases, i.e. __new__ keyword, are not explicitly supported. Role of this function in config lifecycle: - Instances of these dataclasses can be populated by calling parse_config(), the CLI / reading config yaml contents. - Config dataclasses can be directly converted to actual instances with instantiate(). Usage example: ``` class Foo: def __init__(): ... 
@classmethod def from_X() -> Foo class Bar: def __init__(): ... @classmethod def from_X() -> Bar @classmethod def from_Y() -> Bar FooBarConfig = autoconfig(Foo, Bar.from_Y) # Typedef: Union[ConfigFoo, ConfigFooFromX, ConfigBarFromY]. cfg = parse_config(FooBarConfig) # Parse args from the CLI and yaml, # e.g. tyro.cli is used to read from CLI one of these types FooOrBarInstance = instantiate(cfg) # Creates Foo or Bar instance. # e.g. hydra_zen.instantiate is used to build the object # from the config ``` A convenient pattern is to use autoconfig within a larger dataclass config. The actual config class used can be chosen by the CLI, or config yaml, using abbreviated subcommand names, for example: ``` @dataclass class AppConfig: foobar: autoconfig(Foo, Bar.from_Y) # type: Union[ConfigFoo, ConfigFooFromX, ConfigBarFromY] cfg = parse_config(AppConfig) # This will call tyro.cli(AppConfig) ``` cfg can be any of the config types: ConfigFoo, ConfigFooFromX, ConfigBarFromY. To choose them via CLI, one can specify: foobar:Foo, foobar:Foo.from-x, foobar:Bar.from-y Here underscores in constructor names are replaced with dashes. Specific constructor arguments can be passed as --arg1 --arg2. To choose them via a config yaml, one can specify: ``` foobar: constructor: 'Foo.from_x' ... args1: value # args that Foo.from_x takes arg2: value ... ``` Tip: Running the script with --help will trigger tyro to print the available configs. Args: *classes_and_callables (Type): Variable list of types for configuration. Supports the following: - classes: Wisp will scan the class for constructions methods, and generate a config dataclass for each. - constructors / functions: Specific construction methods can also be stated. exclude (List[Callable]): If specified, particular classes or constructors will be omitted from the returned configs. exclude can be used to filter specific constructors when generating configs for entire class types. 
Returns: - (Type) If classes_and_callables yield a single target constructor, returns a single dynamic dataclass config type for this target constructor. - (typing.Union[Type, *] If classes_and_callables yield multiple target constructors, returns a union of dynamic dataclass config types for each target constructor. """ import wisp.config._hydrazen as hydrazen_parser import wisp.config._tyro as tyro_parser if TYPE_CHECKING: from hydra_zen.typing import Builds # Enable static code analysis # Note: Callables are not handled here. This line assumes classes_and_callables contains # only classes. To support callables, we need to retrieve the classes callables belong to. return Builds[Type[Union.__getitem__(tuple(classes_and_callables))]] else: # Runtime: generate actual dynamic dataclasses for each class constructor and callable here config_dataclasses = hydrazen_parser.configs_for(*classes_and_callables, exclude=exclude) # Before returning the generated configs, we annotate them into a convenient subcommand format, i.e: # config class: `ConfigClassNameConstructorName` --> subcommand: `ClassName.constructor-name` # This allows CLI to specify the config choice via subcommands in our specified format if len(config_dataclasses) == 1: # For a single callable, return the config class type. return tyro_parser.annotate_subcommand(config_dataclasses[0]) else: # If a class(es) of multiple callables, or multiple callables were specified, return the union of configs annotated_dataclasses = [] for config_type in config_dataclasses: annotated_dataclasses.append(tyro_parser.annotate_subcommand(config_type)) return Union.__getitem__(tuple(annotated_dataclasses))
Generates a list of Config dataclasses for each of the classes or functions (i.e. specific constructors). The class constructors / callables must be type annotated for this function to succeed. Otherwise, see configure(). Specifically, this function will: 1. Inspect the given classes in classes_and_callables and extract all constructors found. 2. Add specific functions from classes_and_callables to the list of configs to generate. 3. Build a config dataclass for each constructor variant, using hydra_zen. The dataclass fields should mirror the constructor's arguments. Types not supported by the config are recorded an internal field and removed. 4. Annotate each config with a CLI subcommand. Later, these subcommands are used by tyro to choose which config class to load, among this generated set. 5. If more than one config class is generated, this function returns a Union of configs. The configuration options are determined by the contents of the union. A constructor in wisp is assumed to be simplistic, and defined as either: (1) __init__ explicitly defined within the class body. (2) A @classmethod that is annotated as returning the same class type, see example below. Other cases, i.e. __new__ keyword, are not explicitly supported. Role of this function in config lifecycle: - Instances of these dataclasses can be populated by calling parse_config(), the CLI / reading config yaml contents. - Config dataclasses can be directly converted to actual instances with instantiate(). Usage example: ``` class Foo: def __init__(): ... @classmethod def from_X() -> Foo class Bar: def __init__(): ... @classmethod def from_X() -> Bar @classmethod def from_Y() -> Bar FooBarConfig = autoconfig(Foo, Bar.from_Y) # Typedef: Union[ConfigFoo, ConfigFooFromX, ConfigBarFromY]. cfg = parse_config(FooBarConfig) # Parse args from the CLI and yaml, # e.g. tyro.cli is used to read from CLI one of these types FooOrBarInstance = instantiate(cfg) # Creates Foo or Bar instance. # e.g. 
hydra_zen.instantiate is used to build the object # from the config ``` A convenient pattern is to use autoconfig within a larger dataclass config. The actual config class used can be chosen by the CLI, or config yaml, using abbreviated subcommand names, for example: ``` @dataclass class AppConfig: foobar: autoconfig(Foo, Bar.from_Y) # type: Union[ConfigFoo, ConfigFooFromX, ConfigBarFromY] cfg = parse_config(AppConfig) # This will call tyro.cli(AppConfig) ``` cfg can be any of the config types: ConfigFoo, ConfigFooFromX, ConfigBarFromY. To choose them via CLI, one can specify: foobar:Foo, foobar:Foo.from-x, foobar:Bar.from-y Here underscores in constructor names are replaced with dashes. Specific constructor arguments can be passed as --arg1 --arg2. To choose them via a config yaml, one can specify: ``` foobar: constructor: 'Foo.from_x' ... args1: value # args that Foo.from_x takes arg2: value ... ``` Tip: Running the script with --help will trigger tyro to print the available configs. Args: *classes_and_callables (Type): Variable list of types for configuration. Supports the following: - classes: Wisp will scan the class for constructions methods, and generate a config dataclass for each. - constructors / functions: Specific construction methods can also be stated. exclude (List[Callable]): If specified, particular classes or constructors will be omitted from the returned configs. exclude can be used to filter specific constructors when generating configs for entire class types. Returns: - (Type) If classes_and_callables yield a single target constructor, returns a single dynamic dataclass config type for this target constructor. - (typing.Union[Type, *] If classes_and_callables yield multiple target constructors, returns a union of dynamic dataclass config types for each target constructor.
23,063
from typing import Union, Type, TYPE_CHECKING, List, Callable, Any, Optional import dataclasses The provided code snippet includes necessary dependencies for implementing the `configure` function. Write a Python function `def configure(cls=None, /, *, target: Callable[..., Any] = None, import_error: str = None)` to solve the following problem: @configure decorates a given dataclass type, cls, as a configuration class that instantiates the target type. Use this function when configuring non-typed constructors, for example: ``` @configure(target=torch.optim.Adam) # This config can build torch.optim.Adam class ConfigAdam: lr: float betas: Tuple[float, float] = (0.9, 0.999) eps: float = 1e-8 ``` torch.optim.Adam.__init__ doesn't define type annotations for the args, but we can still support building this object by explicitly stating the types in this config. Essentially, this config says: these are the args I want to configure via CLI / yaml. By decorating with @configure, we can later call: optimizer = instantiate(ConfigAdam, params=params) to obtain an instance of torch.optim.Adam on the parameters of some trainable model. If target is None, @configure builds nothing, and simply acts as an alias to @dataclass. Role of this function in config lifecycle: - Instances of these dataclasses can be populated by calling parse_config(), the CLI / reading config yaml contents. - Config dataclasses can be directly converted to actual instances with instantiate(). Usage example: ``` @configure(target=torch.optim.Adam) # This config can build torch.optim.Adam class ConfigAdam: lr: float betas: Tuple[float, float] = (0.9, 0.999) ... @configure(target=torch.optim.RMSprop) # This config can build torch.optim.RMSprop class ConfigRMSprop: lr: float = 1e-2 alpha: float = 0.99 ... 
@dataclass class AppConfig: optimizer: Union[ConfigAdam, ConfigRMSprop] # Explicit config set cfg = parse_config(AppConfig) # Parse args from the CLI and yaml to populate cfg optimizer = instantiate(cfg.optimizer) # Creates Adam or RMSprop instance ``` To choose an optimizer via CLI, one can specify: optimizer:adam, optimizer:rmsprop Specific constructor arguments can be passed as --lr --optimizer.alpha. To choose an optimizer via a config yaml, one can specify: ``` optimizer: constructor: 'Adam' ... lr: 1e-2 eps: 1e-8 ... ``` Tip: Running the script with --help will trigger tyro to print the available configs. Args: cls (Type): A simple class, whose fields correspond in name to the configurable args in the target (default constructor if target is a class). Users should specify the typing of args to allow the configuration system to initialize this object. Default values are not mandatory, and are only used when a value is not stated through the CLI / config yaml. If cls is missing args that exist in the target's function, default values will be used from the target function. Otherwise, instantiating the object will fail due to missing required args. target (Callable[..., Any]): A target class or constructor to build with the decorated config class. Constructors in wisp are assumed to simplistic, and defined as either: (1) __init__ explicitly defined within the class body. By default, this is used for target classes. (2) A @classmethod that is annotated as returning the same class type. import_error (str): An optional import error to show in case the target class is missing. Here is the function: def configure(cls=None, /, *, target: Callable[..., Any] = None, import_error: str = None): """ @configure decorates a given dataclass type, cls, as a configuration class that instantiates the target type. 
Use this function when configuring non-typed constructors, for example: ``` @configure(target=torch.optim.Adam) # This config can build torch.optim.Adam class ConfigAdam: lr: float betas: Tuple[float, float] = (0.9, 0.999) eps: float = 1e-8 ``` torch.optim.Adam.__init__ doesn't define type annotations for the args, but we can still support building this object by explicitly stating the types in this config. Essentially, this config says: these are the args I want to configure via CLI / yaml. By decorating with @configure, we can later call: optimizer = instantiate(ConfigAdam, params=params) to obtain an instance of torch.optim.Adam on the parameters of some trainable model. If target is None, @configure builds nothing, and simply acts as an alias to @dataclass. Role of this function in config lifecycle: - Instances of these dataclasses can be populated by calling parse_config(), the CLI / reading config yaml contents. - Config dataclasses can be directly converted to actual instances with instantiate(). Usage example: ``` @configure(target=torch.optim.Adam) # This config can build torch.optim.Adam class ConfigAdam: lr: float betas: Tuple[float, float] = (0.9, 0.999) ... @configure(target=torch.optim.RMSprop) # This config can build torch.optim.RMSprop class ConfigRMSprop: lr: float = 1e-2 alpha: float = 0.99 ... @dataclass class AppConfig: optimizer: Union[ConfigAdam, ConfigRMSprop] # Explicit config set cfg = parse_config(AppConfig) # Parse args from the CLI and yaml to populate cfg optimizer = instantiate(cfg.optimizer) # Creates Adam or RMSprop instance ``` To choose an optimizer via CLI, one can specify: optimizer:adam, optimizer:rmsprop Specific constructor arguments can be passed as --lr --optimizer.alpha. To choose an optimizer via a config yaml, one can specify: ``` optimizer: constructor: 'Adam' ... lr: 1e-2 eps: 1e-8 ... ``` Tip: Running the script with --help will trigger tyro to print the available configs. 
Args: cls (Type): A simple class, whose fields correspond in name to the configurable args in the target (default constructor if target is a class). Users should specify the typing of args to allow the configuration system to initialize this object. Default values are not mandatory, and are only used when a value is not stated through the CLI / config yaml. If cls is missing args that exist in the target's function, default values will be used from the target function. Otherwise, instantiating the object will fail due to missing required args. target (Callable[..., Any]): A target class or constructor to build with the decorated config class. Constructors in wisp are assumed to simplistic, and defined as either: (1) __init__ explicitly defined within the class body. By default, this is used for target classes. (2) A @classmethod that is annotated as returning the same class type. import_error (str): An optional import error to show in case the target class is missing. """ def _process_class(cls): """ Execute the wrapper logic: replace this class with a proper config class which is also compatible with instantiate() and parse_config(). """ import docstring_parser import wisp.config._hydrazen as hydrazen_parser import wisp.config._tyro as tyro_parser if target is not None: config_fields = dict() if cls is not None: config_fields.update(cls.__annotations__) # Tell hydra-zen what typed args cls has wrapper = hydrazen_parser.build_config_for_target(target=target, config_fields=config_fields, import_error=import_error) is_annotate_subcommand = True else: # No target, so defer to act as a @dataclass wrapper wrapper = dataclasses.dataclass is_annotate_subcommand = False config_class = wrapper(cls) # Create docstring (or shortened version of it) for help if target is not None: doc = None if target.__doc__ is not None: doc = docstring_parser.parse(target.__doc__).short_description if not doc: doc = f'Builds {target.__name__}.' 
config_class.__doc__ = doc if is_annotate_subcommand: # Make it CLI / yaml compatible config_class = tyro_parser.annotate_subcommand(config_class) return config_class if cls is None: return _process_class # Support @configure(), @configure(target=...) with parenthesis. else: return _process_class(cls) # Support @configure without parenthesis.
@configure decorates a given dataclass type, cls, as a configuration class that instantiates the target type. Use this function when configuring non-typed constructors, for example: ``` @configure(target=torch.optim.Adam) # This config can build torch.optim.Adam class ConfigAdam: lr: float betas: Tuple[float, float] = (0.9, 0.999) eps: float = 1e-8 ``` torch.optim.Adam.__init__ doesn't define type annotations for the args, but we can still support building this object by explicitly stating the types in this config. Essentially, this config says: these are the args I want to configure via CLI / yaml. By decorating with @configure, we can later call: optimizer = instantiate(ConfigAdam, params=params) to obtain an instance of torch.optim.Adam on the parameters of some trainable model. If target is None, @configure builds nothing, and simply acts as an alias to @dataclass. Role of this function in config lifecycle: - Instances of these dataclasses can be populated by calling parse_config(), the CLI / reading config yaml contents. - Config dataclasses can be directly converted to actual instances with instantiate(). Usage example: ``` @configure(target=torch.optim.Adam) # This config can build torch.optim.Adam class ConfigAdam: lr: float betas: Tuple[float, float] = (0.9, 0.999) ... @configure(target=torch.optim.RMSprop) # This config can build torch.optim.RMSprop class ConfigRMSprop: lr: float = 1e-2 alpha: float = 0.99 ... @dataclass class AppConfig: optimizer: Union[ConfigAdam, ConfigRMSprop] # Explicit config set cfg = parse_config(AppConfig) # Parse args from the CLI and yaml to populate cfg optimizer = instantiate(cfg.optimizer) # Creates Adam or RMSprop instance ``` To choose an optimizer via CLI, one can specify: optimizer:adam, optimizer:rmsprop Specific constructor arguments can be passed as --lr --optimizer.alpha. To choose an optimizer via a config yaml, one can specify: ``` optimizer: constructor: 'Adam' ... lr: 1e-2 eps: 1e-8 ... 
``` Tip: Running the script with --help will trigger tyro to print the available configs. Args: cls (Type): A simple class, whose fields correspond in name to the configurable args in the target (default constructor if target is a class). Users should specify the typing of args to allow the configuration system to initialize this object. Default values are not mandatory, and are only used when a value is not stated through the CLI / config yaml. If cls is missing args that exist in the target's function, default values will be used from the target function. Otherwise, instantiating the object will fail due to missing required args. target (Callable[..., Any]): A target class or constructor to build with the decorated config class. Constructors in wisp are assumed to simplistic, and defined as either: (1) __init__ explicitly defined within the class body. By default, this is used for target classes. (2) A @classmethod that is annotated as returning the same class type. import_error (str): An optional import error to show in case the target class is missing.
23,064
from typing import Union, Type, TYPE_CHECKING, List, Callable, Any, Optional import dataclasses The provided code snippet includes necessary dependencies for implementing the `instantiate` function. Write a Python function `def instantiate(config, **kwargs)` to solve the following problem: Builds an object from a config dataclass. Given a config dataclass defined with @configure or autoconfig, and populated with values from CLI / yaml with parse_config, instantiate will invoke the constructor of the target and pass the arg values kept in the config. A common pattern is to instantiate a hierarchy of objects. In this case, instantiate can be used to build each level of this hierarchy, where the inner object is passed as a kwarg, for example: ``` # NeuralRadianceField contains a grid, and grid contains a blas blas = instantiate(cfg.blas) grid = instantiate(cfg.grid, blas=blas) # If grid doesn't need blas, it will be ignored nef = instantiate(cfg.nef, grid=grid) # If nef doesn't need grid, it will be ignored ``` Note that instantiate will not fail for grid variants that don't use the blas arg: it will be silently ignored, as if the target constructor signature had a **kwargs absorbing excessive args. This makes the main scripts shorter and flexible, at the cost of strictly matching the instantiated object target signature. Args: config: An instance of a config class. config dataclasses are defined with @configure or autoconfig, and populated with values from CLI / yaml with parse_config **kwargs: If specified, may contain: 1. Additional args to pass to the invoked target, not contained in the config class. This is useful, i.e., for building a hierarchy of composed objects, see example above. 2. Override values for args defined in the config class. Returns: - (object) A new instance of the target buildable by the config class. Here is the function: def instantiate(config, **kwargs): """ Builds an object from a config dataclass. 
Given a config dataclass defined with @configure or autoconfig, and populated with values from CLI / yaml with parse_config, instantiate will invoke the constructor of the target and pass the arg values kept in the config. A common pattern is to instantiate a hierarchy of objects. In this case, instantiate can be used to build each level of this hierarchy, where the inner object is passed as a kwarg, for example: ``` # NeuralRadianceField contains a grid, and grid contains a blas blas = instantiate(cfg.blas) grid = instantiate(cfg.grid, blas=blas) # If grid doesn't need blas, it will be ignored nef = instantiate(cfg.nef, grid=grid) # If nef doesn't need grid, it will be ignored ``` Note that instantiate will not fail for grid variants that don't use the blas arg: it will be silently ignored, as if the target constructor signature had a **kwargs absorbing excessive args. This makes the main scripts shorter and flexible, at the cost of strictly matching the instantiated object target signature. Args: config: An instance of a config class. config dataclasses are defined with @configure or autoconfig, and populated with values from CLI / yaml with parse_config **kwargs: If specified, may contain: 1. Additional args to pass to the invoked target, not contained in the config class. This is useful, i.e., for building a hierarchy of composed objects, see example above. 2. Override values for args defined in the config class. Returns: - (object) A new instance of the target buildable by the config class. """ from hydra_zen import instantiate, is_partial_builds import wisp.config._hydrazen as hydrazen_parser try: hydrazen_parser.get_target(config) except TypeError as e: raise TypeError(f'config dataclass cannot be instantiated: {config}. Make sure that:\n' f'1. This config class was defined with @configure or autoconfig' f'2. 
parse_config() was used to build this config object.') from e # Partial build means the config doesn't contain all the args required to build the object. is_partial_build = is_partial_builds(config) available_args = hydrazen_parser.get_supported_args(config) remaining_args = hydrazen_parser.get_missing_args(config) overriden_args = {k: v for k, v in kwargs.items() if k in available_args} instance = instantiate(config, **overriden_args) # In this case, hydra zen instantiates a functools.partial and we have to invoke # the returned partial callable again with the remaining args. if is_partial_build: args_for_partial = {k: v for k, v in kwargs.items() if k in remaining_args} instance = instance(**args_for_partial) return instance
Builds an object from a config dataclass. Given a config dataclass defined with @configure or autoconfig, and populated with values from CLI / yaml with parse_config, instantiate will invoke the constructor of the target and pass the arg values kept in the config. A common pattern is to instantiate a hierarchy of objects. In this case, instantiate can be used to build each level of this hierarchy, where the inner object is passed as a kwarg, for example: ``` # NeuralRadianceField contains a grid, and grid contains a blas blas = instantiate(cfg.blas) grid = instantiate(cfg.grid, blas=blas) # If grid doesn't need blas, it will be ignored nef = instantiate(cfg.nef, grid=grid) # If nef doesn't need grid, it will be ignored ``` Note that instantiate will not fail for grid variants that don't use the blas arg: it will be silently ignored, as if the target constructor signature had a **kwargs absorbing excessive args. This makes the main scripts shorter and flexible, at the cost of strictly matching the instantiated object target signature. Args: config: An instance of a config class. config dataclasses are defined with @configure or autoconfig, and populated with values from CLI / yaml with parse_config **kwargs: If specified, may contain: 1. Additional args to pass to the invoked target, not contained in the config class. This is useful, i.e., for building a hierarchy of composed objects, see example above. 2. Override values for args defined in the config class. Returns: - (object) A new instance of the target buildable by the config class.
23,065
from typing import Union, Type, TYPE_CHECKING, List, Callable, Any, Optional import dataclasses def parse_args_tyro(config_type, yaml_arg: Optional[str]='--config'): """Parse args from a config dataclass. args = parse_args_tyro(AppConfig) Args: config_type (type): The type for the config object. yaml_arg (str): Name of config path arg. By default this is --config. Expected to start with --. If None, no configuration yaml is expected. Returns: (dataclass): Parsed config dataclass object. """ # Configure tyro's options. # ---------------------------- # tyro.conf.AvoidSubcommands: # Avoid creating subcommands when a default is provided for unions over nested types. # tyro.conf.ConsolidateSubcommandArgs: # More robust to reordering of options, ensuring that any new options can simply be placed at the end # of the command. i.e. allows the latter instead of the former: # `main.py grid:hash --grid.arg1 foo --grid.arg2 bar dataset:nerf --dataset.root baz # `main.py grid:hash dataset:nerf --grid.arg1 foo --dataset.root baz --grid.arg2 bar # tyro.conf.FlagConversionOff # Required to support both optional and non-optional boolean args # tyro.conf.SuppressFixed # Hides fields which are marked as fixed (i.e. predetermined value in dataclass). # Useful for hiding away zen_meta fields from hydra-zen. tyro_markers = [ tyro.conf.AvoidSubcommands, tyro.conf.ConsolidateSubcommandArgs, tyro.conf.FlagConversionOff, tyro.conf.SuppressFixed, ] apply_tyro_markers = tyro.conf.configure(*tyro_markers) decorated_config_type = apply_tyro_markers(config_type) # If a config file has been specified, pop it from sys.argv and return the path. path = None if yaml_arg is not None: path = find_config_file(sys.argv, yaml_arg) cli_subcommand_pos, cli_args = _collect_cli_args_and_subcommands() cli_mapping = _resolve_shortened_arg_names(config_type, cli_args) config_subcommands, config_args = None, None # If a config file is passed in, use as default. 
if path is not None: assert os.path.exists(path), f'Invalid configuration file path: "{path}". Please review your {yaml_arg} arg.' config_subcommands, config_args = load_config(path) # Rebuild sys.argv here in a format that satisfies tyro's conditions for subcommands and full arg names _reform_sys_argv(config_type, config_subcommands, config_args, cli_subcommand_pos, cli_args, cli_mapping) # If printing help, remove the ConsolidateSubcommandArgs marker which disorients the custom formatter additional_info = None if len(sys.argv) == 1 or '--help' in sys.argv or '-h' in sys.argv: if tyro.conf.ConsolidateSubcommandArgs in tyro_markers: del tyro_markers[tyro_markers.index(tyro.conf.ConsolidateSubcommandArgs)] apply_tyro_markers = tyro.conf.configure(*tyro_markers) decorated_config_type = apply_tyro_markers(config_type) else: # Use tyro's underlying parser to handle any cases we have custom help messages for. # If an error with a custom message occurs, this lines exits the program. # Otherwise, this line returns gracefully and we let tyro take care of the rest. additional_info = handle_custom_errors(decorated_config_type) try: args = tyro.cli(decorated_config_type) except SystemExit as e: if additional_info: sys.stderr.write(additional_info) raise e return args The provided code snippet includes necessary dependencies for implementing the `parse_config` function. Write a Python function `def parse_config(config_type, yaml_arg: Optional[str] = '--config')` to solve the following problem: This function will: 1. Parse args from the CLI and optional config yaml path. 2. Create and populate an instance of the config dataclass type. Usage example: ``` @dataclass class AppConfig: grid: autoconfig(TriplanarGrid, HashGrid) # type: Union[ConfigTriplanarGrid, ConfigHashGrid, ConfigHashGridFromGeometric, ...] nerf: autoconfig(NeuralRadianceField) # type: ConfigNeuralRadianceField optimizer: Union[ConfigAdam, ConfigRMSprop] # Explicit config set. 
This is useful, i.e., because the # type annotations of torch are not 100% reliable cfg = parse_config(AppConfig, yaml_arg='--config') # python main.py --config my_config.yaml --arg1 # cfg.grid, cfg.nerf, cfg.optimizer are now filled with values populated from my_config.yaml and arg1 ``` The CLI can with this function as follows: ``` Use just the config: > python main.py --config my_config.yaml Print help: > python main.py --help Use the config and override dataset path: > python main.py --config my_config.yaml --dataset-path data/lego/ Use the config with an arg that is used by more than one field (i.e. disambiguate from tracer.bg-color): > python main.py --config my_config.yaml --dataset.dataset-path data/lego/ Select a different optimizer variant: > python main.py --config app/nerf/configs/nerf_hash.yaml optimizer:adam Select a different optimizer and grid variants: > python main.py --config app/nerf/configs/nerf_hash.yaml optimizer:adam grid.TriplanarGrid: ``` The priority of args is determined by: 1. Args specified through CLI. 2. Args specified through config yaml. 3. Defaults defined in config dataclass. Args: config_type (type): The type for the config object. yaml_arg (str): Name of config path arg. By default this is --config. Expected to start with --. Returns: (dataclass): Config dataclass instance, initialized with parsed args. Here is the function: def parse_config(config_type, yaml_arg: Optional[str] = '--config'): """ This function will: 1. Parse args from the CLI and optional config yaml path. 2. Create and populate an instance of the config dataclass type. Usage example: ``` @dataclass class AppConfig: grid: autoconfig(TriplanarGrid, HashGrid) # type: Union[ConfigTriplanarGrid, ConfigHashGrid, ConfigHashGridFromGeometric, ...] nerf: autoconfig(NeuralRadianceField) # type: ConfigNeuralRadianceField optimizer: Union[ConfigAdam, ConfigRMSprop] # Explicit config set. 
This is useful, i.e., because the # type annotations of torch are not 100% reliable cfg = parse_config(AppConfig, yaml_arg='--config') # python main.py --config my_config.yaml --arg1 # cfg.grid, cfg.nerf, cfg.optimizer are now filled with values populated from my_config.yaml and arg1 ``` The CLI can with this function as follows: ``` Use just the config: > python main.py --config my_config.yaml Print help: > python main.py --help Use the config and override dataset path: > python main.py --config my_config.yaml --dataset-path data/lego/ Use the config with an arg that is used by more than one field (i.e. disambiguate from tracer.bg-color): > python main.py --config my_config.yaml --dataset.dataset-path data/lego/ Select a different optimizer variant: > python main.py --config app/nerf/configs/nerf_hash.yaml optimizer:adam Select a different optimizer and grid variants: > python main.py --config app/nerf/configs/nerf_hash.yaml optimizer:adam grid.TriplanarGrid: ``` The priority of args is determined by: 1. Args specified through CLI. 2. Args specified through config yaml. 3. Defaults defined in config dataclass. Args: config_type (type): The type for the config object. yaml_arg (str): Name of config path arg. By default this is --config. Expected to start with --. Returns: (dataclass): Config dataclass instance, initialized with parsed args. """ from ._tyro import parse_args_tyro return parse_args_tyro(config_type, yaml_arg)
This function will: 1. Parse args from the CLI and optional config yaml path. 2. Create and populate an instance of the config dataclass type. Usage example: ``` @dataclass class AppConfig: grid: autoconfig(TriplanarGrid, HashGrid) # type: Union[ConfigTriplanarGrid, ConfigHashGrid, ConfigHashGridFromGeometric, ...] nerf: autoconfig(NeuralRadianceField) # type: ConfigNeuralRadianceField optimizer: Union[ConfigAdam, ConfigRMSprop] # Explicit config set. This is useful, i.e., because the # type annotations of torch are not 100% reliable cfg = parse_config(AppConfig, yaml_arg='--config') # python main.py --config my_config.yaml --arg1 # cfg.grid, cfg.nerf, cfg.optimizer are now filled with values populated from my_config.yaml and arg1 ``` The CLI can with this function as follows: ``` Use just the config: > python main.py --config my_config.yaml Print help: > python main.py --help Use the config and override dataset path: > python main.py --config my_config.yaml --dataset-path data/lego/ Use the config with an arg that is used by more than one field (i.e. disambiguate from tracer.bg-color): > python main.py --config my_config.yaml --dataset.dataset-path data/lego/ Select a different optimizer variant: > python main.py --config app/nerf/configs/nerf_hash.yaml optimizer:adam Select a different optimizer and grid variants: > python main.py --config app/nerf/configs/nerf_hash.yaml optimizer:adam grid.TriplanarGrid: ``` The priority of args is determined by: 1. Args specified through CLI. 2. Args specified through config yaml. 3. Defaults defined in config dataclass. Args: config_type (type): The type for the config object. yaml_arg (str): Name of config path arg. By default this is --config. Expected to start with --. Returns: (dataclass): Config dataclass instance, initialized with parsed args.
23,066
from typing import Union, Type, TYPE_CHECKING, List, Callable, Any, Optional import dataclasses The provided code snippet includes necessary dependencies for implementing the `print_config` function. Write a Python function `def print_config(config, prefix="")` to solve the following problem: Prettyprint the config dataclass object. Args: config (dataclass): Dataclass config object. prefix (Optional[str]): If a base level indentation is desired, you can pass in a string. Here is the function: def print_config(config, prefix=""): """Prettyprint the config dataclass object. Args: config (dataclass): Dataclass config object. prefix (Optional[str]): If a base level indentation is desired, you can pass in a string. """ if hasattr(config, '__ctor_name__'): field_name = 'constructor' ctor = getattr(config, '__ctor_name__') print(f"{prefix}{field_name}: {ctor}") for field in dataclasses.fields(config): field_obj = getattr(config, field.name) if dataclasses.is_dataclass(field_obj): print(f"{prefix}{field.name}") print_config(field_obj, prefix=prefix + " ") else: if field.name.startswith('_') or field.name.endswith('_'): continue print(f"{prefix}{field.name}: {field_obj}")
Prettyprint the config dataclass object. Args: config (dataclass): Dataclass config object. prefix (Optional[str]): If a base level indentation is desired, you can pass in a string.
23,067
from typing import Union, Type, TYPE_CHECKING, List, Callable, Any, Optional import dataclasses The provided code snippet includes necessary dependencies for implementing the `write_config_to_yaml` function. Write a Python function `def write_config_to_yaml(config, path)` to solve the following problem: Write config to path as a yaml. write_config_to_path(config_object, "config.yaml") Args: config (dataclass): Dataclass config. path (str): Path to write the config file to. Here is the function: def write_config_to_yaml(config, path): """Write config to path as a yaml. write_config_to_path(config_object, "config.yaml") Args: config (dataclass): Dataclass config. path (str): Path to write the config file to. """ from ._tyro import write_config_to_yaml write_config_to_yaml(config=config, path=path)
Write config to path as a yaml. write_config_to_path(config_object, "config.yaml") Args: config (dataclass): Dataclass config. path (str): Path to write the config file to.
23,068
from typing import Union, Type, TYPE_CHECKING, List, Callable, Any, Optional import dataclasses The provided code snippet includes necessary dependencies for implementing the `get_config_target` function. Write a Python function `def get_config_target(config)` to solve the following problem: For config dataclasses generated with autoconfig() or @configure (or hydra-zen in general), this function will return the target type this config constructs when calling instantiate(). If config is not a dataclass generated with autoconfig(), @configure or hydra-zen, a TypeError is raised. Args: config (dataclass): Dataclass config. Here is the function: def get_config_target(config): """For config dataclasses generated with autoconfig() or @configure (or hydra-zen in general), this function will return the target type this config constructs when calling instantiate(). If config is not a dataclass generated with autoconfig(), @configure or hydra-zen, a TypeError is raised. Args: config (dataclass): Dataclass config. """ from hydra_zen import get_target try: return get_target(config) except TypeError as e: raise TypeError(f'get_config_target recieved a configuration class not generated with autoconfig(), @configure,' f' or hydra-zen in general: {config}. This means this config class does not instantiate' f' any target object.') from e
For config dataclasses generated with autoconfig() or @configure (or hydra-zen in general), this function will return the target type this config constructs when calling instantiate(). If config is not a dataclass generated with autoconfig(), @configure or hydra-zen, a TypeError is raised. Args: config (dataclass): Dataclass config.
23,069
import os, sys import re import yaml import itertools from typing_extensions import Annotated from typing import List, Set, Dict, Optional from collections import defaultdict import dataclasses import argparse import tyro from ._exceptions import handle_custom_errors, AmbiguousArgument The provided code snippet includes necessary dependencies for implementing the `write_config_to_yaml` function. Write a Python function `def write_config_to_yaml(config, path: str)` to solve the following problem: Writes config to path in yaml format. Usage: write_config_to_path(config_object, "config.yaml") Args: config (dataclass): Dataclass config. path (str): Path to write the config file to. Here is the function: def write_config_to_yaml(config, path: str): """Writes config to path in yaml format. Usage: write_config_to_path(config_object, "config.yaml") Args: config (dataclass): Dataclass config. path (str): Path to write the config file to. """ config_dict = dataclasses.asdict(config) # This is needed since tyro will by default use tuples instead of lists, # but yaml writers expect lists for proper behaviour. def _replace_tuple(_dict): for key in _dict: item = _dict[key] if isinstance(item, dict): _replace_tuple(item) elif isinstance(item, tuple): _dict[key] = list(item) _replace_tuple(config_dict) with open(path, 'w') as outfile: yaml.dump(config_dict, outfile)
Writes config to path in yaml format. Usage: write_config_to_path(config_object, "config.yaml") Args: config (dataclass): Dataclass config. path (str): Path to write the config file to.
23,070
from __future__ import annotations import inspect import enum import copy import typing from typing import get_type_hints, Type, Callable, List, Optional, Any from functools import lru_cache import docstring_parser from dataclasses import field import hydra_zen from hydra_zen import instantiate, builds, make_config, hydrated_dataclass from hydra_zen.typing import Builds def generate_implicit_field_types(func): func_args = typing.get_type_hints(func) func_parameters = inspect.signature(func).parameters # If arg doesn't have explicit typing but is using a default value, use the default value to infer # an implicit arg type implicit_arg_types = dict() for arg_name, param in func_parameters.items(): if arg_name not in func_args and param.default is not inspect._empty: implicit_arg_types[arg_name] = type(param.default) return implicit_arg_types
null
23,071
from __future__ import annotations from typing import List, Tuple import numpy as np import torch import kaolin.ops.spc as spc_ops import kaolin.render.spc as spc_render import wisp.ops.mesh as mesh_ops import wisp.ops.spc as wisp_spc_ops from wisp.accelstructs.base_as import BaseAS, ASQueryResults, ASRaytraceResults, ASRaymarchResults from kaolin import _C import wisp._C as wisp_C def fast_filter_method(mask_idx: torch.Tensor, depth: torch.Tensor, deltas: torch.Tensor, samples: torch.Tensor, num_samples: int, num_rays: int, device: torch.device) -> \ Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: depth_samples = depth[mask_idx[:, 0], mask_idx[:, 1]][:, None] deltas = deltas[mask_idx[:, 0], mask_idx[:, 1]].reshape(-1, 1) samples = samples[mask_idx[:, 0], mask_idx[:, 1], :] ridx = torch.arange(0, num_rays, device=device) ridx = ridx[..., None].repeat(1, num_samples)[mask_idx[:, 0], mask_idx[:, 1]] return depth_samples, deltas, samples, ridx
null
23,072
from __future__ import annotations import os from typing import Callable, Optional, Type import collections import inspect import torch from torch.utils.data._utils.collate import default_convert, default_collate_err_msg_format from wisp.core import Rays from wisp.datasets.base_datasets import WispDataset, MultiviewDataset, SDFDataset from wisp.datasets.batch import Batch def _load_dataset(dataset_baseclass: Type[WispDataset], dataset_path: str, dataset_num_workers: int = -1, transform: Callable = None, split: str = None, **kwargs) -> WispDataset: """ A convenience method which loads the dataset class which best matches the files under dataset_path. The implementation relies on the `WispDataset.is_root_of_dataset()` function being implemented by WispDataset implementations. Dataset classes are allowed to specify unique terms which set them apart from other datasets, i.e.: a dataset path with the following contents can be assumed to be attributed to the nerf-synethetic dataset: /path/to/dataset/transform.json /path/to/dataset/images/____.png Caveats: 1. Where more than one dataset class matches the contents of dataset_path, this function will trigger a RuntimeError due to a non-resolved ambiguity. In such cases, it is recommended to construct the datasets explicitly rather than using this function. 2. This function can only search for `dataset_baseclass` subclasses which were already loaded by python. When specifying custom WispDataset classes which do not exist under this folder, make sure python imported them before invoking this function. Args: dataset_baseclass (Type[WispDataset]): WispDataset or one of it's variants, to further scope which parts of the WispDataset class hierarchy are matched against the root directory. dataset_path (str): The root directory of the dataset, where dataset files should reside. dataset_num_workers (int): The number of workers to spawn for multiprocessed loading. 
If dataset_num_workers < 1, processing will take place on the main process. transform (Callable): Transform function applied per batch when data is accessed with __get_item__. For example: ray sampling, to filter the amount of rays returned per batch. When multiple transforms are needed, the transform callable may be a composition of multiple Callable. split (str): The dataset split to use, may correspond to which files to load, or how to partion the data. Options: 'train', 'val', 'test'. **kwargs: Any other data specific arguments will be passed to the dataset class once it is matched. Excessive kwargs the matched dataset class does not use will be safely disregarded. """ from wisp.config_parser import get_args_for_function def __subclasses_hierarchy(cls): # Get hierarchy of subclasses of cls return set(cls.__subclasses__()).union([s for c in cls.__subclasses__() for s in __subclasses_hierarchy(c)]) files_list = os.listdir(dataset_path) matching_dataset: Optional[Type[WispDataset]] = None for dataset in __subclasses_hierarchy(dataset_baseclass): if inspect.isabstract(dataset): continue is_match = dataset.is_root_of_dataset(root=dataset_path, files_list=files_list) if is_match: if matching_dataset is None: matching_dataset = dataset else: raise RuntimeError(f"load_dataset was given an ambiguous path which matches more than one dataset" f" class: {dataset} and {matching_dataset}. This is a result of " f"'is_root_of_dataset()' implementations which do not distinguish well enough" f"between the two dataset classes. A quick workaround is to load the desired " f"dataset class explicitly. Otherwise, update the dataset " f"'is_root_of_dataset()' logics. ") if matching_dataset is None: raise RuntimeError(f"load_dataset did match any dataset class which fits the contents of {dataset_path}. 
" f"Is the dataset-path valid?") # Out of all given kwargs, select those which the dataset __init__ function uses ds_args = get_args_for_function(args=kwargs, func=matching_dataset.__init__) return matching_dataset(dataset_path=dataset_path, dataset_num_workers=dataset_num_workers, transform=transform, split=split, **ds_args) class MultiviewDataset(WispDataset): """ Extends the WispDataset with dataset behavior common to all multiview datasets. """ def img_shape(self) -> Union[torch.Size, Tuple[torch.Size]]: """ Returns the shape of ground truth images of each view used by this dataset. If all images have a common size, the return value is torch.Size. If the gt images have different sizes, a Tuple of sizes is returned per view. """ pass def num_images(self) -> int: """ Returns the number of images / views this dataset contain. """ pass def cameras(self) -> Dict[str, Camera]: """ Returns the set of cameras this dataset uses to generate rays. Cameras are identifiable by unique ids / names """ return dict() def as_pointcloud(self) -> torch.FloatTensor: """ If `supports_depth()` is True, the current dataset contains depth information. This function can be used to query the depth information in the form of a pointcloud tensor. """ raise NotImplementedError('MultiviewDatasets that support depth information should ' 'return their data as a pointcloud. Otherwise, set supports_depth to return False.') def supports_depth(self) -> bool: """ Returns if this dataset have loaded depth information. """ return False def __getitem__(self, idx: int) -> MultiviewBatch: """ Samples the dataset for a multiview batch of information. The exact ray channels the batch contains are up to the dataset implementation to determine. Callers may treat the batch as a dictionary, or query MultiviewBatch.ray_values() to view which supervision channels are available. 
""" raise NotImplementedError('MultiviewDatasets should override __getitem__') def __len__(self): """Length of the dataset as number of views. """ return self.num_images The provided code snippet includes necessary dependencies for implementing the `load_multiview_dataset` function. Write a Python function `def load_multiview_dataset(dataset_path: str, dataset_num_workers: int = -1, transform: Callable = None, split: str = None, **kwargs) -> MultiviewDataset` to solve the following problem: A convenience method which loads the MultiviewDataset class which best matches the files under dataset_path. The implementation relies on the `WispDataset.is_root_of_dataset()` function being implemented by WispDataset implementations. Dataset classes are allowed to specify unique terms which set them apart from other datasets, i.e.: a dataset path with the following contents can be assumed to be attributed to the nerf-synethetic dataset: /path/to/dataset/transform.json /path/to/dataset/images/____.png Caveats: 1. Where more than one dataset class matches the contents of dataset_path, this function will trigger a RuntimeError due to a non-resolved ambiguity. In such cases, it is recommended to construct the datasets explicitly rather than using this function. 2. This function can only search for MultiviewDataset subclasses which were already loaded by python. When specifying custom MultiviewDataset classes which do not exist under this folder, make sure python imported them before invoking this function. Args: dataset_path (str): The root directory of the dataset, where dataset files should reside. dataset_num_workers (int): The number of workers to spawn for multiprocessed loading. If dataset_num_workers < 1, processing will take place on the main process. transform (Callable): Transform function applied per batch when data is accessed with __get_item__. For example: ray sampling, to filter the amount of rays returned per batch. 
When multiple transforms are needed, the transform callable may be a composition of multiple Callable. split (str): The dataset split to use, may correspond to which files to load, or how to partion the data. Options: 'train', 'val', 'test'. **kwargs: Any other data specific arguments will be passed to the dataset class once it is matched. Excessive kwargs the matched dataset class does not use will be safely disregarded. Here is the function: def load_multiview_dataset(dataset_path: str, dataset_num_workers: int = -1, transform: Callable = None, split: str = None, **kwargs) -> MultiviewDataset: """ A convenience method which loads the MultiviewDataset class which best matches the files under dataset_path. The implementation relies on the `WispDataset.is_root_of_dataset()` function being implemented by WispDataset implementations. Dataset classes are allowed to specify unique terms which set them apart from other datasets, i.e.: a dataset path with the following contents can be assumed to be attributed to the nerf-synethetic dataset: /path/to/dataset/transform.json /path/to/dataset/images/____.png Caveats: 1. Where more than one dataset class matches the contents of dataset_path, this function will trigger a RuntimeError due to a non-resolved ambiguity. In such cases, it is recommended to construct the datasets explicitly rather than using this function. 2. This function can only search for MultiviewDataset subclasses which were already loaded by python. When specifying custom MultiviewDataset classes which do not exist under this folder, make sure python imported them before invoking this function. Args: dataset_path (str): The root directory of the dataset, where dataset files should reside. dataset_num_workers (int): The number of workers to spawn for multiprocessed loading. If dataset_num_workers < 1, processing will take place on the main process. transform (Callable): Transform function applied per batch when data is accessed with __get_item__. 
For example: ray sampling, to filter the amount of rays returned per batch. When multiple transforms are needed, the transform callable may be a composition of multiple Callable. split (str): The dataset split to use, may correspond to which files to load, or how to partion the data. Options: 'train', 'val', 'test'. **kwargs: Any other data specific arguments will be passed to the dataset class once it is matched. Excessive kwargs the matched dataset class does not use will be safely disregarded. """ return _load_dataset(MultiviewDataset, dataset_path, dataset_num_workers, transform, split, **kwargs)
A convenience method which loads the MultiviewDataset class which best matches the files under dataset_path. The implementation relies on the `WispDataset.is_root_of_dataset()` function being implemented by WispDataset implementations. Dataset classes are allowed to specify unique terms which set them apart from other datasets, i.e.: a dataset path with the following contents can be assumed to be attributed to the nerf-synethetic dataset: /path/to/dataset/transform.json /path/to/dataset/images/____.png Caveats: 1. Where more than one dataset class matches the contents of dataset_path, this function will trigger a RuntimeError due to a non-resolved ambiguity. In such cases, it is recommended to construct the datasets explicitly rather than using this function. 2. This function can only search for MultiviewDataset subclasses which were already loaded by python. When specifying custom MultiviewDataset classes which do not exist under this folder, make sure python imported them before invoking this function. Args: dataset_path (str): The root directory of the dataset, where dataset files should reside. dataset_num_workers (int): The number of workers to spawn for multiprocessed loading. If dataset_num_workers < 1, processing will take place on the main process. transform (Callable): Transform function applied per batch when data is accessed with __get_item__. For example: ray sampling, to filter the amount of rays returned per batch. When multiple transforms are needed, the transform callable may be a composition of multiple Callable. split (str): The dataset split to use, may correspond to which files to load, or how to partion the data. Options: 'train', 'val', 'test'. **kwargs: Any other data specific arguments will be passed to the dataset class once it is matched. Excessive kwargs the matched dataset class does not use will be safely disregarded.
23,073
from __future__ import annotations import os from typing import Callable, Optional, Type import collections import inspect import torch from torch.utils.data._utils.collate import default_convert, default_collate_err_msg_format from wisp.core import Rays from wisp.datasets.base_datasets import WispDataset, MultiviewDataset, SDFDataset from wisp.datasets.batch import Batch class Batch(AttrDict): """ Represents a single batch of information sampled and collated from a WispDataset. Batches in Wisp keep a general structure by subclassing python's dictionaries and using their semantics. The exact fields each batch contain depend on the dataset type. """ def __init__(self, *args, **kwargs): super().__init__() for k, v in dict(*args, **kwargs).items(): self[k] = v def fields(self) -> List[str]: """ Returns a list of all field names the batch currently contains. Note that the content of some field values may be None in practice. """ return list(self.keys()) The provided code snippet includes necessary dependencies for implementing the `default_collate` function. Write a Python function `def default_collate(batch)` to solve the following problem: r""" Function that extends torch.utils.data._utils.collate.default_collate to support custom wisp structures such as Rays and Batches. Here is the function: def default_collate(batch): r""" Function that extends torch.utils.data._utils.collate.default_collate to support custom wisp structures such as Rays and Batches. 
""" elem = batch[0] elem_type = type(elem) if isinstance(elem, Rays): return Rays.cat(batch) elif isinstance(elem, Batch): # Assumes that if first element in batch has a certain None field, this field is None for all other entries too return elem_type(**{key: default_collate([d[key] for d in batch]) for key in elem if elem.get(key) is not None}) if isinstance(elem, torch.Tensor): out = None if torch.utils.data.get_worker_info() is not None: # If we're in a background process, concatenate directly into a # shared memory tensor to avoid an extra copy numel = sum(x.numel() for x in batch) storage = elem.storage()._new_shared(numel, device=elem.device) out = elem.new(storage).resize_(len(batch), *list(elem.size())) return torch.stack(batch, 0, out=out) elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \ and elem_type.__name__ != 'string_': if elem_type.__name__ == 'ndarray' or elem_type.__name__ == 'memmap': # array of string classes and object if np_str_obj_array_pattern.search(elem.dtype.str) is not None: raise TypeError(default_collate_err_msg_format.format(elem.dtype)) return default_collate([torch.as_tensor(b) for b in batch]) elif elem.shape == (): # scalars return torch.as_tensor(batch) elif isinstance(elem, float): return torch.tensor(batch, dtype=torch.float64) elif isinstance(elem, int): return torch.tensor(batch) elif isinstance(elem, (str, bytes)): return batch elif isinstance(elem, collections.abc.Mapping): try: return elem_type({key: default_collate([d[key] for d in batch]) for key in elem}) except TypeError: # The mapping type may not support `__init__(iterable)`. 
return {key: default_collate([d[key] for d in batch]) for key in elem} elif isinstance(elem, tuple) and hasattr(elem, '_fields'): # namedtuple return elem_type(*(default_collate(samples) for samples in zip(*batch))) elif isinstance(elem, collections.abc.Sequence): # check to make sure that the elements in batch have consistent size it = iter(batch) elem_size = len(next(it)) if not all(len(elem) == elem_size for elem in it): raise RuntimeError('each element in list of batch should be of equal size') transposed = list(zip(*batch)) # It may be accessed twice, so we use a list. if isinstance(elem, tuple): return [default_collate(samples) for samples in transposed] # Backwards compatibility. else: try: return elem_type([default_collate(samples) for samples in transposed]) except TypeError: # The sequence type may not support `__init__(iterable)` (e.g., `range`). return [default_collate(samples) for samples in transposed] raise TypeError(default_collate_err_msg_format.format(elem_type))
r""" Function that extends torch.utils.data._utils.collate.default_collate to support custom wisp structures such as Rays and Batches.
23,074
from typing import Callable, Tuple, Union from copy import deepcopy import unittest import random import numpy as np import torch from kaolin.render.camera import Camera from kaolin.render.camera.extrinsics import CameraExtrinsics from torch.utils.data import Dataset from wisp.utils import DotDict from wisp.ops.raygen import generate_pinhole_rays, generate_centered_pixel_coords def spherical_eye( radius=1, theta=np.pi / 3, phi=0, ): return torch.FloatTensor( [ radius * np.sin(theta) * np.sin(phi), radius * np.cos(theta), radius * np.sin(theta) * np.cos(phi), ], ) # [3] The provided code snippet includes necessary dependencies for implementing the `spherical_coord_to_pose` function. Write a Python function `def spherical_coord_to_pose( radius=1, theta=np.pi / 3, phi=0, up=torch.FloatTensor([0, 1, 0]) )` to solve the following problem: generate camera pose from a spherical coordinate Args: size: batch size of generated poses. device: where to allocate the output. radius: camera radius theta_range: [min, max], should be in [0, pi] phi_range: [min, max], should be in [0, 2 * pi] Return: poses: [size, 4, 4] in OpenGL convention Here is the function: def spherical_coord_to_pose( radius=1, theta=np.pi / 3, phi=0, up=torch.FloatTensor([0, 1, 0]) ): """generate camera pose from a spherical coordinate Args: size: batch size of generated poses. device: where to allocate the output. 
radius: camera radius theta_range: [min, max], should be in [0, pi] phi_range: [min, max], should be in [0, 2 * pi] Return: poses: [size, 4, 4] in OpenGL convention """ eye = spherical_eye(radius, theta, phi) # lookat def normalize(vec): return torch.nn.functional.normalize(vec, dim=-1) backward = -normalize(eye) right = normalize(torch.cross(backward, up, dim=-1)) up = normalize(torch.cross(right, backward, dim=-1)) world_rot = torch.stack((right, up, -backward), dim=1) world_tran = -world_rot @ eye.unsqueeze(-1) return CameraExtrinsics._from_world_in_cam_coords( rotation=world_rot, translation=world_tran, device="cpu", requires_grad=False )
generate camera pose from a spherical coordinate Args: size: batch size of generated poses. device: where to allocate the output. radius: camera radius theta_range: [min, max], should be in [0, pi] phi_range: [min, max], should be in [0, 2 * pi] Return: poses: [size, 4, 4] in OpenGL convention
23,075
import logging import sys import pprint The provided code snippet includes necessary dependencies for implementing the `default_log_setup` function. Write a Python function `def default_log_setup(level=logging.INFO)` to solve the following problem: Sets up default logging, always logging to stdout. :param level: logging level, e.g. logging.INFO Here is the function: def default_log_setup(level=logging.INFO): """ Sets up default logging, always logging to stdout. :param level: logging level, e.g. logging.INFO """ handlers = [logging.StreamHandler(sys.stdout)] # TODO: better to also use loggers per file and add %(name)15s logging.basicConfig(level=level, format='%(asctime)s|%(levelname)8s| %(message)s', handlers=handlers)
Sets up default logging, always logging to stdout. :param level: logging level, e.g. logging.INFO
23,076
import logging import sys import pprint The provided code snippet includes necessary dependencies for implementing the `args_to_log_format` function. Write a Python function `def args_to_log_format(args_dict) -> str` to solve the following problem: Convert args hierarchy to string representation suitable for logging (i.e. with Tensorboard). Args: args_dict : The parsed arguments, grouped within a dictionary. Returns: arg_str : The args encoded in a string format. Here is the function: def args_to_log_format(args_dict) -> str: """Convert args hierarchy to string representation suitable for logging (i.e. with Tensorboard). Args: args_dict : The parsed arguments, grouped within a dictionary. Returns: arg_str : The args encoded in a string format. """ pp = pprint.PrettyPrinter(indent=2) args_str = pp.pformat(args_dict) args_str = f'```{args_str}```' return args_str
Convert args hierarchy to string representation suitable for logging (i.e. with Tensorboard). Args: args_dict : The parsed arguments, grouped within a dictionary. Returns: arg_str : The args encoded in a string format.
23,077
from pydispatch import dispatcher The provided code snippet includes necessary dependencies for implementing the `watch` function. Write a Python function `def watch(watched_obj, field, status, handler)` to solve the following problem: registers the handler for status updates on watched_obj.field. For example: watch(scene_status, "cam_controller", "changed", app.on_camera_controller_changed) Here is the function: def watch(watched_obj, field, status, handler): """ registers the handler for status updates on watched_obj.field. For example: watch(scene_status, "cam_controller", "changed", app.on_camera_controller_changed) """ dispatcher.connect(handler, (status, field), sender=watched_obj)
registers the handler for status updates on watched_obj.field. For example: watch(scene_status, "cam_controller", "changed", app.on_camera_controller_changed)
23,078
from pydispatch import dispatcher def _register_func(cls): # __setattr__ already explicitly defined, use it as internal setter implementation if '__setattr__' in cls.__dict__: setter_func = cls.__dict__['__setattr__'] else: # __setattr__ not defined, use the default implementation which simply sets the attribute def _setter_func(obj, key, value): obj.__dict__[key] = value setter_func = _setter_func setattr(cls, '_setattr', setter_func) setattr(cls, '__setattr__', __setattr_notify__) return cls The provided code snippet includes necessary dependencies for implementing the `watchedfields` function. Write a Python function `def watchedfields(cls=None)` to solve the following problem: Returns the class augmented with a custom __setattr__ implementation which notifies subscribers when class fields are updated. Here is the function: def watchedfields(cls=None): """ Returns the class augmented with a custom __setattr__ implementation which notifies subscribers when class fields are updated. """ def wrap(cls): return _register_func(cls) if cls is None: # Called as @watchedfields() return wrap else: return wrap(cls) # Called as @watchedfields
Returns the class augmented with a custom __setattr__ implementation which notifies subscribers when class fields are updated.
23,079
from pydispatch import dispatcher class watcheddict(dict): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.__class__ = type(dict.__name__, (self.__class__, dict), {}) def __setitem__(self, item, value): super().__setitem__(item, value) dispatcher.send(('updated', self), self, value=item) class watchedlist(list): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.__class__ = type(list.__name__, (self.__class__, list), {}) def __setitem__(self, item, value): super().__setitem__(item, value) dispatcher.send(('updated', self), self, value=item) def wrap_iterable_fields(value): if isinstance(value, dict): return watcheddict(value) elif isinstance(value, list): return watchedlist(value) else: return value
null
23,080
import torch import torch.nn as nn import torch.nn.functional as F class FullSort(nn.Module): """The "FullSort" activation function from https://arxiv.org/abs/1811.05381. """ def forward(self, x): """Sorts the feature dimension. Args: x (torch.FloatTensor): Some tensor of shape [..., feature_size] Returns: (torch.FloatTensor): Activation of shape [..., feature_size] """ return torch.sort(x, dim=-1)[0] class MinMax(nn.Module): """The "MinMax" activation function from https://arxiv.org/abs/1811.05381. """ def forward(self, x): """Partially sorts the feature dimension. The feature dimension needs to be a multiple of 2. Args: x (torch.FloatTensor): Some tensor of shape [batch, feature_size] Returns: (torch.FloatTensor): Activation of shape [batch, feature_size] """ N, M = x.shape x = x.reshape(N, M//2, 2) return torch.cat([x.min(-1, keepdim=True)[0], x.max(-1, keepdim=True)[0]], dim=-1).reshape(N, M) class Identity(nn.Module): """Identity function. Occasionally useful. """ def forward(self, x): """Returns the input. :) Args: x (Any): Anything Returns: (Any): The input! """ return x The provided code snippet includes necessary dependencies for implementing the `get_activation_class` function. Write a Python function `def get_activation_class(activation_type)` to solve the following problem: Utility function to return an activation function class based on the string description. Args: activation_type (str): The name for the activation function. Returns: (Function): The activation function to be used. Here is the function: def get_activation_class(activation_type): """Utility function to return an activation function class based on the string description. Args: activation_type (str): The name for the activation function. Returns: (Function): The activation function to be used. 
""" if activation_type == 'none': return Identity() elif activation_type == 'fullsort': return FullSort() elif activation_type == 'minmax': return MinMax() elif activation_type == 'relu': return torch.relu elif activation_type == 'sin': return torch.sin elif activation_type == 'celu': return F.celu elif activation_type == 'selu': return F.selu elif activation_type == 'leaky_relu': return F.leaky_relu elif activation_type == 'gelu': return F.gelu else: assert False and "activation type does not exist"
Utility function to return an activation function class based on the string description. Args: activation_type (str): The name for the activation function. Returns: (Function): The activation function to be used.
23,081
from typing import Dict, Any import torch import torch.nn as nn from wisp.core import WispModule from scipy.stats import ortho_group The provided code snippet includes necessary dependencies for implementing the `orthonormal` function. Write a Python function `def orthonormal(weight)` to solve the following problem: Initialize the layer as a random orthonormal matrix. Args: weight (torch.FloatTensor): Matrix of shape [M, N]. Only used for the shape. Returns: (torch.FloatTensor): Matrix of shape [M, N]. Here is the function: def orthonormal(weight): """Initialize the layer as a random orthonormal matrix. Args: weight (torch.FloatTensor): Matrix of shape [M, N]. Only used for the shape. Returns: (torch.FloatTensor): Matrix of shape [M, N]. """ m = ortho_group.rvs(dim=max(weight.shape)) #m = np.dot(m.T, m) m = m[:weight.shape[0],:weight.shape[1]] return torch.from_numpy(m).float()
Initialize the layer as a random orthonormal matrix. Args: weight (torch.FloatTensor): Matrix of shape [M, N]. Only used for the shape. Returns: (torch.FloatTensor): Matrix of shape [M, N].
23,082
from typing import Dict, Any import torch import torch.nn as nn from wisp.core import WispModule from scipy.stats import ortho_group def svd(weight): """Initialize the layer with the U,V of SVD. Args: weight (torch.FloatTensor): Matrix of shape [M, N]. Returns: (torch.FloatTensor): Matrix of shape [M, N]. """ U,S,V = torch.svd(weight) return torch.matmul(U, V.T) The provided code snippet includes necessary dependencies for implementing the `spectral_normalization` function. Write a Python function `def spectral_normalization(weight)` to solve the following problem: Initialize the layer with spectral normalization. Args: weight (torch.FloatTensor): Matrix of shape [M, N]. Returns: (torch.FloatTensor): Matrix of shape [M, N]. Here is the function: def spectral_normalization(weight): """Initialize the layer with spectral normalization. Args: weight (torch.FloatTensor): Matrix of shape [M, N]. Returns: (torch.FloatTensor): Matrix of shape [M, N]. """ U,S,V = torch.svd(weight) return weight / S.max()
Initialize the layer with spectral normalization. Args: weight (torch.FloatTensor): Matrix of shape [M, N]. Returns: (torch.FloatTensor): Matrix of shape [M, N].
23,083
from typing import Dict, Any import torch import torch.nn as nn from wisp.core import WispModule from scipy.stats import ortho_group The provided code snippet includes necessary dependencies for implementing the `identity` function. Write a Python function `def identity(weight)` to solve the following problem: Initialize the layer with identity matrix. Args: weight (torch.FloatTensor): Matrix of shape [M, N]. Returns: (torch.FloatTensor): Matrix of shape [M, N]. Here is the function: def identity(weight): """Initialize the layer with identity matrix. Args: weight (torch.FloatTensor): Matrix of shape [M, N]. Returns: (torch.FloatTensor): Matrix of shape [M, N]. """ return torch.diag(torch.ones(weight.shape[0]))
Initialize the layer with identity matrix. Args: weight (torch.FloatTensor): Matrix of shape [M, N]. Returns: (torch.FloatTensor): Matrix of shape [M, N].
23,084
from typing import Dict, Any import torch import torch.nn as nn from wisp.core import WispModule from scipy.stats import ortho_group The provided code snippet includes necessary dependencies for implementing the `average` function. Write a Python function `def average(weight)` to solve the following problem: Initialize the layer by normalizing the weights. Args: weight (torch.FloatTensor): Matrix of shape [M, N]. Returns: (torch.FloatTensor): Matrix of shape [M, N]. Here is the function: def average(weight): """Initialize the layer by normalizing the weights. Args: weight (torch.FloatTensor): Matrix of shape [M, N]. Returns: (torch.FloatTensor): Matrix of shape [M, N]. """ return weight / weight.sum()
Initialize the layer by normalizing the weights. Args: weight (torch.FloatTensor): Matrix of shape [M, N]. Returns: (torch.FloatTensor): Matrix of shape [M, N].
23,085
from typing import Dict, Any import torch import torch.nn as nn from wisp.core import WispModule class PositionalEmbedder(WispModule): """PyTorch implementation of regular positional embedding, as used in the original NeRF and Transformer papers. """ def __init__(self, num_freq, max_freq_log2, log_sampling=True, include_input=True, input_dim=3): """Initialize the module. Args: num_freq (int): The number of frequency bands to sample. max_freq_log2 (int): The maximum frequency. The bands will be sampled at regular intervals in [0, 2^max_freq_log2]. log_sampling (bool): If true, will sample frequency bands in log space. include_input (bool): If true, will concatenate the input. input_dim (int): The dimension of the input coordinate space. Returns: (void): Initializes the encoding. """ super().__init__() self.num_freq = num_freq self.max_freq_log2 = max_freq_log2 self.log_sampling = log_sampling self.include_input = include_input self.out_dim = 0 if include_input: self.out_dim += input_dim if self.log_sampling: self.bands = 2.0**torch.linspace(0.0, max_freq_log2, steps=num_freq) else: self.bands = torch.linspace(1, 2.0**max_freq_log2, steps=num_freq) # The out_dim is really just input_dim + num_freq * input_dim * 2 (for sin and cos) self.out_dim += self.bands.shape[0] * input_dim * 2 self.bands = nn.Parameter(self.bands).requires_grad_(False) def forward(self, coords): """Embeds the coordinates. Args: coords (torch.FloatTensor): Coordinates of shape [N, input_dim] Returns: (torch.FloatTensor): Embeddings of shape [N, input_dim + out_dim] or [N, out_dim]. """ N = coords.shape[0] winded = (coords[:,None] * self.bands[None,:,None]).reshape( N, coords.shape[1] * self.num_freq) encoded = torch.cat([torch.sin(winded), torch.cos(winded)], dim=-1) if self.include_input: encoded = torch.cat([coords, encoded], dim=-1) return encoded def name(self) -> str: """ A human readable name for the given wisp module. 
""" return "Positional Encoding" def public_properties(self) -> Dict[str, Any]: """ Wisp modules expose their public properties in a dictionary. The purpose of this method is to give an easy table of outwards facing attributes, for the purpose of logging, gui apps, etc. """ return { "Output Dim": self.out_dim, "Num. Frequencies": self.num_freq, "Max Frequency": f"2^{self.max_freq_log2}", "Include Input": self.include_input } The provided code snippet includes necessary dependencies for implementing the `get_positional_embedder` function. Write a Python function `def get_positional_embedder(frequencies, input_dim=3, include_input=True)` to solve the following problem: Utility function to get a positional encoding embedding. Args: frequencies (int): The number of frequencies used to define the PE: [2^0, 2^1, 2^2, ... 2^(frequencies - 1)]. input_dim (int): The input coordinate dimension. include_input (bool): If true, will concatenate the input coords. Returns: (nn.Module, int): - The embedding module - The output dimension of the embedding. Here is the function: def get_positional_embedder(frequencies, input_dim=3, include_input=True): """Utility function to get a positional encoding embedding. Args: frequencies (int): The number of frequencies used to define the PE: [2^0, 2^1, 2^2, ... 2^(frequencies - 1)]. input_dim (int): The input coordinate dimension. include_input (bool): If true, will concatenate the input coords. Returns: (nn.Module, int): - The embedding module - The output dimension of the embedding. """ encoder = PositionalEmbedder(frequencies, frequencies-1, input_dim=input_dim, include_input=include_input) return encoder, encoder.out_dim
Utility function to get a positional encoding embedding. Args: frequencies (int): The number of frequencies used to define the PE: [2^0, 2^1, 2^2, ... 2^(frequencies - 1)]. input_dim (int): The input coordinate dimension. include_input (bool): If true, will concatenate the input coords. Returns: (nn.Module, int): - The embedding module - The output dimension of the embedding.
23,086
The provided code snippet includes necessary dependencies for implementing the `position` function. Write a Python function `def position(position, features, layers, activation)` to solve the following problem: Use the position as input (i.e. no conditioning) Args: position : [N, ..., d] tensor of coordinates features : [N, ..., f] tensor of features layers : nn.ModuleList of layers activation : activation function Here is the function: def position(position, features, layers, activation): """Use the position as input (i.e. no conditioning) Args: position : [N, ..., d] tensor of coordinates features : [N, ..., f] tensor of features layers : nn.ModuleList of layers activation : activation function """ h = position for i, l in enumerate(layers): h = activation(l(h)) return h
Use the position as input (i.e. no conditioning) Args: position : [N, ..., d] tensor of coordinates features : [N, ..., f] tensor of features layers : nn.ModuleList of layers activation : activation function
23,087
The provided code snippet includes necessary dependencies for implementing the `feature` function. Write a Python function `def feature(position, features, layers, activation)` to solve the following problem: Use the features as input. Args: position : [N, ..., d] tensor of coordinates features : [N, ..., f] tensor of features layers : nn.ModuleList of layers activation : activation function Here is the function: def feature(position, features, layers, activation): """Use the features as input. Args: position : [N, ..., d] tensor of coordinates features : [N, ..., f] tensor of features layers : nn.ModuleList of layers activation : activation function """ h = features for i, l in enumerate(layers): h = activation(l(h)) return h
Use the features as input. Args: position : [N, ..., d] tensor of coordinates features : [N, ..., f] tensor of features layers : nn.ModuleList of layers activation : activation function
23,088
The provided code snippet includes necessary dependencies for implementing the `concat` function. Write a Python function `def concat(position, features, layers, activation)` to solve the following problem: Concatenates the input onto the features, and then feeds into the input of the neural network. Args: position : [N, ..., d] tensor of coordinates features : [N, ..., f] tensor of features layers : nn.ModuleList of layers activation : activation function Here is the function: def concat(position, features, layers, activation): """Concatenates the input onto the features, and then feeds into the input of the neural network. Args: position : [N, ..., d] tensor of coordinates features : [N, ..., f] tensor of features layers : nn.ModuleList of layers activation : activation function """ h = torch.cat([position, features], dim=-1) for i, l in enumerate(layers): h = activation(l(h)) return h
Concatenates the input onto the features, and then feeds into the input of the neural network. Args: position : [N, ..., d] tensor of coordinates features : [N, ..., f] tensor of features layers : nn.ModuleList of layers activation : activation function
23,089
The provided code snippet includes necessary dependencies for implementing the `film_linear` function. Write a Python function `def film_linear(position, features, layers, activation)` to solve the following problem: Applies film conditioning (multiply only) on the network. Args: position : [N, ..., d] tensor of coordinates features : [N, ..., f] tensor of features layers : nn.ModuleList of layers activation : activation function Here is the function: def film_linear(position, features, layers, activation): """Applies film conditioning (multiply only) on the network. Args: position : [N, ..., d] tensor of coordinates features : [N, ..., f] tensor of features layers : nn.ModuleList of layers activation : activation function """ feature_shape = features.shape[:-1] feature_dim = features.shape[-1] num_hidden = len(layers) # Maybe add assertion here... but if it errors, your feature_dim size is wrong features = features.reshape(features_shape, num_hidden, feature_dim // num_hidden) h = position for i, l in enumerate(layers): # Maybe also add another assertion here h = activation(l(h) * features[..., i, :]) return h
Applies film conditioning (multiply only) on the network. Args: position : [N, ..., d] tensor of coordinates features : [N, ..., f] tensor of features layers : nn.ModuleList of layers activation : activation function
23,090
The provided code snippet includes necessary dependencies for implementing the `film_translate` function. Write a Python function `def film_translate(position, features, layers, activation)` to solve the following problem: Applies film conditioning (add only) on the network. Args: position : [N, ..., d] tensor of coordinates features : [N, ..., f] tensor of features layers : nn.ModuleList of layers activation : activation function Here is the function: def film_translate(position, features, layers, activation): """Applies film conditioning (add only) on the network. Args: position : [N, ..., d] tensor of coordinates features : [N, ..., f] tensor of features layers : nn.ModuleList of layers activation : activation function """ feature_shape = features.shape[:-1] feature_dim = features.shape[-1] num_hidden = len(layers) # Maybe add assertion here... but if it errors, your feature_dim size is wrong features = features.reshape(features_shape, num_hidden, feature_dim // num_hidden) h = position for i, l in enumerate(layers): # Maybe also add another assertion here h = activation(l(h) + features[..., i, :]) return h
Applies film conditioning (add only) on the network. Args: position : [N, ..., d] tensor of coordinates features : [N, ..., f] tensor of features layers : nn.ModuleList of layers activation : activation function
23,091
The provided code snippet includes necessary dependencies for implementing the `film` function. Write a Python function `def film(position, features, layers, activation)` to solve the following problem: Applies film conditioning (add only) on the network. Args: position : [N, ..., d] tensor of coordinates features : [N, ..., f] tensor of features layers : nn.ModuleList of layers activation : activation function Here is the function: def film(position, features, layers, activation): """Applies film conditioning (add only) on the network. Args: position : [N, ..., d] tensor of coordinates features : [N, ..., f] tensor of features layers : nn.ModuleList of layers activation : activation function """ feature_shape = features.shape[:-1] feature_dim = features.shape[-1] num_hidden = len(layers) # Maybe add assertion here... but if it errors, your feature_dim size is wrong features = features.reshape(features_shape, 2, num_hidden, feature_dim // num_hidden) h = position for i, l in enumerate(layers): # Maybe also add another assertion here h = activation(l(h) * features[..., 0, i, :] + features[..., 1, i, :]) return h
Applies film conditioning (add only) on the network. Args: position : [N, ..., d] tensor of coordinates features : [N, ..., f] tensor of features layers : nn.ModuleList of layers activation : activation function
23,092
import torch import torch.nn as nn import torch.nn.functional as F The provided code snippet includes necessary dependencies for implementing the `normalize_frobenius` function. Write a Python function `def normalize_frobenius(x)` to solve the following problem: Normalizes the matrix according to the Frobenius norm. Args: x (torch.FloatTensor): A matrix. Returns: (torch.FloatTensor): A normalized matrix. Here is the function: def normalize_frobenius(x): """Normalizes the matrix according to the Frobenius norm. Args: x (torch.FloatTensor): A matrix. Returns: (torch.FloatTensor): A normalized matrix. """ norm = torch.sqrt((torch.abs(x)**2).sum()) return x / norm
Normalizes the matrix according to the Frobenius norm. Args: x (torch.FloatTensor): A matrix. Returns: (torch.FloatTensor): A normalized matrix.
23,093
import torch import torch.nn as nn import torch.nn.functional as F The provided code snippet includes necessary dependencies for implementing the `normalize_L_1` function. Write a Python function `def normalize_L_1(x)` to solve the following problem: Normalizes the matrix according to the L1 norm. Args: x (torch.FloatTensor): A matrix. Returns: (torch.FloatTensor): A normalized matrix. Here is the function: def normalize_L_1(x): """Normalizes the matrix according to the L1 norm. Args: x (torch.FloatTensor): A matrix. Returns: (torch.FloatTensor): A normalized matrix. """ abscolsum = torch.sum(torch.abs(x), dim=0) abscolsum = torch.min(torch.stack([1.0/abscolsum, torch.ones_like(abscolsum)], dim=0), dim=0)[0] return x * abscolsum[None,:]
Normalizes the matrix according to the L1 norm. Args: x (torch.FloatTensor): A matrix. Returns: (torch.FloatTensor): A normalized matrix.
23,094
import torch import torch.nn as nn import torch.nn.functional as F The provided code snippet includes necessary dependencies for implementing the `normalize_L_inf` function. Write a Python function `def normalize_L_inf(x)` to solve the following problem: Normalizes the matrix according to the Linf norm. Args: x (torch.FloatTensor): A matrix. Returns: (torch.FloatTensor): A normalized matrix. Here is the function: def normalize_L_inf(x): """Normalizes the matrix according to the Linf norm. Args: x (torch.FloatTensor): A matrix. Returns: (torch.FloatTensor): A normalized matrix. """ absrowsum = torch.sum(torch.abs(x), axis=1) absrowsum = torch.min(torch.stack([1.0/absrowsum, torch.ones_like(absrowsum)], dim=0), dim=0)[0] return x * absrowsum[:,None]
Normalizes the matrix according to the Linf norm. Args: x (torch.FloatTensor): A matrix. Returns: (torch.FloatTensor): A normalized matrix.
23,095
import torch import torch.nn as nn import torch.nn.functional as F class FrobeniusLinear(nn.Module): """A standard Linear layer which applies a Frobenius normalization in the forward pass. """ def __init__(self, *args, **kwargs): super().__init__() self.linear = nn.Linear(*args, **kwargs) def forward(self, x): weight = normalize_frobenius(self.linear.weight) return F.linear(x, weight, self.linear.bias) class L_1_Linear(nn.Module): """A standard Linear layer which applies a L1 normalization in the forward pass. """ def __init__(self, *args, **kwargs): super().__init__() self.linear = nn.Linear(*args, **kwargs) def forward(self, x): weight = normalize_L_1(self.linear.weight) return F.linear(x, weight, self.linear.bias) class L_inf_Linear(nn.Module): """A standard Linear layer which applies a Linf normalization in the forward pass. """ def __init__(self, *args, **kwargs): super().__init__() self.linear = nn.Linear(*args, **kwargs) def forward(self, x): weight = normalize_L_inf(self.linear.weight) return F.linear(x, weight, self.linear.bias) def spectral_norm_(*args, **kwargs): """Initializes a spectral norm layer. """ return nn.utils.spectral_norm(nn.Linear(*args, **kwargs)) The provided code snippet includes necessary dependencies for implementing the `get_layer_class` function. Write a Python function `def get_layer_class(layer_type)` to solve the following problem: Convenience function to return the layer class name from text. Args: layer_type (str): Text name for the layer. Retunrs: (nn.Module): The layer to be used for the decoder. Here is the function: def get_layer_class(layer_type): """Convenience function to return the layer class name from text. Args: layer_type (str): Text name for the layer. Retunrs: (nn.Module): The layer to be used for the decoder. 
""" if layer_type == 'none' or layer_type == 'linear': return nn.Linear elif layer_type == 'spectral_norm': return spectral_norm_ elif layer_type == 'frobenius_norm': return FrobeniusLinear elif layer_type == "l_1_norm": return L_1_Linear elif layer_type == "l_inf_norm": return L_inf_Linear else: assert(False and "layer type does not exist")
Convenience function to return the layer class name from text. Args: layer_type (str): Text name for the layer. Retunrs: (nn.Module): The layer to be used for the decoder.
23,096
import time import torch class bcolors: HEADER = '\033[95m' OKBLUE = '\033[94m' OKGREEN = '\033[92m' WARNING = '\033[93m' FAIL = '\033[91m' ENDC = '\033[0m' BOLD = '\033[1m' UNDERLINE = '\033[4m' The provided code snippet includes necessary dependencies for implementing the `colorize_time` function. Write a Python function `def colorize_time(elapsed)` to solve the following problem: Returns colors based on the significance of the time elapsed. Here is the function: def colorize_time(elapsed): """Returns colors based on the significance of the time elapsed. """ if elapsed > 1e-3: return bcolors.FAIL + "{:.3e}".format(elapsed) + bcolors.ENDC elif elapsed > 1e-4: return bcolors.WARNING + "{:.3e}".format(elapsed) + bcolors.ENDC elif elapsed > 1e-5: return bcolors.OKBLUE + "{:.3e}".format(elapsed) + bcolors.ENDC else: return "{:.3e}".format(elapsed)
Returns colors based on the significance of the time elapsed.
23,097
import time import torch The provided code snippet includes necessary dependencies for implementing the `print_gpu_memory` function. Write a Python function `def print_gpu_memory()` to solve the following problem: Prints GPU memory used. Here is the function: def print_gpu_memory(): """Prints GPU memory used. """ torch.cuda.empty_cache() print(f"{torch.cuda.memory_allocated()//(1024*1024)} mb")
Prints GPU memory used.
23,098
import os import urllib.request import re listpath="./model-list" def find_Filename(keyword): model_list=[] for filename in os.listdir(listpath): model_file=filename.casefold().split("_") if keyword[0]=="all": model_list.append(filename) elif keyword[0]!=model_file[0]: continue else: if len(keyword)==1: model_list.append(filename) else: l=len(keyword[1]) if l>len(model_file[1]): continue elif keyword[1]==model_file[1][:l]: model_list.append(filename) return model_list
null
23,099
import os import urllib.request import re def yaml2list(txt): yaml_list=[] for line in txt: line=line[:-1] line_list=line.split(":") if len(line_list)>=2: new_line_list=[] hppts_flag=0 for x in line_list: if hppts_flag==0: new_line_list.append(x) else: new_line_list[-1]=new_line_list[-1]+":"+x if x==" https" or x==" http": hppts_flag=1 new_line=[] for x in new_line_list: blank_l=0 for c in x: if c==' ' or c=="-": blank_l=blank_l+1 else: break new_line.append(x[blank_l:]) yaml_list.append(new_line) return yaml_list def read_Ymal(yamlPathList): loadLinkDict={} for yamlPath in yamlPathList: yaml_txt=open(yamlPath) yaml_list=yaml2list(yaml_txt) index='' for i,ss in enumerate(yaml_list): if ss[0]=='board': index=ss[1] if index not in loadLinkDict: loadLinkDict.update({index:[]}) elif ss[0]=='download link': loadLinkDict[index].append(ss[1]) loadLinkList=[] print("chose model type") print('0:','all') loadLinkDictName=[] for i, name in enumerate(loadLinkDict): print(i+1,":",name) loadLinkDictName.append(name) num = int(input("input num:")) if num==0: for i, name in enumerate(loadLinkDict): for link in loadLinkDict[name]: loadLinkList.append(link) else: name=loadLinkDictName[num-1] loadLinkList=loadLinkDict[name] return loadLinkList
null
23,100
import os import urllib.request import re def process_bar(percent, start_str='', end_str='', total_length=0): def download(loadLinkList): def Schedule(a,b,c): per=100.0*a*b/c if per >100: per=100 end_str = '100%' process_bar(per/100, start_str='', end_str=end_str, total_length=15) for load_Link in loadLinkList: f = load_Link.index("filename=") name = load_Link[f + 9:] print(name) urllib.request.urlretrieve(load_Link, name,Schedule) print() print("done")
null
23,101
from nndct_shared.utils.tensor_util import DataFormatMap from typing import List def num_remaining_channels(num_channels, ratio, channel_divisible): if num_channels <= channel_divisible: return num_channels value = int((1 - ratio) * num_channels) return max( channel_divisible, int(value + channel_divisible / 2) // channel_divisible * channel_divisible)
null
23,102
from nndct_shared.utils.tensor_util import DataFormatMap from typing import List class DataFormatMap(object): """A dict mapping of framework and op type to its data format. """ _blob_format_map = { FrameworkType.NNDCT: { 2: "NH", 3: "NLC", 4: "NHWC", 5: "NHWDC" }, FrameworkType.TORCH: { 2: "NH", 3: "NCL", 4: "NCHW", 5: "NCDHW" }, # TF format generated in runtime. } _parameter_format_map = { FrameworkType.NNDCT: { 2: "OI", 3: "OLI", 4: "OHWI", 5: "OHWDI" }, FrameworkType.TORCH: { 2: "OI", 3: "OIL", 4: "OIHW", 5: "OIDHW" }, FrameworkType.TENSORFLOW: { 2: "IO", 3: "LIO", 4: "HWIO", 5: "DHWIO", } } def blob_format(cls, framework_type, ndim): if framework_type not in cls._blob_format_map: raise KeyError( "Framework type '{}' not supported now.".format(framework_type)) return cls._blob_format_map[framework_type][ndim] def param_format(cls, framework_type, ndim): if framework_type not in cls._parameter_format_map: raise KeyError( "Framework type '{}' not supported now.".format(framework_type)) return cls._parameter_format_map[framework_type][ndim] def out_in_axis(dim): layout = DataFormatMap.param_format('nndct', dim) return layout.index('O'), layout.index('I')
null
23,103
from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import copy import json import numpy as np import os from typing import List from nndct_shared.base.key_names import FrameworkType from nndct_shared.pruning import errors from nndct_shared.pruning import logging from nndct_shared.pruning import pruning_lib from nndct_shared.pruning import utils from nndct_shared.pruning.pruning_lib import is_depthwise_conv from nndct_shared.pruning.pruning_lib import is_grouped_conv from nndct_shared.pruning.utils import generate_indices_group from nndct_shared.utils import io from nndct_shared.utils.tensor_util import param_layout_transformer class ModulePruningInfoGenerator(object): def __init__(self, nodename_to_modulename=None, nodes_pruning_info=None, module_pruning_info=None): def _generate_module_pruning_info(self): def nodename_to_modulename(self): def nodename_to_modulename(self, nodename_to_modulename): def nodes_pruning_info(self): def nodes_pruning_info(self, nodes_pruning_info): def module_pruning_info(self): def serialize(self): def deserialize(cls, data): def to_json(self): import os if not os.path.exists(BASE_DIR): os.makedirs(BASE_DIR) def save_pruning_info(nodename_to_modulename, nodes_pruning_info, filepath): io.create_work_dir(os.path.dirname(filepath)) with open(filepath, 'w') as f: f.write( ModulePruningInfoGenerator(nodename_to_modulename, nodes_pruning_info).to_json())
null
23,104
from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import collections import os import pickle from nndct_shared.pruning import logging from nndct_shared.pruning import pruning_lib from nndct_shared.utils import io, logging from typing import Mapping, Any, List import json class NetSensitivity(object): """The sensitivity results of the network generated by model analysis.""" def __init__(self, groups: List[GroupMetrics] = [], graph_digest: str = None): self._groups = [g for g in groups] self._uncompleted_steps = [] self._graph_digest: str = graph_digest def add_group(self, nodes, metrics, num_groups: int = 1): self._groups.append(GroupMetrics(nodes, metrics, num_groups)) def uncompleted_steps(self): return self._uncompleted_steps def uncompleted_steps(self, uncompleted_steps): self._uncompleted_steps = uncompleted_steps def graph_digest(self) -> str: return self._graph_digest def graph_digest(self, graph_digest: str) -> None: self._graph_digest = graph_digest def groups(self): return self._groups def groups(self, groups): self._groups = groups def prunable_groups_by_threshold(self, threshold, excludes=[]): prunable_groups = [] for group in self._groups: skip = False for node in group.nodes: if node in excludes: skip = True break if skip: continue # Four common metric distributions: # w/o negative: # [120, 100, 54, ..., 0.14, 0.08] # [0.1, 0.2, 0.5, ..., 1.3, 1.8] # # with negative: # [0.85, 0.63, ..., 0.05, -0.01, -0.9] # [-40, -30, ..., 11, 50, 70] res = None baseline_value = group.metrics[0].value for sparsity, value in group.metrics: bias = baseline_value - value if abs(bias / (baseline_value + 1e-5)) > threshold: break res = sparsity if res is not None: prunable_groups.append( pruning_lib.PrunableGroup(group.nodes, res, group.num_groups)) return prunable_groups def __repr__(self): strs = [] uncompleted_steps = ','.join(str(i) for i in self._uncompleted_steps) 
strs.append('uncompleted_steps={%s}' % uncompleted_steps) for group in self._groups: strs.append(repr(group)) return "\n".join(strs) def save_sens(sens: NetSensitivity, filepath: str): sens_info = { 'groups': [group.serialize() for group in sens.groups], 'uncompleted_steps': sens.uncompleted_steps, 'graph_digest': sens.graph_digest } with open(filepath, 'w') as f: json.dump(sens_info, f, indent=2)
null
23,105
from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import collections import os import pickle from nndct_shared.pruning import logging from nndct_shared.pruning import pruning_lib from nndct_shared.utils import io, logging from typing import Mapping, Any, List import json def load_sens(filepath): sens = NetSensitivity() try: with open(filepath, 'rb') as f: sens_info = pickle.load(f) sens.groups = sens_info['groups'] sens.uncompleted_steps = sens_info['uncompleted_steps'] sens.graph_digest = sens_info.get("graph_digest") except Exception as e: logging.info(f"Reading sens from file {filepath} with pickle failed: {e}") logging.info(f"Now try reading sens from file {filepath} with json") with open(filepath, "r") as f: sens_info = json.load(f) sens.groups = [ GroupMetrics.deserialize(group) for group in sens_info["groups"] ] sens.uncompleted_steps = sens_info["uncompleted_steps"] sens.graph_digest = sens_info.get("graph_digest") return sens def inspect_sens(filepath): print(load_sens(filepath))
null
23,106
from __future__ import absolute_import from __future__ import division from __future__ import print_function import logging as _logging import os as _os import sys as _sys import time as _time import traceback as _traceback from logging import DEBUG from logging import ERROR from logging import FATAL from logging import INFO from logging import WARN from logging import NOTSET import threading def _log_prefix(level, timestamp=None, file_and_line=None): """Generate a nndct logline prefix.""" # pylint: disable=global-variable-not-assigned global _level_names # pylint: enable=global-variable-not-assigned # Record current time now = timestamp or _time.time() now_tuple = _time.localtime(now) now_microsecond = int(1e6 * (now % 1.0)) (filename, line) = file_and_line or _get_file_and_line() basename = _os.path.basename(filename) # Severity string severity = 'I' if level in _level_names: severity = _level_names[level][0] s = '%c%02d%02d %02d:%02d:%02d.%06d %s]' % ( severity, now_tuple[1], # month now_tuple[2], # day now_tuple[3], # hour now_tuple[4], # min now_tuple[5], # sec now_microsecond, basename) return s def get_logger(): """Return logger instance.""" global _logger # Use double-checked locking to avoid taking lock unnecessarily. if _logger: return _logger _logger_lock.acquire() try: if _logger: return _logger # Scope the TensorFlow logger to not conflict with users' loggers. logger = _logging.getLogger('nndct') logger.setLevel(1) # Override findCaller on the logger to skip internal helper functions logger.findCaller = _logger_find_caller # Don't further configure the TensorFlow logger if the root logger is # already configured. This prevents double logging in those cases. if not _logging.getLogger().handlers: # Determine whether we are in an interactive environment _interactive = False try: # This is only defined in interactive shells. if _sys.ps1: _interactive = True except AttributeError: # Even now, we may be in an interactive shell with `python -i`. 
_interactive = _sys.flags.interactive # If we are in an interactive environment (like Jupyter), set loglevel # to INFO and pipe the output to stdout. if _interactive: #logger.setLevel(INFO) _logging_target = _sys.stdout else: _logging_target = _sys.stderr # Add the output handler. _handler = _logging.StreamHandler(_logging_target) _handler.setFormatter(_logging.Formatter(_logging_fmt, None)) logger.addHandler(_handler) _logger = logger return _logger finally: _logger_lock.release() def debug(msg, *args, **kwargs): extra = {'nndct_prefix': _log_prefix(DEBUG)} get_logger().debug(msg, extra=extra, *args, **kwargs)
null
23,107
from __future__ import absolute_import from __future__ import division from __future__ import print_function import logging as _logging import os as _os import sys as _sys import time as _time import traceback as _traceback from logging import DEBUG from logging import ERROR from logging import FATAL from logging import INFO from logging import WARN from logging import NOTSET import threading def _log_prefix(level, timestamp=None, file_and_line=None): """Generate a nndct logline prefix.""" # pylint: disable=global-variable-not-assigned global _level_names # pylint: enable=global-variable-not-assigned # Record current time now = timestamp or _time.time() now_tuple = _time.localtime(now) now_microsecond = int(1e6 * (now % 1.0)) (filename, line) = file_and_line or _get_file_and_line() basename = _os.path.basename(filename) # Severity string severity = 'I' if level in _level_names: severity = _level_names[level][0] s = '%c%02d%02d %02d:%02d:%02d.%06d %s]' % ( severity, now_tuple[1], # month now_tuple[2], # day now_tuple[3], # hour now_tuple[4], # min now_tuple[5], # sec now_microsecond, basename) return s def get_logger(): """Return logger instance.""" global _logger # Use double-checked locking to avoid taking lock unnecessarily. if _logger: return _logger _logger_lock.acquire() try: if _logger: return _logger # Scope the TensorFlow logger to not conflict with users' loggers. logger = _logging.getLogger('nndct') logger.setLevel(1) # Override findCaller on the logger to skip internal helper functions logger.findCaller = _logger_find_caller # Don't further configure the TensorFlow logger if the root logger is # already configured. This prevents double logging in those cases. if not _logging.getLogger().handlers: # Determine whether we are in an interactive environment _interactive = False try: # This is only defined in interactive shells. if _sys.ps1: _interactive = True except AttributeError: # Even now, we may be in an interactive shell with `python -i`. 
_interactive = _sys.flags.interactive # If we are in an interactive environment (like Jupyter), set loglevel # to INFO and pipe the output to stdout. if _interactive: #logger.setLevel(INFO) _logging_target = _sys.stdout else: _logging_target = _sys.stderr # Add the output handler. _handler = _logging.StreamHandler(_logging_target) _handler.setFormatter(_logging.Formatter(_logging_fmt, None)) logger.addHandler(_handler) _logger = logger return _logger finally: _logger_lock.release() def warn(msg, *args, **kwargs): extra = {'nndct_prefix': _log_prefix(WARN)} get_logger().warning(msg, *args, extra=extra, **kwargs)
null
23,108
from __future__ import absolute_import from __future__ import division from __future__ import print_function import logging as _logging import os as _os import sys as _sys import time as _time import traceback as _traceback from logging import DEBUG from logging import ERROR from logging import FATAL from logging import INFO from logging import WARN from logging import NOTSET import threading def _log_prefix(level, timestamp=None, file_and_line=None): def get_logger(): def error(msg, *args, **kwargs): extra = {'nndct_prefix': _log_prefix(ERROR)} get_logger().error(msg, *args, extra=extra, **kwargs)
null
23,109
from __future__ import absolute_import from __future__ import division from __future__ import print_function import logging as _logging import os as _os import sys as _sys import time as _time import traceback as _traceback from logging import DEBUG from logging import ERROR from logging import FATAL from logging import INFO from logging import WARN from logging import NOTSET import threading def _log_prefix(level, timestamp=None, file_and_line=None): """Generate a nndct logline prefix.""" # pylint: disable=global-variable-not-assigned global _level_names # pylint: enable=global-variable-not-assigned # Record current time now = timestamp or _time.time() now_tuple = _time.localtime(now) now_microsecond = int(1e6 * (now % 1.0)) (filename, line) = file_and_line or _get_file_and_line() basename = _os.path.basename(filename) # Severity string severity = 'I' if level in _level_names: severity = _level_names[level][0] s = '%c%02d%02d %02d:%02d:%02d.%06d %s]' % ( severity, now_tuple[1], # month now_tuple[2], # day now_tuple[3], # hour now_tuple[4], # min now_tuple[5], # sec now_microsecond, basename) return s def get_logger(): """Return logger instance.""" global _logger # Use double-checked locking to avoid taking lock unnecessarily. if _logger: return _logger _logger_lock.acquire() try: if _logger: return _logger # Scope the TensorFlow logger to not conflict with users' loggers. logger = _logging.getLogger('nndct') logger.setLevel(1) # Override findCaller on the logger to skip internal helper functions logger.findCaller = _logger_find_caller # Don't further configure the TensorFlow logger if the root logger is # already configured. This prevents double logging in those cases. if not _logging.getLogger().handlers: # Determine whether we are in an interactive environment _interactive = False try: # This is only defined in interactive shells. if _sys.ps1: _interactive = True except AttributeError: # Even now, we may be in an interactive shell with `python -i`. 
_interactive = _sys.flags.interactive # If we are in an interactive environment (like Jupyter), set loglevel # to INFO and pipe the output to stdout. if _interactive: #logger.setLevel(INFO) _logging_target = _sys.stdout else: _logging_target = _sys.stderr # Add the output handler. _handler = _logging.StreamHandler(_logging_target) _handler.setFormatter(_logging.Formatter(_logging_fmt, None)) logger.addHandler(_handler) _logger = logger return _logger finally: _logger_lock.release() def fatal(msg, *args, **kwargs): extra = {'nndct_prefix': _log_prefix(FATAL)} get_logger().fatal(msg, *args, extra=extra, **kwargs)
null
23,110
from __future__ import absolute_import from __future__ import division from __future__ import print_function import logging as _logging import os as _os import sys as _sys import time as _time import traceback as _traceback from logging import DEBUG from logging import ERROR from logging import FATAL from logging import INFO from logging import WARN from logging import NOTSET import threading def get_logger(): """Return logger instance.""" global _logger # Use double-checked locking to avoid taking lock unnecessarily. if _logger: return _logger _logger_lock.acquire() try: if _logger: return _logger # Scope the TensorFlow logger to not conflict with users' loggers. logger = _logging.getLogger('nndct') logger.setLevel(1) # Override findCaller on the logger to skip internal helper functions logger.findCaller = _logger_find_caller # Don't further configure the TensorFlow logger if the root logger is # already configured. This prevents double logging in those cases. if not _logging.getLogger().handlers: # Determine whether we are in an interactive environment _interactive = False try: # This is only defined in interactive shells. if _sys.ps1: _interactive = True except AttributeError: # Even now, we may be in an interactive shell with `python -i`. _interactive = _sys.flags.interactive # If we are in an interactive environment (like Jupyter), set loglevel # to INFO and pipe the output to stdout. if _interactive: #logger.setLevel(INFO) _logging_target = _sys.stdout else: _logging_target = _sys.stderr # Add the output handler. _handler = _logging.StreamHandler(_logging_target) _handler.setFormatter(_logging.Formatter(_logging_fmt, None)) logger.addHandler(_handler) _logger = logger return _logger finally: _logger_lock.release() The provided code snippet includes necessary dependencies for implementing the `get_verbosity` function. Write a Python function `def get_verbosity()` to solve the following problem: Return how much logging output will be produced. 
Here is the function: def get_verbosity(): """Return how much logging output will be produced.""" return get_logger().getEffectiveLevel()
Return how much logging output will be produced.
23,111
from __future__ import absolute_import from __future__ import division from __future__ import print_function import logging as _logging import os as _os import sys as _sys import time as _time import traceback as _traceback from logging import DEBUG from logging import ERROR from logging import FATAL from logging import INFO from logging import WARN from logging import NOTSET import threading def get_logger(): """Return logger instance.""" global _logger # Use double-checked locking to avoid taking lock unnecessarily. if _logger: return _logger _logger_lock.acquire() try: if _logger: return _logger # Scope the TensorFlow logger to not conflict with users' loggers. logger = _logging.getLogger('nndct') logger.setLevel(1) # Override findCaller on the logger to skip internal helper functions logger.findCaller = _logger_find_caller # Don't further configure the TensorFlow logger if the root logger is # already configured. This prevents double logging in those cases. if not _logging.getLogger().handlers: # Determine whether we are in an interactive environment _interactive = False try: # This is only defined in interactive shells. if _sys.ps1: _interactive = True except AttributeError: # Even now, we may be in an interactive shell with `python -i`. _interactive = _sys.flags.interactive # If we are in an interactive environment (like Jupyter), set loglevel # to INFO and pipe the output to stdout. if _interactive: #logger.setLevel(INFO) _logging_target = _sys.stdout else: _logging_target = _sys.stderr # Add the output handler. _handler = _logging.StreamHandler(_logging_target) _handler.setFormatter(_logging.Formatter(_logging_fmt, None)) logger.addHandler(_handler) _logger = logger return _logger finally: _logger_lock.release() The provided code snippet includes necessary dependencies for implementing the `set_verbosity` function. Write a Python function `def set_verbosity(v)` to solve the following problem: Sets the threshold for what messages will be logged. 
Here is the function: def set_verbosity(v): """Sets the threshold for what messages will be logged.""" get_logger().setLevel(v)
Sets the threshold for what messages will be logged.
23,112
import collections import json import os from nndct_shared.pruning.pruning_lib import PruningSpec, NodeGroup from nndct_shared.pruning import errors from nndct_shared.utils import io from typing import List import os if not os.path.exists(BASE_DIR): os.makedirs(BASE_DIR) def save_searcher(searcher, filepath): io.create_work_dir(os.path.dirname(filepath)) with open(filepath, 'w') as f: f.write(searcher.to_json())
null
23,113
import collections import json import os from nndct_shared.pruning.pruning_lib import PruningSpec, NodeGroup from nndct_shared.pruning import errors from nndct_shared.utils import io from typing import List class SubnetSearcher(object): def __init__(self, groups: List[NodeGroup]): def set_supernet(self, score, macs=None): def add_subnet(self, ratios, score, macs=None): def _sorted_subnet(self): def subnet(self, index): def best_subnet(self): def spec(self, ratios): def groups(self): def config(self): def supernet(self): def supernet(self, supernet): def serialize(self): def deserialize(cls, data): def to_json(self): def load_searcher(filepath): with open(filepath, 'r') as f: return SubnetSearcher.deserialize(json.load(f))
null
23,114
from __future__ import absolute_import from __future__ import division from __future__ import print_function from typing import List, Mapping, Any, Union, Tuple import collections from nndct_shared.base.key_names import NNDCT_OP as OpTypes from nndct_shared.nndct_graph.base_node import Node from nndct_shared.metaclass import Singleton from nndct_shared.nndct_graph.base_graph import Graph from nndct_shared.pruning import errors from nndct_shared.pruning import logging from nndct_shared.pruning import node_group as node_group_lib from nndct_shared.utils import registry def is_separable_conv_depthwise_weight(param): if param.name == 'DEPTHWISE_WEIGHT': return True else: return False
null
23,115
from __future__ import absolute_import from __future__ import division from __future__ import print_function from typing import List, Mapping, Any, Union, Tuple import collections from nndct_shared.base.key_names import NNDCT_OP as OpTypes from nndct_shared.nndct_graph.base_node import Node from nndct_shared.metaclass import Singleton from nndct_shared.nndct_graph.base_graph import Graph from nndct_shared.pruning import errors from nndct_shared.pruning import logging from nndct_shared.pruning import node_group as node_group_lib from nndct_shared.utils import registry def is_separable_conv_pointwise_weight(param): if param.name == 'POINTWISE_WEIGHT': return True else: return False
null
23,116
from __future__ import absolute_import from __future__ import division from __future__ import print_function from typing import List, Mapping, Any, Union, Tuple import collections from nndct_shared.base.key_names import NNDCT_OP as OpTypes from nndct_shared.nndct_graph.base_node import Node from nndct_shared.metaclass import Singleton from nndct_shared.nndct_graph.base_graph import Graph from nndct_shared.pruning import errors from nndct_shared.pruning import logging from nndct_shared.pruning import node_group as node_group_lib from nndct_shared.utils import registry def get_conv_weight(node): if node.op.type == OpTypes.SEPARABLECONV2D: # pointwise weight can infer the out dim prune weight = node.op.param['pointwise_weight'] else: weight = node.op.param['weights'] return weight
null
23,117
# Dataset row (id 23118): import header, pruning-group helpers and the target
# function `group_nodes_for_ofa_dynamic_conv`, all collapsed into data-row blobs.
# NOTE(review): `CONV_OPS`, `TRANSPOSECONV_OPS`, `DEPTHWISECONV_OPS`,
# `is_grouped_conv`, `NodeGroup`, `find_ancestor`, `find_child` and the
# `group_nodes_by_*` helpers are referenced but defined elsewhere in the
# original source file — this extract does not include them.
from __future__ import absolute_import from __future__ import division from __future__ import print_function from typing import List, Mapping, Any, Union, Tuple import collections from nndct_shared.base.key_names import NNDCT_OP as OpTypes from nndct_shared.nndct_graph.base_node import Node from nndct_shared.metaclass import Singleton from nndct_shared.nndct_graph.base_graph import Graph from nndct_shared.pruning import errors from nndct_shared.pruning import logging from nndct_shared.pruning import node_group as node_group_lib from nndct_shared.utils import registry ALL_CONV_OPS = CONV_OPS + DEPTHWISECONV_OPS + TRANSPOSECONV_OPS def is_depthwise_conv(op): if op.type in DEPTHWISECONV_OPS: return True if not is_grouped_conv(op): return False out_channels = op.attr['out_dim'] in_channels = op.attr['in_dim'] group = op.attr['group'] return group == in_channels def group_nodes(graph: Graph, nodes_to_exclude: List[str] = [], with_group_conv: bool = False) -> List[NodeGroup]: """Divide convolution nodes into different groups. The nodes that connected with each other by elementwise operation will be divided into one group. 
""" node_group_union = node_group_lib.NodeGroupUnion() for node in graph.nodes: if node.op.type in CONV_OPS: node_group_union.add_node(node.name) elementwise_op = [OpTypes.ADD, OpTypes.MULTIPLYLAYER, OpTypes.SUB] node_group_union = group_nodes_by_eltwise_op(graph, node_group_union, elementwise_op) node_group_union = group_nodes_by_shared_weight(graph, node_group_union) nodes_to_exclude.extend(get_nodes_cannot_be_pruned(graph, with_group_conv)) all_groups: List[List[str]] = node_group_union.groups() ret: List[NodeGroup] = [] for group in all_groups: skip = False for node in nodes_to_exclude: if node in group: skip = True break if not skip: ret.append(NodeGroup(group)) assign_num_groups(graph, ret) return ret def find_prunable_ancestor(graph, node, target_ops=CONV_OPS): return find_ancestor(graph, node, target_ops, [OpTypes.CONCAT]) def find_prunable_child(graph, node, target_ops=CONV_OPS): return find_child(graph, node, target_ops, [OpTypes.CONCAT]) The provided code snippet includes necessary dependencies for implementing the `group_nodes_for_ofa_dynamic_conv` function. Write a Python function `def group_nodes_for_ofa_dynamic_conv(graph)` to solve the following problem: Divide convolution nodes into different groups. 1*1 conv only can expand or squeeze dim 3*3 conv 0 is_depthwise_conv 0 ancestor node 1 +/* node 1 not is_depthwise_conv 0 +/* node. Here is the function: def group_nodes_for_ofa_dynamic_conv(graph): """Divide convolution nodes into different groups. 1*1 conv only can expand or squeeze dim 3*3 conv 0 is_depthwise_conv 0 ancestor node 1 +/* node 1 not is_depthwise_conv 0 +/* node. 
""" group_nodes = [] depthwise_conv_num = 0 common_conv_num = 0 is_mobile = -1 for node in graph.nodes: attrs = {name: node.op.get_config(name) for name in node.op.configs} if node.op.type in ALL_CONV_OPS and min(attrs['kernel_size'][0], attrs['kernel_size'][1]) >= 3: if is_depthwise_conv(node.op): depthwise_conv_num += 1 else: common_conv_num += 1 if depthwise_conv_num > common_conv_num: # mobilenet is_mobile = 1 else: is_mobile = 0 for node in graph.nodes: attrs = {name: node.op.get_config(name) for name in node.op.configs} if is_mobile == 1: # mobilenet if node.op.type in ALL_CONV_OPS and min(attrs['kernel_size'][0], attrs['kernel_size'][1]) >= 3: if is_depthwise_conv(node.op): ancestor = find_prunable_ancestor(graph, node) # 1*1 3*3d 1*1 if ancestor is not None: group_nodes.append([ancestor.name, node.name]) else: child = find_prunable_child(graph, node) # 3*3d 1*1 if child is not None: group_nodes.append([child.name]) else: group_nodes.append([node.name]) else: # resnet if node.op.type in ALL_CONV_OPS: if is_depthwise_conv(node.op): ancestor = find_prunable_ancestor(graph, node) # 1*1 3*3d 1*1 if ancestor is not None: group_nodes.append([ancestor.name, node.name]) else: child = find_prunable_child(graph, node) # 3*3d 1*1 if child is not None: group_nodes.append([child.name]) else: group_nodes.append([node.name]) for node in graph.nodes: if node.op.type != OpTypes.ADD and node.op.type != OpTypes.MULTIPLY: continue eltwise_inputs = [] for name in node.in_nodes: input_node = graph.node(name) if input_node.op.type in ALL_CONV_OPS: eltwise_inputs.append(name) else: ancestor = find_prunable_ancestor(graph, input_node, ALL_CONV_OPS) if ancestor: eltwise_inputs.append(ancestor.name) if len(eltwise_inputs ) < 2 or eltwise_inputs[0] == eltwise_inputs[1]: # Exclude hswish continue for index, group in enumerate(group_nodes): if eltwise_inputs[0] in group or eltwise_inputs[1] in group: group_nodes[index] = list(set(group + eltwise_inputs)) # connected components algorithm 
# Connected-components merge over frozensets: pop one set to seed a group,
# then repeatedly absorb any remaining candidate that overlaps it. Each
# absorbed candidate is removed from `pool`, so both loops terminate; the
# inner for/else breaks out once no candidate overlaps the current group.
pool = set(map(frozenset, group_nodes)) groups = [] while pool: groups.append(set(pool.pop())) while True: for candidate in pool: if groups[-1] & candidate: groups[-1] |= candidate pool.remove(candidate) break else: break for index, group in enumerate(groups): groups[index] = list(group) return groups, is_mobile
Divide convolution nodes into different groups. 1*1 conv only can expand or squeeze dim 3*3 conv 0 is_depthwise_conv 0 ancestor node 1 +/* node 1 not is_depthwise_conv 0 +/* node.
23,118
# Dataset row (id 23119): helpers plus the target function `find_leaf_node`,
# which walks the graph to collect the first conv layer(s) (children of input
# nodes, plus an immediately-following depthwise conv) and the last conv
# layer(s) (ancestors of output nodes).
# NOTE(review): `CONV_OPS`, `TRANSPOSECONV_OPS` and `find_child` are referenced
# but defined elsewhere in the original source file.
from __future__ import absolute_import from __future__ import division from __future__ import print_function from typing import List, Mapping, Any, Union, Tuple import collections from nndct_shared.base.key_names import NNDCT_OP as OpTypes from nndct_shared.nndct_graph.base_node import Node from nndct_shared.metaclass import Singleton from nndct_shared.nndct_graph.base_graph import Graph from nndct_shared.pruning import errors from nndct_shared.pruning import logging from nndct_shared.pruning import node_group as node_group_lib from nndct_shared.utils import registry DEPTHWISECONV_OPS = [ OpTypes.DEPTHWISE_CONV2D, OpTypes.DEPTHWISE_CONVTRANSPOSE2D, OpTypes.DEPTHWISE_CONV3D, OpTypes.DEPTHWISE_CONVTRANSPOSE3D ] ALL_CONV_OPS = CONV_OPS + DEPTHWISECONV_OPS + TRANSPOSECONV_OPS def find_upper_ancestor(graph, node, target_ops): visited = set() ancestor_list = [] def dfs(graph, node, visited, ancestor_list, target_ops): if node.op.type in target_ops: ancestor_list.append(node) return visited.add(node.name) for input_name in node.in_nodes: if input_name not in visited: dfs(graph, graph.node(input_name), visited, ancestor_list, target_ops) dfs(graph, node, visited, ancestor_list, target_ops) return ancestor_list def find_prunable_child(graph, node, target_ops=CONV_OPS): return find_child(graph, node, target_ops, [OpTypes.CONCAT]) The provided code snippet includes necessary dependencies for implementing the `find_leaf_node` function. Write a Python function `def find_leaf_node(graph)` to solve the following problem: find first and last conv layer. if second_node is depthwise_conv, add it. Here is the function: def find_leaf_node(graph): """ find first and last conv layer. if second_node is depthwise_conv, add it. 
""" first_conv_nodes = [] last_conv_nodes = [] for node in graph.nodes: if len(node.in_nodes) == 0: # input first_conv_node = find_prunable_child(graph, node, ALL_CONV_OPS) if first_conv_node is not None: first_conv_nodes.append(first_conv_node) second_conv_node = find_prunable_child(graph, first_conv_node, ALL_CONV_OPS) if second_conv_node is not None and second_conv_node.op.type in DEPTHWISECONV_OPS: first_conv_nodes.append(second_conv_node) elif len(node.out_nodes) == 0 and node.op.type in ALL_CONV_OPS: last_conv_nodes.append(node) elif len(node.out_nodes) == 0: # output last_conv_node_list = find_upper_ancestor(graph, node, ALL_CONV_OPS) if len(last_conv_node_list) != 0: for i in last_conv_node_list: last_conv_nodes.append(i) return first_conv_nodes, last_conv_nodes
find first and last conv layer. if second_node is depthwise_conv, add it.
23,119
# Dataset row (id 23120): the `_op_modifier` registry, the fallback
# `propagate_node_pruning`, and the dispatcher `update_node_by_pruning` which
# looks up a per-op-type modification function and falls back to plain
# propagation when no function is registered for the op type.
# NOTE(review): `_set_input_by_upstream` and `_set_output_by_input` are defined
# elsewhere in the original source file.
from __future__ import absolute_import from __future__ import division from __future__ import print_function from typing import List, Mapping, Any, Union, Tuple import collections from nndct_shared.base.key_names import NNDCT_OP as OpTypes from nndct_shared.nndct_graph.base_node import Node from nndct_shared.metaclass import Singleton from nndct_shared.nndct_graph.base_graph import Graph from nndct_shared.pruning import errors from nndct_shared.pruning import logging from nndct_shared.pruning import node_group as node_group_lib from nndct_shared.utils import registry _op_modifier = registry.Registry("Pruning Modifier Functions") def propagate_node_pruning(node, pruning_res): _set_input_by_upstream(node, pruning_res) _set_output_by_input(node, pruning_res) The provided code snippet includes necessary dependencies for implementing the `update_node_by_pruning` function. Write a Python function `def update_node_by_pruning(graph, node, pruning_res)` to solve the following problem: Looks up the node's modification function in the registry and calls it. This function takes a NndctGraph object, a NndctNode from it, and the dictionary of PruningInfo and if there's an associated modification method, calls it. If no function has been registered for the particular op type, a general fucntion will be called which simply set current node's pruning info and do not update the node's attribute. statistics object Args: graph: A NndctGraph that the pruning is performed on. node: A NndctNode describing the operator. pruning_res: A dictionary of `StructuredPruning`. Here is the function: def update_node_by_pruning(graph, node, pruning_res): """Looks up the node's modification function in the registry and calls it. This function takes a NndctGraph object, a NndctNode from it, and the dictionary of PruningInfo and if there's an associated modification method, calls it. 
If no function has been registered for the particular op type, a general function will be called which simply set current node's pruning info and do not update the node's attribute. statistics object Args: graph: A NndctGraph that the pruning is performed on. node: A NndctNode describing the operator. pruning_res: A dictionary of `StructuredPruning`. """ op_type = node.op.type if op_type in _op_modifier: mod_func = _op_modifier.lookup(op_type) mod_func(graph, node, pruning_res) else: propagate_node_pruning(node, pruning_res)
Looks up the node's modification function in the registry and calls it. This function takes a NndctGraph object, a NndctNode from it, and the dictionary of PruningInfo and if there's an associated modification method, calls it. If no function has been registered for the particular op type, a general function will be called which simply set current node's pruning info and do not update the node's attribute. statistics object Args: graph: A NndctGraph that the pruning is performed on. node: A NndctNode describing the operator. pruning_res: A dictionary of `StructuredPruning`.
23,120
# Dataset row (id 23121): `_modify_depthwise` propagates upstream channel
# pruning through a depthwise conv — it does not actively prune but derives
# removed outputs from removed inputs via the depth multiplier
# (out_dim // in_dim), then mirrors the pruned dims onto the op attrs.
# `modify_depthwise_conv` is a thin wrapper that delegates to it.
# NOTE(review): the bare tuple `OpTypes.DEPTHWISE_CONV2D, ...` between the two
# defs looks like the argument list of a registration decorator (cf. the
# `_op_modifier` registry elsewhere in this file) that was stripped during
# extraction — confirm against the original source.
# NOTE(review): `_set_input_by_upstream` is defined elsewhere in the original
# source file.
from __future__ import absolute_import from __future__ import division from __future__ import print_function from typing import List, Mapping, Any, Union, Tuple import collections from nndct_shared.base.key_names import NNDCT_OP as OpTypes from nndct_shared.nndct_graph.base_node import Node from nndct_shared.metaclass import Singleton from nndct_shared.nndct_graph.base_graph import Graph from nndct_shared.pruning import errors from nndct_shared.pruning import logging from nndct_shared.pruning import node_group as node_group_lib from nndct_shared.utils import registry def _modify_depthwise(graph, node, pruning_res): node_pruning = _set_input_by_upstream(node, pruning_res) if not node_pruning.has_in_dim(): return # We do not actively prune the depthwise conv, but simply modify the weights # based on the pruning information of the previous layers # Weight layout: [depth_multiplier, H, W, input_dim] dw_multiplier = node.op.attr['out_dim'] // node.op.attr['in_dim'] removed_outputs = [] for c in node_pruning.removed_inputs: for k in range(dw_multiplier): removed_outputs.append(c * dw_multiplier + k) node_pruning.removed_outputs = removed_outputs node_pruning.out_dim = node_pruning.in_dim * dw_multiplier node.op.attr['group'] = node_pruning.in_dim node.op.attr['in_dim'] = node_pruning.in_dim node.op.attr['out_dim'] = node_pruning.out_dim OpTypes.DEPTHWISE_CONV2D, OpTypes.DEPTHWISE_CONV3D, OpTypes.DEPTHWISE_CONVTRANSPOSE2D, OpTypes.DEPTHWISE_CONVTRANSPOSE3D def modify_depthwise_conv(graph, node, pruning_res): _modify_depthwise(graph, node, pruning_res)
null
23,121
# Dataset row (id 23122): conv-pruning modifiers. `modify_conv2d` dispatches:
# depthwise convs go through `_modify_depthwise`, separable convs through
# `modify_separable_conv`, and plain convs just mirror the pruning result onto
# the op's in_dim/out_dim attrs.
# NOTE(review): `DEPTHWISECONV_OPS`, `SEPARABLECONV_OPS` and `is_grouped_conv`
# are referenced but defined elsewhere in the original source file.
from __future__ import absolute_import from __future__ import division from __future__ import print_function from typing import List, Mapping, Any, Union, Tuple import collections from nndct_shared.base.key_names import NNDCT_OP as OpTypes from nndct_shared.nndct_graph.base_node import Node from nndct_shared.metaclass import Singleton from nndct_shared.nndct_graph.base_graph import Graph from nndct_shared.pruning import errors from nndct_shared.pruning import logging from nndct_shared.pruning import node_group as node_group_lib from nndct_shared.utils import registry def is_depthwise_conv(op): if op.type in DEPTHWISECONV_OPS: return True if not is_grouped_conv(op): return False out_channels = op.attr['out_dim'] in_channels = op.attr['in_dim'] group = op.attr['group'] return group == in_channels def is_separable_conv(op): if op.type in SEPARABLECONV_OPS: # at present return True return False def _set_input_by_upstream(node, pruning_res): node_pruning = pruning_res[node.name] input_pruning = pruning_res[node.in_nodes[0]] node_pruning.removed_inputs = input_pruning.removed_outputs node_pruning.in_dim = input_pruning.out_dim logging.vlog( 3, 'Set input pruning of {} by upstream {}'.format(node.name, node.in_nodes[0])) return node_pruning def _modify_depthwise(graph, node, pruning_res): node_pruning = _set_input_by_upstream(node, pruning_res) if not node_pruning.has_in_dim(): return # We do not actively prune the depthwise conv, but simply modify the weights # based on the pruning information of the previous layers # Weight layout: [depth_multiplier, H, W, input_dim] dw_multiplier = node.op.attr['out_dim'] // node.op.attr['in_dim'] removed_outputs = [] for c in node_pruning.removed_inputs: for k in range(dw_multiplier): removed_outputs.append(c * dw_multiplier + k) node_pruning.removed_outputs = removed_outputs node_pruning.out_dim = node_pruning.in_dim * dw_multiplier node.op.attr['group'] = node_pruning.in_dim node.op.attr['in_dim'] = node_pruning.in_dim 
# (continuation of `_modify_depthwise`: mirror the pruned out_dim onto the op
# attr, then the separable-conv and generic conv2d modifiers follow; the bare
# OpTypes tuple again looks like a stripped registration decorator —
# NOTE(review): confirm against the original source.)
node.op.attr['out_dim'] = node_pruning.out_dim OpTypes.DEPTHWISE_CONV2D, OpTypes.DEPTHWISE_CONV3D, OpTypes.DEPTHWISE_CONVTRANSPOSE2D, OpTypes.DEPTHWISE_CONVTRANSPOSE3D def modify_separable_conv(graph, node, pruning_res): node_pruning = _set_input_by_upstream(node, pruning_res) if not node_pruning.has_in_dim(): return dw_multiplier = node.op.attr['depth_multiplier'] removed_pointwise_kernel_inputs = [] for c in node_pruning.removed_inputs: for k in range(dw_multiplier): removed_pointwise_kernel_inputs.append(c * dw_multiplier + k) # depthwise kernel can prune inputs but can't prune output # pointwise kernel can prune inputs and output # but the inputs is depend on the depthwise kernel node_pruning.removed_separableconv_pointwise_inputs = \ removed_pointwise_kernel_inputs if node_pruning.has_in_dim(): node.op.attr['in_dim'] = node_pruning.in_dim if node_pruning.has_out_dim(): node.op.attr['out_dim'] = node_pruning.out_dim def modify_conv2d(graph, node, pruning_res): # In pytorch, dw conv is repesented by conv2d with groups == in_channels and # out_channels == K * in_channels, where K is a positive integer. if is_depthwise_conv(node.op): _modify_depthwise(graph, node, pruning_res) return if is_separable_conv(node.op): modify_separable_conv(graph, node, pruning_res) return node_pruning = _set_input_by_upstream(node, pruning_res) if node_pruning.has_in_dim(): node.op.attr['in_dim'] = node_pruning.in_dim if node_pruning.has_out_dim(): node.op.attr['out_dim'] = node_pruning.out_dim
null
23,122
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from typing import List, Mapping, Any, Union, Tuple
import collections

from nndct_shared.base.key_names import NNDCT_OP as OpTypes
from nndct_shared.nndct_graph.base_node import Node
from nndct_shared.metaclass import Singleton
from nndct_shared.nndct_graph.base_graph import Graph
from nndct_shared.pruning import errors
from nndct_shared.pruning import logging
from nndct_shared.pruning import node_group as node_group_lib
from nndct_shared.utils import registry


def modify_instancenorm(graph, node, pruning_res):
  """Propagate the producer's channel pruning through an instance-norm node.

  Instance norm neither adds nor removes channels, so the pruning recorded
  for this node's single input is inherited unchanged on both sides.
  """
  # Under test...
  this_pruning = pruning_res[node.name]
  producer_pruning = pruning_res[node.in_nodes[0]]
  this_pruning.removed_inputs = producer_pruning.removed_outputs
  this_pruning.removed_outputs = producer_pruning.removed_outputs
  this_pruning.in_dim = producer_pruning.out_dim
  this_pruning.out_dim = producer_pruning.out_dim
null
23,123
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from typing import List, Mapping, Any, Union, Tuple
import collections

from nndct_shared.base.key_names import NNDCT_OP as OpTypes
from nndct_shared.nndct_graph.base_node import Node
from nndct_shared.metaclass import Singleton
from nndct_shared.nndct_graph.base_graph import Graph
from nndct_shared.pruning import errors
from nndct_shared.pruning import logging
from nndct_shared.pruning import node_group as node_group_lib
from nndct_shared.utils import registry


def modify_batchnorm(graph, node, pruning_res):
  """Propagate the producer's channel pruning through a batch-norm node.

  Batch norm is per-channel elementwise, so the pruning recorded for this
  node's single input is inherited unchanged on both sides, and the op's
  out_dim attribute is updated when an output dim has been determined.
  """
  this_pruning = pruning_res[node.name]
  producer_pruning = pruning_res[node.in_nodes[0]]
  pruned_channels = producer_pruning.removed_outputs
  surviving_dim = producer_pruning.out_dim
  this_pruning.removed_inputs = pruned_channels
  this_pruning.removed_outputs = pruned_channels
  this_pruning.in_dim = surviving_dim
  this_pruning.out_dim = surviving_dim
  if this_pruning.has_out_dim():
    node.op.attr['out_dim'] = this_pruning.out_dim
null
23,124
# Dataset row (id 23124): `modify_flatten` — if the flatten's input has pruned
# channels, it BFS-walks all downstream nodes (via `queue`/`out_nodes`) and
# raises OptimizerNotExcludeNodeError when any downstream node is a conv,
# telling the user to exclude the prunable ancestor; otherwise it falls through
# to plain pruning propagation.
# NOTE(review): `find_prunable_ancestor` and `propagate_node_pruning` appear
# below with signatures only — their bodies were stripped by the dataset
# extraction; their implementations appear in other rows of this file.
from __future__ import absolute_import from __future__ import division from __future__ import print_function from typing import List, Mapping, Any, Union, Tuple import collections from nndct_shared.base.key_names import NNDCT_OP as OpTypes from nndct_shared.nndct_graph.base_node import Node from nndct_shared.metaclass import Singleton from nndct_shared.nndct_graph.base_graph import Graph from nndct_shared.pruning import errors from nndct_shared.pruning import logging from nndct_shared.pruning import node_group as node_group_lib from nndct_shared.utils import registry CONV_OPS = [ OpTypes.CONV2D, OpTypes.CONVTRANSPOSE2D, OpTypes.CONV3D, OpTypes.CONVTRANSPOSE3D, OpTypes.SEPARABLECONV2D ] def find_prunable_ancestor(graph, node, target_ops=CONV_OPS): def propagate_node_pruning(node, pruning_res): def modify_flatten(graph, node, pruning_res): input_pruning = pruning_res[node.in_nodes[0]] if input_pruning.removed_outputs: downstream_nodes = [] queue = [node.name] while len(queue): cur_node = graph.node(queue.pop()) downstream_nodes.append(cur_node) queue.extend(cur_node.out_nodes) ancestor = find_prunable_ancestor(graph, node) for downstream_node in downstream_nodes: if downstream_node.op.type in CONV_OPS: raise errors.OptimizerNotExcludeNodeError( 'Must exclude node from pruning: {}'.format(ancestor.name)) propagate_node_pruning(node, pruning_res)
null
23,125
# Dataset row (id 23125): `modify_dense` — remaps the producer's pruned
# channels onto the flattened input indices of a dense/linear layer. The
# original channel count is recovered as removed + surviving
# (`orig_out_depth`), and `spatial_size` is how many flattened positions each
# channel occupies (in_dim // orig_out_depth). The two index formulas follow
# the code's own layout comment: channels_last interleaves as
# s * orig_out_depth + c, channels_first is channel-major as
# c * spatial_size + s. Finally in_dim is shrunk to
# spatial_size * surviving channels; out_dim is untouched (dense output is
# not pruned here).
from __future__ import absolute_import from __future__ import division from __future__ import print_function from typing import List, Mapping, Any, Union, Tuple import collections from nndct_shared.base.key_names import NNDCT_OP as OpTypes from nndct_shared.nndct_graph.base_node import Node from nndct_shared.metaclass import Singleton from nndct_shared.nndct_graph.base_graph import Graph from nndct_shared.pruning import errors from nndct_shared.pruning import logging from nndct_shared.pruning import node_group as node_group_lib from nndct_shared.utils import registry def modify_dense(graph, node, pruning_res): input_pruning = pruning_res[node.in_nodes[0]] if not input_pruning.has_out_dim(): return orig_out_depth = len(input_pruning.removed_outputs) + input_pruning.out_dim spatial_size = node.op.attr['in_dim'] // orig_out_depth # Two data formats: # [-1, 7, 7, 64(32)] => [-1, 3136(1518)] # [-1, 32(16), 5, 5] => [-1, 800(400)] removed_inputs = [] data_format = graph.data_format if hasattr( graph, 'data_format') else 'channels_first' if data_format == 'channels_last': for s in range(spatial_size): for c in input_pruning.removed_outputs: removed_inputs.append(s * orig_out_depth + c) else: for c in input_pruning.removed_outputs: for s in range(spatial_size): removed_inputs.append(c * spatial_size + s) in_features = spatial_size * input_pruning.out_dim node.op.attr['in_dim'] = in_features node_pruning = pruning_res[node.name] node_pruning.removed_inputs = removed_inputs node_pruning.in_dim = in_features node_pruning.out_dim = node.op.attr['out_dim']
null