id (int64, 0–190k) | prompt (string, 21–13.4M chars) | docstring (string, 1–12k chars, nullable ⌀) |
|---|---|---|
9,265 | import os
from tqdm import tqdm
import numpy as np
import tiktoken
from datasets import load_dataset
enc = tiktoken.get_encoding("gpt2")
def process(example):
ids = enc.encode_ordinary(example['text']) # encode_ordinary ignores any special tokens
ids.append(enc.eot_token) # add the end of text token, e.g. 50256 for gpt2 bpe
# note: I think eot should be prepended not appended... hmm. it's called "eot" though...
out = {'ids': ids, 'len': len(ids)}
return out | null |
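A quick usage sketch for the snippet above (assuming the GPT-2 BPE vocabulary, where enc.eot_token == 50256; the input dict is hypothetical sample data):
out = process({'text': 'hello world'})
print(out['len'])      # number of tokens, including the appended EOT
print(out['ids'][-1])  # 50256, the GPT-2 end-of-text token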
9,266 | import os
import pickle
import requests
import numpy as np
stoi = { ch:i for i,ch in enumerate(chars) }
def encode(s):
return [stoi[c] for c in s] # encoder: take a string, output a list of integers | null |
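The snippet assumes a character vocabulary `chars` that is not shown; a minimal sketch with a hypothetical vocabulary:
chars = sorted(set('hello'))                  # ['e', 'h', 'l', 'o']
stoi = { ch:i for i,ch in enumerate(chars) }
print(encode('hell'))                         # [1, 0, 2, 2]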
9,267 | import os
import pickle
import requests
import numpy as np
itos = { i:ch for i,ch in enumerate(chars) }
def decode(l):
return ''.join([itos[i] for i in l]) # decoder: take a list of integers, output a string | null |
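A round-trip sketch, again assuming a hypothetical `chars` vocabulary:
chars = sorted(set('hello'))                  # ['e', 'h', 'l', 'o']
itos = { i:ch for i,ch in enumerate(chars) }
print(decode([1, 0, 2, 2]))                   # 'hell'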
9,268 | import os
import re
from typing import List, Optional, Tuple, Union
import click
import dnnlib
import numpy as np
import PIL.Image
import torch
from tqdm import tqdm
import mrcfile
import legacy
from camera_utils import LookAtPoseSampler, FOV_to_intrinsics
from torch_utils import misc
from training.triplane import TriPlaneGenerator
The provided code snippet includes necessary dependencies for implementing the `parse_range` function. Write a Python function `def parse_range(s: Union[str, List]) -> List[int]` to solve the following problem:
Parse a comma separated list of numbers or ranges and return a list of ints. Example: '1,2,5-10' returns [1, 2, 5, 6, 7, 8, 9, 10]
Here is the function:
def parse_range(s: Union[str, List]) -> List[int]:
'''Parse a comma separated list of numbers or ranges and return a list of ints.
Example: '1,2,5-10' returns [1, 2, 5, 6, 7, 8, 9, 10]
'''
if isinstance(s, list): return s
ranges = []
range_re = re.compile(r'^(\d+)-(\d+)$')
for p in s.split(','):
if m := range_re.match(p):
ranges.extend(range(int(m.group(1)), int(m.group(2))+1))
else:
ranges.append(int(p))
return ranges | Parse a comma separated list of numbers or ranges and return a list of ints. Example: '1,2,5-10' returns [1, 2, 5, 6, 7, 8, 9, 10] |
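A quick usage sketch (note that ranges are inclusive on both ends):
print(parse_range('1,2,5-10'))  # [1, 2, 5, 6, 7, 8, 9, 10]
print(parse_range([3, 4]))      # lists pass through unchanged: [3, 4]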
9,269 | import os
import re
from typing import List, Optional, Tuple, Union
import click
import dnnlib
import numpy as np
import PIL.Image
import torch
from tqdm import tqdm
import mrcfile
import legacy
from camera_utils import LookAtPoseSampler, FOV_to_intrinsics
from torch_utils import misc
from training.triplane import TriPlaneGenerator
The provided code snippet includes necessary dependencies for implementing the `parse_vec2` function. Write a Python function `def parse_vec2(s: Union[str, Tuple[float, float]]) -> Tuple[float, float]` to solve the following problem:
Parse a floating point 2-vector of syntax 'a,b'. Example: '0,1' returns (0,1)
Here is the function:
def parse_vec2(s: Union[str, Tuple[float, float]]) -> Tuple[float, float]:
'''Parse a floating point 2-vector of syntax 'a,b'.
Example:
'0,1' returns (0,1)
'''
if isinstance(s, tuple): return s
parts = s.split(',')
if len(parts) == 2:
return (float(parts[0]), float(parts[1]))
raise ValueError(f'cannot parse 2-vector {s}') | Parse a floating point 2-vector of syntax 'a,b'. Example: '0,1' returns (0,1) |
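A quick usage sketch:
print(parse_vec2('0,1'))       # (0.0, 1.0)
print(parse_vec2((0.5, 2.0)))  # tuples pass through unchanged: (0.5, 2.0)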
9,270 | import os
import re
from typing import List, Optional, Tuple, Union
import click
import dnnlib
import numpy as np
import PIL.Image
import torch
from tqdm import tqdm
import mrcfile
import legacy
from camera_utils import LookAtPoseSampler, FOV_to_intrinsics
from torch_utils import misc
from training.triplane import TriPlaneGenerator
def make_transform(translate: Tuple[float,float], angle: float):
m = np.eye(3)
s = np.sin(angle/360.0*np.pi*2)
c = np.cos(angle/360.0*np.pi*2)
m[0][0] = c
m[0][1] = s
m[0][2] = translate[0]
m[1][0] = -s
m[1][1] = c
m[1][2] = translate[1]
return m | null |
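A quick sketch of the 3x3 homogeneous matrix this builds (a 2D rotation plus translation; the angle is given in degrees):
m = make_transform((0.0, 0.0), 90.0)
# approximately [[ 0, 1, 0],
#                [-1, 0, 0],
#                [ 0, 0, 1]]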
9,271 | import os
import re
from typing import List, Optional, Tuple, Union
import click
import dnnlib
import numpy as np
import PIL.Image
import torch
from tqdm import tqdm
import mrcfile
import legacy
from camera_utils import LookAtPoseSampler, FOV_to_intrinsics
from torch_utils import misc
from training.triplane import TriPlaneGenerator
def create_samples(N=256, voxel_origin=[0, 0, 0], cube_length=2.0):
# NOTE: the voxel_origin is actually the (bottom, left, down) corner, not the middle
voxel_origin = np.array(voxel_origin) - cube_length/2
voxel_size = cube_length / (N - 1)
overall_index = torch.arange(0, N ** 3, 1, out=torch.LongTensor())
samples = torch.zeros(N ** 3, 3)
# transform first 3 columns
# to be the x, y, z index
samples[:, 2] = overall_index % N
samples[:, 1] = (overall_index.float() / N) % N
samples[:, 0] = ((overall_index.float() / N) / N) % N
# transform first 3 columns
# to be the x, y, z coordinate
samples[:, 0] = (samples[:, 0] * voxel_size) + voxel_origin[2]
samples[:, 1] = (samples[:, 1] * voxel_size) + voxel_origin[1]
samples[:, 2] = (samples[:, 2] * voxel_size) + voxel_origin[0]
num_samples = N ** 3
return samples.unsqueeze(0), voxel_origin, voxel_size
class LookAtPoseSampler:
"""
Same as GaussianCameraPoseSampler, except the
camera is specified as looking at 'lookat_position', a 3-vector.
Example:
For a camera pose looking at the origin with the camera at position [0, 0, 1]:
cam2world = LookAtPoseSampler.sample(math.pi/2, math.pi/2, torch.tensor([0, 0, 0]), radius=1)
"""
def sample(horizontal_mean, vertical_mean, lookat_position, horizontal_stddev=0, vertical_stddev=0, radius=1, batch_size=1, device='cpu'):
h = torch.randn((batch_size, 1), device=device) * horizontal_stddev + horizontal_mean
v = torch.randn((batch_size, 1), device=device) * vertical_stddev + vertical_mean
v = torch.clamp(v, 1e-5, math.pi - 1e-5)
theta = h
v = v / math.pi
phi = torch.arccos(1 - 2*v)
camera_origins = torch.zeros((batch_size, 3), device=device)
camera_origins[:, 0:1] = radius*torch.sin(phi) * torch.cos(math.pi-theta)
camera_origins[:, 2:3] = radius*torch.sin(phi) * torch.sin(math.pi-theta)
camera_origins[:, 1:2] = radius*torch.cos(phi)
# forward_vectors = math_utils.normalize_vecs(-camera_origins)
forward_vectors = math_utils.normalize_vecs(lookat_position - camera_origins)
return create_cam2world_matrix(forward_vectors, camera_origins)
def FOV_to_intrinsics(fov_degrees, device='cpu'):
"""
Creates a 3x3 camera intrinsics matrix from the camera field of view, specified in degrees.
Note the intrinsics are returned as normalized by image size, rather than in pixel units.
Assumes principal point is at image center.
"""
focal_length = float(1 / (math.tan(fov_degrees * 3.14159 / 360) * 1.414))
intrinsics = torch.tensor([[focal_length, 0, 0.5], [0, focal_length, 0.5], [0, 0, 1]], device=device)
return intrinsics
class TriPlaneGenerator(torch.nn.Module):
def __init__(self,
z_dim, # Input latent (Z) dimensionality.
c_dim, # Conditioning label (C) dimensionality.
w_dim, # Intermediate latent (W) dimensionality.
img_resolution, # Output resolution.
img_channels, # Number of output color channels.
sr_num_fp16_res = 0,
mapping_kwargs = {}, # Arguments for MappingNetwork.
rendering_kwargs = {},
sr_kwargs = {},
**synthesis_kwargs, # Arguments for SynthesisNetwork.
):
super().__init__()
self.z_dim=z_dim
self.c_dim=c_dim
self.w_dim=w_dim
self.img_resolution=img_resolution
self.img_channels=img_channels
self.renderer = ImportanceRenderer()
self.ray_sampler = RaySampler()
self.backbone = StyleGAN2Backbone(z_dim, c_dim, w_dim, img_resolution=256, img_channels=32*3, mapping_kwargs=mapping_kwargs, **synthesis_kwargs)
self.superresolution = dnnlib.util.construct_class_by_name(class_name=rendering_kwargs['superresolution_module'], channels=32, img_resolution=img_resolution, sr_num_fp16_res=sr_num_fp16_res, sr_antialias=rendering_kwargs['sr_antialias'], **sr_kwargs)
self.decoder = OSGDecoder(32, {'decoder_lr_mul': rendering_kwargs.get('decoder_lr_mul', 1), 'decoder_output_dim': 32})
self.neural_rendering_resolution = 64
self.rendering_kwargs = rendering_kwargs
self._last_planes = None
def mapping(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False):
if self.rendering_kwargs['c_gen_conditioning_zero']:
c = torch.zeros_like(c)
return self.backbone.mapping(z, c * self.rendering_kwargs.get('c_scale', 0), truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas)
def synthesis(self, ws, c, neural_rendering_resolution=None, update_emas=False, cache_backbone=False, use_cached_backbone=False, **synthesis_kwargs):
cam2world_matrix = c[:, :16].view(-1, 4, 4)
intrinsics = c[:, 16:25].view(-1, 3, 3)
if neural_rendering_resolution is None:
neural_rendering_resolution = self.neural_rendering_resolution
else:
self.neural_rendering_resolution = neural_rendering_resolution
# Create a batch of rays for volume rendering
ray_origins, ray_directions = self.ray_sampler(cam2world_matrix, intrinsics, neural_rendering_resolution)
# Create triplanes by running StyleGAN backbone
N, M, _ = ray_origins.shape
if use_cached_backbone and self._last_planes is not None:
planes = self._last_planes
else:
planes = self.backbone.synthesis(ws, update_emas=update_emas, **synthesis_kwargs)
if cache_backbone:
self._last_planes = planes
# Reshape output into three 32-channel planes
planes = planes.view(len(planes), 3, 32, planes.shape[-2], planes.shape[-1])
# Perform volume rendering
feature_samples, depth_samples, weights_samples = self.renderer(planes, self.decoder, ray_origins, ray_directions, self.rendering_kwargs) # channels last
# Reshape into 'raw' neural-rendered image
H = W = self.neural_rendering_resolution
feature_image = feature_samples.permute(0, 2, 1).reshape(N, feature_samples.shape[-1], H, W).contiguous()
depth_image = depth_samples.permute(0, 2, 1).reshape(N, 1, H, W)
# Run superresolution to get final image
rgb_image = feature_image[:, :3]
sr_image = self.superresolution(rgb_image, feature_image, ws, noise_mode=self.rendering_kwargs['superresolution_noise_mode'], **{k:synthesis_kwargs[k] for k in synthesis_kwargs.keys() if k != 'noise_mode'})
return {'image': sr_image, 'image_raw': rgb_image, 'image_depth': depth_image}
def sample(self, coordinates, directions, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False, **synthesis_kwargs):
# Compute RGB features, density for arbitrary 3D coordinates. Mostly used for extracting shapes.
ws = self.mapping(z, c, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas)
planes = self.backbone.synthesis(ws, update_emas=update_emas, **synthesis_kwargs)
planes = planes.view(len(planes), 3, 32, planes.shape[-2], planes.shape[-1])
return self.renderer.run_model(planes, self.decoder, coordinates, directions, self.rendering_kwargs)
def sample_mixed(self, coordinates, directions, ws, truncation_psi=1, truncation_cutoff=None, update_emas=False, **synthesis_kwargs):
# Same as sample, but expects latent vectors 'ws' instead of Gaussian noise 'z'
planes = self.backbone.synthesis(ws, update_emas = update_emas, **synthesis_kwargs)
planes = planes.view(len(planes), 3, 32, planes.shape[-2], planes.shape[-1])
return self.renderer.run_model(planes, self.decoder, coordinates, directions, self.rendering_kwargs)
def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, neural_rendering_resolution=None, update_emas=False, cache_backbone=False, use_cached_backbone=False, **synthesis_kwargs):
# Render a batch of generated images.
ws = self.mapping(z, c, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas)
return self.synthesis(ws, c, update_emas=update_emas, neural_rendering_resolution=neural_rendering_resolution, cache_backbone=cache_backbone, use_cached_backbone=use_cached_backbone, **synthesis_kwargs)
def convert_sdf_samples_to_ply(
numpy_3d_sdf_tensor,
voxel_grid_origin,
voxel_size,
ply_filename_out,
offset=None,
scale=None,
level=0.0
):
"""
Convert sdf samples to .ply
:param numpy_3d_sdf_tensor: a numpy.ndarray of shape (n,n,n)
:param voxel_grid_origin: a list of three floats: the bottom, left, down origin of the voxel grid
:param voxel_size: float, the size of the voxels
:param ply_filename_out: string, path of the filename to save to
This function is adapted from: https://github.com/RobotLocomotion/spartan
"""
start_time = time.time()
verts, faces, normals, values = np.zeros((0, 3)), np.zeros((0, 3)), np.zeros((0, 3)), np.zeros(0)
# try:
verts, faces, normals, values = skimage.measure.marching_cubes(
numpy_3d_sdf_tensor, level=level, spacing=[voxel_size] * 3
)
# except:
# pass
# transform from voxel coordinates to camera coordinates
# note x and y are flipped in the output of marching_cubes
mesh_points = np.zeros_like(verts)
mesh_points[:, 0] = voxel_grid_origin[0] + verts[:, 0]
mesh_points[:, 1] = voxel_grid_origin[1] + verts[:, 1]
mesh_points[:, 2] = voxel_grid_origin[2] + verts[:, 2]
# apply additional offset and scale
if scale is not None:
mesh_points = mesh_points / scale
if offset is not None:
mesh_points = mesh_points - offset
# try writing to the ply file
num_verts = verts.shape[0]
num_faces = faces.shape[0]
verts_tuple = np.zeros((num_verts,), dtype=[("x", "f4"), ("y", "f4"), ("z", "f4")])
for i in range(0, num_verts):
verts_tuple[i] = tuple(mesh_points[i, :])
faces_building = []
for i in range(0, num_faces):
faces_building.append(((faces[i, :].tolist(),)))
faces_tuple = np.array(faces_building, dtype=[("vertex_indices", "i4", (3,))])
el_verts = plyfile.PlyElement.describe(verts_tuple, "vertex")
el_faces = plyfile.PlyElement.describe(faces_tuple, "face")
ply_data = plyfile.PlyData([el_verts, el_faces])
ply_data.write(ply_filename_out)
print(f"wrote to {ply_filename_out}")
The provided code snippet includes necessary dependencies for implementing the `generate_images` function. Write a Python function `def generate_images( network_pkl: str, seeds: List[int], truncation_psi: float, truncation_cutoff: int, outdir: str, shapes: bool, shape_res: int, fov_deg: float, shape_format: str, class_idx: Optional[int], reload_modules: bool, )` to solve the following problem:
Generate images using pretrained network pickle. Examples: \b # Generate an image using pre-trained FFHQ model. python gen_samples.py --outdir=output --trunc=0.7 --seeds=0-5 --shapes=True\\ --network=ffhq-rebalanced-128.pkl
Here is the function:
def generate_images(
network_pkl: str,
seeds: List[int],
truncation_psi: float,
truncation_cutoff: int,
outdir: str,
shapes: bool,
shape_res: int,
fov_deg: float,
shape_format: str,
class_idx: Optional[int],
reload_modules: bool,
):
"""Generate images using pretrained network pickle.
Examples:
\b
# Generate an image using pre-trained FFHQ model.
python gen_samples.py --outdir=output --trunc=0.7 --seeds=0-5 --shapes=True\\
--network=ffhq-rebalanced-128.pkl
"""
print('Loading networks from "%s"...' % network_pkl)
device = torch.device('cuda')
with dnnlib.util.open_url(network_pkl) as f:
G = legacy.load_network_pkl(f)['G_ema'].to(device) # type: ignore
# Specify reload_modules=True if you want code modifications to take effect; otherwise uses pickled code
if reload_modules:
print("Reloading Modules!")
G_new = TriPlaneGenerator(*G.init_args, **G.init_kwargs).eval().requires_grad_(False).to(device)
misc.copy_params_and_buffers(G, G_new, require_all=True)
G_new.neural_rendering_resolution = G.neural_rendering_resolution
G_new.rendering_kwargs = G.rendering_kwargs
G = G_new
os.makedirs(outdir, exist_ok=True)
cam2world_pose = LookAtPoseSampler.sample(3.14/2, 3.14/2, torch.tensor([0, 0, 0.2], device=device), radius=2.7, device=device)
intrinsics = FOV_to_intrinsics(fov_deg, device=device)
# Generate images.
for seed_idx, seed in enumerate(seeds):
print('Generating image for seed %d (%d/%d) ...' % (seed, seed_idx, len(seeds)))
z = torch.from_numpy(np.random.RandomState(seed).randn(1, G.z_dim)).to(device)
imgs = []
angle_p = -0.2
for angle_y, angle_p in [(.4, angle_p), (0, angle_p), (-.4, angle_p)]:
cam_pivot = torch.tensor(G.rendering_kwargs.get('avg_camera_pivot', [0, 0, 0]), device=device)
cam_radius = G.rendering_kwargs.get('avg_camera_radius', 2.7)
cam2world_pose = LookAtPoseSampler.sample(np.pi/2 + angle_y, np.pi/2 + angle_p, cam_pivot, radius=cam_radius, device=device)
conditioning_cam2world_pose = LookAtPoseSampler.sample(np.pi/2, np.pi/2, cam_pivot, radius=cam_radius, device=device)
camera_params = torch.cat([cam2world_pose.reshape(-1, 16), intrinsics.reshape(-1, 9)], 1)
conditioning_params = torch.cat([conditioning_cam2world_pose.reshape(-1, 16), intrinsics.reshape(-1, 9)], 1)
ws = G.mapping(z, conditioning_params, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff)
img = G.synthesis(ws, camera_params)['image']
img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
imgs.append(img)
img = torch.cat(imgs, dim=2)
PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB').save(f'{outdir}/seed{seed:04d}.png')
if shapes:
# extract a shape.mrc with marching cubes. You can view the .mrc file using ChimeraX from UCSF.
max_batch=1000000
samples, voxel_origin, voxel_size = create_samples(N=shape_res, voxel_origin=[0, 0, 0], cube_length=G.rendering_kwargs['box_warp'] * 1)#.reshape(1, -1, 3)
samples = samples.to(z.device)
sigmas = torch.zeros((samples.shape[0], samples.shape[1], 1), device=z.device)
transformed_ray_directions_expanded = torch.zeros((samples.shape[0], max_batch, 3), device=z.device)
transformed_ray_directions_expanded[..., -1] = -1
head = 0
with tqdm(total = samples.shape[1]) as pbar:
with torch.no_grad():
while head < samples.shape[1]:
torch.manual_seed(0)
sigma = G.sample(samples[:, head:head+max_batch], transformed_ray_directions_expanded[:, :samples.shape[1]-head], z, conditioning_params, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, noise_mode='const')['sigma']
sigmas[:, head:head+max_batch] = sigma
head += max_batch
pbar.update(max_batch)
sigmas = sigmas.reshape((shape_res, shape_res, shape_res)).cpu().numpy()
sigmas = np.flip(sigmas, 0)
# Trim the border of the extracted cube
pad = int(30 * shape_res / 256)
pad_value = -1000
sigmas[:pad] = pad_value
sigmas[-pad:] = pad_value
sigmas[:, :pad] = pad_value
sigmas[:, -pad:] = pad_value
sigmas[:, :, :pad] = pad_value
sigmas[:, :, -pad:] = pad_value
if shape_format == '.ply':
from shape_utils import convert_sdf_samples_to_ply
convert_sdf_samples_to_ply(np.transpose(sigmas, (2, 1, 0)), [0, 0, 0], 1, os.path.join(outdir, f'seed{seed:04d}.ply'), level=10)
elif shape_format == '.mrc': # output mrc
with mrcfile.new_mmap(os.path.join(outdir, f'seed{seed:04d}.mrc'), overwrite=True, shape=sigmas.shape, mrc_mode=2) as mrc:
mrc.data[:] = sigmas | Generate images using pretrained network pickle. Examples: \b # Generate an image using pre-trained FFHQ model. python gen_samples.py --outdir=output --trunc=0.7 --seeds=0-5 --shapes=True\\ --network=ffhq-rebalanced-128.pkl |
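A small sketch exercising create_samples from the snippet above (the camera and generator parts additionally need the math, math_utils, and create_cam2world_matrix helpers, which are not shown here):
samples, voxel_origin, voxel_size = create_samples(N=64, cube_length=2.0)
print(samples.shape)   # torch.Size([1, 262144, 3]) -- one row per point of the 64^3 grid
print(voxel_origin)    # [-1. -1. -1.], the (bottom, left, down) corner
print(voxel_size)      # 2.0 / 63, the spacing between grid points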
9,272 | import ctypes
import fnmatch
import importlib
import inspect
import numpy as np
import os
import shutil
import sys
import types
import io
import pickle
import re
import requests
import html
import hashlib
import glob
import tempfile
import urllib
import urllib.request
import uuid
from distutils.util import strtobool
from typing import Any, List, Tuple, Union
_dnnlib_cache_dir = None
def set_cache_dir(path: str) -> None:
global _dnnlib_cache_dir
_dnnlib_cache_dir = path | null |
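A quick usage sketch (hypothetical path); once set, the companion make_cache_dir_path helper resolves cache paths under this directory:
set_cache_dir('/tmp/dnnlib_cache')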
9,273 | import ctypes
import fnmatch
import importlib
import inspect
import numpy as np
import os
import shutil
import sys
import types
import io
import pickle
import re
import requests
import html
import hashlib
import glob
import tempfile
import urllib
import urllib.request
import uuid
from distutils.util import strtobool
from typing import Any, List, Tuple, Union
The provided code snippet includes necessary dependencies for implementing the `format_time` function. Write a Python function `def format_time(seconds: Union[int, float]) -> str` to solve the following problem:
Convert the seconds to human readable string with days, hours, minutes and seconds.
Here is the function:
def format_time(seconds: Union[int, float]) -> str:
"""Convert the seconds to human readable string with days, hours, minutes and seconds."""
s = int(np.rint(seconds))
if s < 60:
return "{0}s".format(s)
elif s < 60 * 60:
return "{0}m {1:02}s".format(s // 60, s % 60)
elif s < 24 * 60 * 60:
return "{0}h {1:02}m {2:02}s".format(s // (60 * 60), (s // 60) % 60, s % 60)
else:
return "{0}d {1:02}h {2:02}m".format(s // (24 * 60 * 60), (s // (60 * 60)) % 24, (s // 60) % 60) | Convert the seconds to human readable string with days, hours, minutes and seconds. |
9,274 | import ctypes
import fnmatch
import importlib
import inspect
import numpy as np
import os
import shutil
import sys
import types
import io
import pickle
import re
import requests
import html
import hashlib
import glob
import tempfile
import urllib
import urllib.request
import uuid
from distutils.util import strtobool
from typing import Any, List, Tuple, Union
The provided code snippet includes necessary dependencies for implementing the `format_time_brief` function. Write a Python function `def format_time_brief(seconds: Union[int, float]) -> str` to solve the following problem:
Convert the seconds to human readable string with days, hours, minutes and seconds.
Here is the function:
def format_time_brief(seconds: Union[int, float]) -> str:
"""Convert the seconds to human readable string with days, hours, minutes and seconds."""
s = int(np.rint(seconds))
if s < 60:
return "{0}s".format(s)
elif s < 60 * 60:
return "{0}m {1:02}s".format(s // 60, s % 60)
elif s < 24 * 60 * 60:
return "{0}h {1:02}m".format(s // (60 * 60), (s // 60) % 60)
else:
return "{0}d {1:02}h".format(s // (24 * 60 * 60), (s // (60 * 60)) % 24) | Convert the seconds to human readable string with days, hours, minutes and seconds. |
9,275 | import ctypes
import fnmatch
import importlib
import inspect
import numpy as np
import os
import shutil
import sys
import types
import io
import pickle
import re
import requests
import html
import hashlib
import glob
import tempfile
import urllib
import urllib.request
import uuid
from distutils.util import strtobool
from typing import Any, List, Tuple, Union
The provided code snippet includes necessary dependencies for implementing the `ask_yes_no` function. Write a Python function `def ask_yes_no(question: str) -> bool` to solve the following problem:
Ask the user the question until the user inputs a valid answer.
Here is the function:
def ask_yes_no(question: str) -> bool:
"""Ask the user the question until the user inputs a valid answer."""
while True:
try:
print("{0} [y/n]".format(question))
return strtobool(input().lower())
except ValueError:
pass | Ask the user the question until the user inputs a valid answer. |
9,276 | import ctypes
import fnmatch
import importlib
import inspect
import numpy as np
import os
import shutil
import sys
import types
import io
import pickle
import re
import requests
import html
import hashlib
import glob
import tempfile
import urllib
import urllib.request
import uuid
from distutils.util import strtobool
from typing import Any, List, Tuple, Union
The provided code snippet includes necessary dependencies for implementing the `tuple_product` function. Write a Python function `def tuple_product(t: Tuple) -> Any` to solve the following problem:
Calculate the product of the tuple elements.
Here is the function:
def tuple_product(t: Tuple) -> Any:
"""Calculate the product of the tuple elements."""
result = 1
for v in t:
result *= v
return result | Calculate the product of the tuple elements. |
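A quick usage sketch:
print(tuple_product((2, 3, 4)))  # 24
print(tuple_product(()))         # 1 (the empty product)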
9,277 | import ctypes
import fnmatch
import importlib
import inspect
import numpy as np
import os
import shutil
import sys
import types
import io
import pickle
import re
import requests
import html
import hashlib
import glob
import tempfile
import urllib
import urllib.request
import uuid
from distutils.util import strtobool
from typing import Any, List, Tuple, Union
_str_to_ctype = {
"uint8": ctypes.c_ubyte,
"uint16": ctypes.c_uint16,
"uint32": ctypes.c_uint32,
"uint64": ctypes.c_uint64,
"int8": ctypes.c_byte,
"int16": ctypes.c_int16,
"int32": ctypes.c_int32,
"int64": ctypes.c_int64,
"float32": ctypes.c_float,
"float64": ctypes.c_double
}
The provided code snippet includes necessary dependencies for implementing the `get_dtype_and_ctype` function. Write a Python function `def get_dtype_and_ctype(type_obj: Any) -> Tuple[np.dtype, Any]` to solve the following problem:
Given a type name string (or an object having a __name__ attribute), return matching Numpy and ctypes types that have the same size in bytes.
Here is the function:
def get_dtype_and_ctype(type_obj: Any) -> Tuple[np.dtype, Any]:
"""Given a type name string (or an object having a __name__ attribute), return matching Numpy and ctypes types that have the same size in bytes."""
type_str = None
if isinstance(type_obj, str):
type_str = type_obj
elif hasattr(type_obj, "__name__"):
type_str = type_obj.__name__
elif hasattr(type_obj, "name"):
type_str = type_obj.name
else:
raise RuntimeError("Cannot infer type name from input")
assert type_str in _str_to_ctype.keys()
my_dtype = np.dtype(type_str)
my_ctype = _str_to_ctype[type_str]
assert my_dtype.itemsize == ctypes.sizeof(my_ctype)
return my_dtype, my_ctype | Given a type name string (or an object having a __name__ attribute), return matching Numpy and ctypes types that have the same size in bytes. |
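A quick usage sketch; both type-name strings and objects with a __name__ attribute work:
dtype, ctype = get_dtype_and_ctype('float32')
print(dtype, ctype)                   # float32 <class 'ctypes.c_float'>
print(get_dtype_and_ctype(np.int64))  # (dtype('int64'), the matching 64-bit ctypes integer type)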
9,278 | import ctypes
import fnmatch
import importlib
import inspect
import numpy as np
import os
import shutil
import sys
import types
import io
import pickle
import re
import requests
import html
import hashlib
import glob
import tempfile
import urllib
import urllib.request
import uuid
from distutils.util import strtobool
from typing import Any, List, Tuple, Union
def is_pickleable(obj: Any) -> bool:
try:
with io.BytesIO() as stream:
pickle.dump(obj, stream)
return True
except:
return False | null |
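A quick usage sketch:
print(is_pickleable([1, 2, 3]))    # True
print(is_pickleable(lambda x: x))  # False -- lambdas cannot be pickled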
9,279 | import ctypes
import fnmatch
import importlib
import inspect
import numpy as np
import os
import shutil
import sys
import types
import io
import pickle
import re
import requests
import html
import hashlib
import glob
import tempfile
import urllib
import urllib.request
import uuid
from distutils.util import strtobool
from typing import Any, List, Tuple, Union
def call_func_by_name(*args, func_name: str = None, **kwargs) -> Any:
"""Finds the python object with the given name and calls it as a function."""
assert func_name is not None
func_obj = get_obj_by_name(func_name)
assert callable(func_obj)
return func_obj(*args, **kwargs)
The provided code snippet includes necessary dependencies for implementing the `construct_class_by_name` function. Write a Python function `def construct_class_by_name(*args, class_name: str = None, **kwargs) -> Any` to solve the following problem:
Finds the python class with the given name and constructs it with the given arguments.
Here is the function:
def construct_class_by_name(*args, class_name: str = None, **kwargs) -> Any:
"""Finds the python class with the given name and constructs it with the given arguments."""
return call_func_by_name(*args, func_name=class_name, **kwargs) | Finds the python class with the given name and constructs it with the given arguments. |
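A quick usage sketch, assuming the companion get_obj_by_name resolver from dnnlib.util (referenced above but not shown) is available:
d = construct_class_by_name(class_name='collections.OrderedDict', a=1)
print(type(d).__name__, dict(d))  # OrderedDict {'a': 1}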
9,280 | import ctypes
import fnmatch
import importlib
import inspect
import numpy as np
import os
import shutil
import sys
import types
import io
import pickle
import re
import requests
import html
import hashlib
import glob
import tempfile
import urllib
import urllib.request
import uuid
from distutils.util import strtobool
from typing import Any, List, Tuple, Union
def get_module_from_obj_name(obj_name: str) -> Tuple[types.ModuleType, str]:
"""Searches for the underlying module behind the name to some python object.
Returns the module and the object name (original name with module part removed)."""
# allow convenience shorthands, substitute them by full names
obj_name = re.sub("^np.", "numpy.", obj_name)
obj_name = re.sub("^tf.", "tensorflow.", obj_name)
# list alternatives for (module_name, local_obj_name)
parts = obj_name.split(".")
name_pairs = [(".".join(parts[:i]), ".".join(parts[i:])) for i in range(len(parts), 0, -1)]
# try each alternative in turn
for module_name, local_obj_name in name_pairs:
try:
module = importlib.import_module(module_name) # may raise ImportError
get_obj_from_module(module, local_obj_name) # may raise AttributeError
return module, local_obj_name
except:
pass
# maybe some of the modules themselves contain errors?
for module_name, _local_obj_name in name_pairs:
try:
importlib.import_module(module_name) # may raise ImportError
except ImportError:
if not str(sys.exc_info()[1]).startswith("No module named '" + module_name + "'"):
raise
# maybe the requested attribute is missing?
for module_name, local_obj_name in name_pairs:
try:
module = importlib.import_module(module_name) # may raise ImportError
get_obj_from_module(module, local_obj_name) # may raise AttributeError
except ImportError:
pass
# we are out of luck, but we have no idea why
raise ImportError(obj_name)
The provided code snippet includes necessary dependencies for implementing the `get_module_dir_by_obj_name` function. Write a Python function `def get_module_dir_by_obj_name(obj_name: str) -> str` to solve the following problem:
Get the directory path of the module containing the given object name.
Here is the function:
def get_module_dir_by_obj_name(obj_name: str) -> str:
"""Get the directory path of the module containing the given object name."""
module, _ = get_module_from_obj_name(obj_name)
return os.path.dirname(inspect.getfile(module)) | Get the directory path of the module containing the given object name. |
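A quick usage sketch (depends on the get_obj_from_module helper referenced above but not shown):
print(get_module_dir_by_obj_name('numpy.zeros'))  # directory containing the numpy package's __init__.py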
9,281 | import ctypes
import fnmatch
import importlib
import inspect
import numpy as np
import os
import shutil
import sys
import types
import io
import pickle
import re
import requests
import html
import hashlib
import glob
import tempfile
import urllib
import urllib.request
import uuid
from distutils.util import strtobool
from typing import Any, List, Tuple, Union
def is_top_level_function(obj: Any) -> bool:
"""Determine whether the given object is a top-level function, i.e., defined at module scope using 'def'."""
return callable(obj) and obj.__name__ in sys.modules[obj.__module__].__dict__
The provided code snippet includes necessary dependencies for implementing the `get_top_level_function_name` function. Write a Python function `def get_top_level_function_name(obj: Any) -> str` to solve the following problem:
Return the fully-qualified name of a top-level function.
Here is the function:
def get_top_level_function_name(obj: Any) -> str:
"""Return the fully-qualified name of a top-level function."""
assert is_top_level_function(obj)
module = obj.__module__
if module == '__main__':
module = os.path.splitext(os.path.basename(sys.modules[module].__file__))[0]
return module + "." + obj.__name__ | Return the fully-qualified name of a top-level function. |
9,282 | import ctypes
import fnmatch
import importlib
import inspect
import numpy as np
import os
import shutil
import sys
import types
import io
import pickle
import re
import requests
import html
import hashlib
import glob
import tempfile
import urllib
import urllib.request
import uuid
from distutils.util import strtobool
from typing import Any, List, Tuple, Union
The provided code snippet includes necessary dependencies for implementing the `list_dir_recursively_with_ignore` function. Write a Python function `def list_dir_recursively_with_ignore(dir_path: str, ignores: List[str] = None, add_base_to_relative: bool = False) -> List[Tuple[str, str]]` to solve the following problem:
List all files recursively in a given directory while ignoring given file and directory names. Returns list of tuples containing both absolute and relative paths.
Here is the function:
def list_dir_recursively_with_ignore(dir_path: str, ignores: List[str] = None, add_base_to_relative: bool = False) -> List[Tuple[str, str]]:
"""List all files recursively in a given directory while ignoring given file and directory names.
Returns list of tuples containing both absolute and relative paths."""
assert os.path.isdir(dir_path)
base_name = os.path.basename(os.path.normpath(dir_path))
if ignores is None:
ignores = []
result = []
for root, dirs, files in os.walk(dir_path, topdown=True):
for ignore_ in ignores:
dirs_to_remove = [d for d in dirs if fnmatch.fnmatch(d, ignore_)]
# dirs need to be edited in-place
for d in dirs_to_remove:
dirs.remove(d)
files = [f for f in files if not fnmatch.fnmatch(f, ignore_)]
absolute_paths = [os.path.join(root, f) for f in files]
relative_paths = [os.path.relpath(p, dir_path) for p in absolute_paths]
if add_base_to_relative:
relative_paths = [os.path.join(base_name, p) for p in relative_paths]
assert len(absolute_paths) == len(relative_paths)
result += zip(absolute_paths, relative_paths)
return result | List all files recursively in a given directory while ignoring given file and directory names. Returns list of tuples containing both absolute and relative paths. |
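A quick usage sketch (hypothetical directory layout); ignore patterns are fnmatch-style and apply to both file and directory names:
for abs_path, rel_path in list_dir_recursively_with_ignore('src', ignores=['__pycache__', '*.pyc'], add_base_to_relative=True):
    print(abs_path, '->', rel_path)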
9,283 | import ctypes
import fnmatch
import importlib
import inspect
import numpy as np
import os
import shutil
import sys
import types
import io
import pickle
import re
import requests
import html
import hashlib
import glob
import tempfile
import urllib
import urllib.request
import uuid
from distutils.util import strtobool
from typing import Any, List, Tuple, Union
The provided code snippet includes necessary dependencies for implementing the `copy_files_and_create_dirs` function. Write a Python function `def copy_files_and_create_dirs(files: List[Tuple[str, str]]) -> None` to solve the following problem:
Takes in a list of tuples of (src, dst) paths and copies files. Will create all necessary directories.
Here is the function:
def copy_files_and_create_dirs(files: List[Tuple[str, str]]) -> None:
"""Takes in a list of tuples of (src, dst) paths and copies files.
Will create all necessary directories."""
for file in files:
target_dir_name = os.path.dirname(file[1])
# will create all intermediate-level directories
if not os.path.exists(target_dir_name):
os.makedirs(target_dir_name)
shutil.copyfile(file[0], file[1]) | Takes in a list of tuples of (src, dst) paths and copies files. Will create all necessary directories. |
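A quick usage sketch (hypothetical paths); destination directories are created on demand:
copy_files_and_create_dirs([
    ('src/config.json', 'backup/src/config.json'),
    ('src/model.py', 'backup/src/model.py'),
])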
9,284 | import ctypes
import fnmatch
import importlib
import inspect
import numpy as np
import os
import shutil
import sys
import types
import io
import pickle
import re
import requests
import html
import hashlib
import glob
import tempfile
import urllib
import urllib.request
import uuid
from distutils.util import strtobool
from typing import Any, List, Tuple, Union
def make_cache_dir_path(*paths: str) -> str:
if _dnnlib_cache_dir is not None:
return os.path.join(_dnnlib_cache_dir, *paths)
if 'DNNLIB_CACHE_DIR' in os.environ:
return os.path.join(os.environ['DNNLIB_CACHE_DIR'], *paths)
if 'HOME' in os.environ:
return os.path.join(os.environ['HOME'], '.cache', 'dnnlib', *paths)
if 'USERPROFILE' in os.environ:
return os.path.join(os.environ['USERPROFILE'], '.cache', 'dnnlib', *paths)
return os.path.join(tempfile.gettempdir(), '.cache', 'dnnlib', *paths)
def is_url(obj: Any, allow_file_urls: bool = False) -> bool:
"""Determine whether the given object is a valid URL string."""
if not isinstance(obj, str) or not "://" in obj:
return False
if allow_file_urls and obj.startswith('file://'):
return True
try:
res = requests.compat.urlparse(obj)
if not res.scheme or not res.netloc or not "." in res.netloc:
return False
res = requests.compat.urlparse(requests.compat.urljoin(obj, "/"))
if not res.scheme or not res.netloc or not "." in res.netloc:
return False
except:
return False
return True
The provided code snippet includes necessary dependencies for implementing the `open_url` function. Write a Python function `def open_url(url: str, cache_dir: str = None, num_attempts: int = 10, verbose: bool = True, return_filename: bool = False, cache: bool = True) -> Any` to solve the following problem:
Download the given URL and return a binary-mode file object to access the data.
Here is the function:
def open_url(url: str, cache_dir: str = None, num_attempts: int = 10, verbose: bool = True, return_filename: bool = False, cache: bool = True) -> Any:
"""Download the given URL and return a binary-mode file object to access the data."""
assert num_attempts >= 1
assert not (return_filename and (not cache))
# Doesn't look like an URL scheme so interpret it as a local filename.
if not re.match('^[a-z]+://', url):
return url if return_filename else open(url, "rb")
# Handle file URLs. This code handles unusual file:// patterns that
# arise on Windows:
#
# file:///c:/foo.txt
#
# which would translate to a local '/c:/foo.txt' filename that's
# invalid. Drop the forward slash for such pathnames.
#
# If you touch this code path, you should test it on both Linux and
# Windows.
#
# Some internet resources suggest using urllib.request.url2pathname(),
# but that converts forward slashes to backslashes and this causes
# its own set of problems.
if url.startswith('file://'):
filename = urllib.parse.urlparse(url).path
if re.match(r'^/[a-zA-Z]:', filename):
filename = filename[1:]
return filename if return_filename else open(filename, "rb")
assert is_url(url)
# Lookup from cache.
if cache_dir is None:
cache_dir = make_cache_dir_path('downloads')
url_md5 = hashlib.md5(url.encode("utf-8")).hexdigest()
if cache:
cache_files = glob.glob(os.path.join(cache_dir, url_md5 + "_*"))
if len(cache_files) == 1:
filename = cache_files[0]
return filename if return_filename else open(filename, "rb")
# Download.
url_name = None
url_data = None
with requests.Session() as session:
if verbose:
print("Downloading %s ..." % url, end="", flush=True)
for attempts_left in reversed(range(num_attempts)):
try:
with session.get(url) as res:
res.raise_for_status()
if len(res.content) == 0:
raise IOError("No data received")
if len(res.content) < 8192:
content_str = res.content.decode("utf-8")
if "download_warning" in res.headers.get("Set-Cookie", ""):
links = [html.unescape(link) for link in content_str.split('"') if "export=download" in link]
if len(links) == 1:
url = requests.compat.urljoin(url, links[0])
raise IOError("Google Drive virus checker nag")
if "Google Drive - Quota exceeded" in content_str:
raise IOError("Google Drive download quota exceeded -- please try again later")
match = re.search(r'filename="([^"]*)"', res.headers.get("Content-Disposition", ""))
url_name = match[1] if match else url
url_data = res.content
if verbose:
print(" done")
break
except KeyboardInterrupt:
raise
except:
if not attempts_left:
if verbose:
print(" failed")
raise
if verbose:
print(".", end="", flush=True)
# Save to cache.
if cache:
safe_name = re.sub(r"[^0-9a-zA-Z-._]", "_", url_name)
cache_file = os.path.join(cache_dir, url_md5 + "_" + safe_name)
temp_file = os.path.join(cache_dir, "tmp_" + uuid.uuid4().hex + "_" + url_md5 + "_" + safe_name)
os.makedirs(cache_dir, exist_ok=True)
with open(temp_file, "wb") as f:
f.write(url_data)
os.replace(temp_file, cache_file) # atomic
if return_filename:
return cache_file
# Return data as file object.
assert not return_filename
return io.BytesIO(url_data) | Download the given URL and return a binary-mode file object to access the data. |
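A quick usage sketch (hypothetical URL and path); plain paths bypass the download logic entirely:
with open_url('https://example.com/networks/model.pkl') as f:  # downloaded and cached
    data = f.read()
with open_url('checkpoints/model.pkl') as f:  # no URL scheme -> opened as a local file
    data = f.read()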
9,285 | import os
import click
import re
import json
import tempfile
import torch
import dnnlib
from training import training_loop
from metrics import metric_main
from torch_utils import training_stats
from torch_utils import custom_ops
def subprocess_fn(rank, c, temp_dir):
dnnlib.util.Logger(file_name=os.path.join(c.run_dir, 'log.txt'), file_mode='a', should_flush=True)
# Init torch.distributed.
if c.num_gpus > 1:
init_file = os.path.abspath(os.path.join(temp_dir, '.torch_distributed_init'))
if os.name == 'nt':
init_method = 'file:///' + init_file.replace('\\', '/')
torch.distributed.init_process_group(backend='gloo', init_method=init_method, rank=rank, world_size=c.num_gpus)
else:
init_method = f'file://{init_file}'
torch.distributed.init_process_group(backend='nccl', init_method=init_method, rank=rank, world_size=c.num_gpus)
# Init torch_utils.
sync_device = torch.device('cuda', rank) if c.num_gpus > 1 else None
training_stats.init_multiprocessing(rank=rank, sync_device=sync_device)
if rank != 0:
custom_ops.verbosity = 'none'
# Execute training loop.
training_loop.training_loop(rank=rank, **c)
def launch_training(c, desc, outdir, dry_run):
dnnlib.util.Logger(should_flush=True)
# Pick output directory.
prev_run_dirs = []
if os.path.isdir(outdir):
prev_run_dirs = [x for x in os.listdir(outdir) if os.path.isdir(os.path.join(outdir, x))]
prev_run_ids = [re.match(r'^\d+', x) for x in prev_run_dirs]
prev_run_ids = [int(x.group()) for x in prev_run_ids if x is not None]
cur_run_id = max(prev_run_ids, default=-1) + 1
c.run_dir = os.path.join(outdir, f'{cur_run_id:05d}-{desc}')
assert not os.path.exists(c.run_dir)
# Print options.
print()
print('Training options:')
print(json.dumps(c, indent=2))
print()
print(f'Output directory: {c.run_dir}')
print(f'Number of GPUs: {c.num_gpus}')
print(f'Batch size: {c.batch_size} images')
print(f'Training duration: {c.total_kimg} kimg')
print(f'Dataset path: {c.training_set_kwargs.path}')
print(f'Dataset size: {c.training_set_kwargs.max_size} images')
print(f'Dataset resolution: {c.training_set_kwargs.resolution}')
print(f'Dataset labels: {c.training_set_kwargs.use_labels}')
print(f'Dataset x-flips: {c.training_set_kwargs.xflip}')
print()
# Dry run?
if dry_run:
print('Dry run; exiting.')
return
# Create output directory.
print('Creating output directory...')
os.makedirs(c.run_dir)
with open(os.path.join(c.run_dir, 'training_options.json'), 'wt') as f:
json.dump(c, f, indent=2)
# Launch processes.
print('Launching processes...')
torch.multiprocessing.set_start_method('spawn')
with tempfile.TemporaryDirectory() as temp_dir:
if c.num_gpus == 1:
subprocess_fn(rank=0, c=c, temp_dir=temp_dir)
else:
torch.multiprocessing.spawn(fn=subprocess_fn, args=(c, temp_dir), nprocs=c.num_gpus) | null |
9,286 | import os
import click
import re
import json
import tempfile
import torch
import dnnlib
from training import training_loop
from metrics import metric_main
from torch_utils import training_stats
from torch_utils import custom_ops
def init_dataset_kwargs(data):
try:
dataset_kwargs = dnnlib.EasyDict(class_name='training.dataset.ImageFolderDataset', path=data, use_labels=True, max_size=None, xflip=False)
dataset_obj = dnnlib.util.construct_class_by_name(**dataset_kwargs) # Subclass of training.dataset.Dataset.
dataset_kwargs.resolution = dataset_obj.resolution # Be explicit about resolution.
dataset_kwargs.use_labels = dataset_obj.has_labels # Be explicit about labels.
dataset_kwargs.max_size = len(dataset_obj) # Be explicit about dataset size.
return dataset_kwargs, dataset_obj.name
except IOError as err:
raise click.ClickException(f'--data: {err}') | null |
9,287 | import os
import click
import re
import json
import tempfile
import torch
import dnnlib
from training import training_loop
from metrics import metric_main
from torch_utils import training_stats
from torch_utils import custom_ops
def parse_comma_separated_list(s):
if isinstance(s, list):
return s
if s is None or s.lower() == 'none' or s == '':
return []
return s.split(',') | null |
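A quick usage sketch:
print(parse_comma_separated_list('fid50k_full,kid50k_full'))  # ['fid50k_full', 'kid50k_full']
print(parse_comma_separated_list('none'))                     # []
print(parse_comma_separated_list(None))                       # []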
9,288 | import click
import pickle
import re
import copy
import numpy as np
import torch
import dnnlib
from torch_utils import misc
def load_network_pkl(f, force_fp16=False):
data = _LegacyUnpickler(f).load()
# Legacy TensorFlow pickle => convert.
if isinstance(data, tuple) and len(data) == 3 and all(isinstance(net, _TFNetworkStub) for net in data):
tf_G, tf_D, tf_Gs = data
G = convert_tf_generator(tf_G)
D = convert_tf_discriminator(tf_D)
G_ema = convert_tf_generator(tf_Gs)
data = dict(G=G, D=D, G_ema=G_ema)
# Add missing fields.
if 'training_set_kwargs' not in data:
data['training_set_kwargs'] = None
if 'augment_pipe' not in data:
data['augment_pipe'] = None
# Validate contents.
assert isinstance(data['G'], torch.nn.Module)
assert isinstance(data['D'], torch.nn.Module)
assert isinstance(data['G_ema'], torch.nn.Module)
assert isinstance(data['training_set_kwargs'], (dict, type(None)))
assert isinstance(data['augment_pipe'], (torch.nn.Module, type(None)))
# Force FP16.
if force_fp16:
for key in ['G', 'D', 'G_ema']:
old = data[key]
kwargs = copy.deepcopy(old.init_kwargs)
fp16_kwargs = kwargs.get('synthesis_kwargs', kwargs)
fp16_kwargs.num_fp16_res = 4
fp16_kwargs.conv_clamp = 256
if kwargs != old.init_kwargs:
new = type(old)(**kwargs).eval().requires_grad_(False)
misc.copy_params_and_buffers(old, new, require_all=True)
data[key] = new
return data
The provided code snippet includes necessary dependencies for implementing the `convert_network_pickle` function. Write a Python function `def convert_network_pickle(source, dest, force_fp16)` to solve the following problem:
Convert legacy network pickle into the native PyTorch format. The tool is able to load the main network configurations exported using the TensorFlow version of StyleGAN2 or StyleGAN2-ADA. It does not support e.g. StyleGAN2-ADA comparison methods, StyleGAN2 configs A-D, or StyleGAN1 networks. Example: \b python legacy.py \\ --source=https://nvlabs-fi-cdn.nvidia.com/stylegan2/networks/stylegan2-cat-config-f.pkl \\ --dest=stylegan2-cat-config-f.pkl
Here is the function:
def convert_network_pickle(source, dest, force_fp16):
"""Convert legacy network pickle into the native PyTorch format.
The tool is able to load the main network configurations exported using the TensorFlow version of StyleGAN2 or StyleGAN2-ADA.
It does not support e.g. StyleGAN2-ADA comparison methods, StyleGAN2 configs A-D, or StyleGAN1 networks.
Example:
\b
python legacy.py \\
--source=https://nvlabs-fi-cdn.nvidia.com/stylegan2/networks/stylegan2-cat-config-f.pkl \\
--dest=stylegan2-cat-config-f.pkl
"""
print(f'Loading "{source}"...')
with dnnlib.util.open_url(source) as f:
data = load_network_pkl(f, force_fp16=force_fp16)
print(f'Saving "{dest}"...')
with open(dest, 'wb') as f:
pickle.dump(data, f)
print('Done.') | Convert legacy network pickle into the native PyTorch format. The tool is able to load the main network configurations exported using the TensorFlow version of StyleGAN2 or StyleGAN2-ADA. It does not support e.g. StyleGAN2-ADA comparison methods, StyleGAN2 configs A-D, or StyleGAN1 networks. Example: \b python legacy.py \\ --source=https://nvlabs-fi-cdn.nvidia.com/stylegan2/networks/stylegan2-cat-config-f.pkl \\ --dest=stylegan2-cat-config-f.pkl |
9,289 | import copy
import numpy as np
import torch
from . import metric_utils
def slerp(a, b, t):
a = a / a.norm(dim=-1, keepdim=True)
b = b / b.norm(dim=-1, keepdim=True)
d = (a * b).sum(dim=-1, keepdim=True)
p = t * torch.acos(d)
c = b - d * a
c = c / c.norm(dim=-1, keepdim=True)
d = a * torch.cos(p) + c * torch.sin(p)
d = d / d.norm(dim=-1, keepdim=True)
return d | null |
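A quick numeric check of the spherical interpolation above:
a = torch.tensor([[1.0, 0.0]])
b = torch.tensor([[0.0, 1.0]])
print(slerp(a, b, 0.5))  # ~[[0.7071, 0.7071]]: the midpoint of the unit arc between a and b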
9,290 | import os
import time
import json
import torch
import dnnlib
from . import metric_utils
from . import frechet_inception_distance
from . import kernel_inception_distance
from . import precision_recall
from . import perceptual_path_length
from . import inception_score
from . import equivariance
_metric_dict = dict()
def register_metric(fn):
assert callable(fn)
_metric_dict[fn.__name__] = fn
return fn | null |
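A minimal sketch of the registration pattern (hypothetical metric); decorated functions become discoverable by name through _metric_dict, which is how metric entries like fid50k_full are looked up:
@register_metric
def dummy0k(opts):  # hypothetical metric that computes nothing
    return dict(dummy0k=0.0)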
9,291 | import os
import time
import json
import torch
import dnnlib
from . import metric_utils
from . import frechet_inception_distance
from . import kernel_inception_distance
from . import precision_recall
from . import perceptual_path_length
from . import inception_score
from . import equivariance
def fid50k_full(opts):
opts.dataset_kwargs.update(max_size=None, xflip=False)
fid = frechet_inception_distance.compute_fid(opts, max_real=None, num_gen=50000)
return dict(fid50k_full=fid) | null |
9,292 | import os
import time
import json
import torch
import dnnlib
from . import metric_utils
from . import frechet_inception_distance
from . import kernel_inception_distance
from . import precision_recall
from . import perceptual_path_length
from . import inception_score
from . import equivariance
def kid50k_full(opts):
opts.dataset_kwargs.update(max_size=None, xflip=False)
kid = kernel_inception_distance.compute_kid(opts, max_real=1000000, num_gen=50000, num_subsets=100, max_subset_size=1000)
return dict(kid50k_full=kid) | null |
9,293 | import os
import time
import json
import torch
import dnnlib
from . import metric_utils
from . import frechet_inception_distance
from . import kernel_inception_distance
from . import precision_recall
from . import perceptual_path_length
from . import inception_score
from . import equivariance
def pr50k3_full(opts):
opts.dataset_kwargs.update(max_size=None, xflip=False)
precision, recall = precision_recall.compute_pr(opts, max_real=200000, num_gen=50000, nhood_size=3, row_batch_size=10000, col_batch_size=10000)
return dict(pr50k3_full_precision=precision, pr50k3_full_recall=recall) | null |
9,294 | import os
import time
import json
import torch
import dnnlib
from . import metric_utils
from . import frechet_inception_distance
from . import kernel_inception_distance
from . import precision_recall
from . import perceptual_path_length
from . import inception_score
from . import equivariance
def ppl2_wend(opts):
ppl = perceptual_path_length.compute_ppl(opts, num_samples=50000, epsilon=1e-4, space='w', sampling='end', crop=False, batch_size=2)
return dict(ppl2_wend=ppl) | null |
9,295 | import os
import time
import json
import torch
import dnnlib
from . import metric_utils
from . import frechet_inception_distance
from . import kernel_inception_distance
from . import precision_recall
from . import perceptual_path_length
from . import inception_score
from . import equivariance
def eqt50k_int(opts):
opts.G_kwargs.update(force_fp32=True)
psnr = equivariance.compute_equivariance_metrics(opts, num_samples=50000, batch_size=4, compute_eqt_int=True)
return dict(eqt50k_int=psnr) | null |
9,296 | import os
import time
import json
import torch
import dnnlib
from . import metric_utils
from . import frechet_inception_distance
from . import kernel_inception_distance
from . import precision_recall
from . import perceptual_path_length
from . import inception_score
from . import equivariance
def eqt50k_frac(opts):
opts.G_kwargs.update(force_fp32=True)
psnr = equivariance.compute_equivariance_metrics(opts, num_samples=50000, batch_size=4, compute_eqt_frac=True)
return dict(eqt50k_frac=psnr) | null |
9,297 | import os
import time
import json
import torch
import dnnlib
from . import metric_utils
from . import frechet_inception_distance
from . import kernel_inception_distance
from . import precision_recall
from . import perceptual_path_length
from . import inception_score
from . import equivariance
def eqr50k(opts):
opts.G_kwargs.update(force_fp32=True)
psnr = equivariance.compute_equivariance_metrics(opts, num_samples=50000, batch_size=4, compute_eqr=True)
return dict(eqr50k=psnr) | null |
9,298 | import os
import time
import json
import torch
import dnnlib
from . import metric_utils
from . import frechet_inception_distance
from . import kernel_inception_distance
from . import precision_recall
from . import perceptual_path_length
from . import inception_score
from . import equivariance
def fid50k(opts):
opts.dataset_kwargs.update(max_size=None)
fid = frechet_inception_distance.compute_fid(opts, max_real=50000, num_gen=50000)
return dict(fid50k=fid) | null |
9,299 | import os
import time
import json
import torch
import dnnlib
from . import metric_utils
from . import frechet_inception_distance
from . import kernel_inception_distance
from . import precision_recall
from . import perceptual_path_length
from . import inception_score
from . import equivariance
def kid50k(opts):
opts.dataset_kwargs.update(max_size=None)
kid = kernel_inception_distance.compute_kid(opts, max_real=50000, num_gen=50000, num_subsets=100, max_subset_size=1000)
return dict(kid50k=kid) | null |
9,300 | import os
import time
import json
import torch
import dnnlib
from . import metric_utils
from . import frechet_inception_distance
from . import kernel_inception_distance
from . import precision_recall
from . import perceptual_path_length
from . import inception_score
from . import equivariance
def pr50k3(opts):
opts.dataset_kwargs.update(max_size=None)
precision, recall = precision_recall.compute_pr(opts, max_real=50000, num_gen=50000, nhood_size=3, row_batch_size=10000, col_batch_size=10000)
return dict(pr50k3_precision=precision, pr50k3_recall=recall) | null |
9,301 | import os
import time
import json
import torch
import dnnlib
from . import metric_utils
from . import frechet_inception_distance
from . import kernel_inception_distance
from . import precision_recall
from . import perceptual_path_length
from . import inception_score
from . import equivariance
def is50k(opts):
opts.dataset_kwargs.update(max_size=None, xflip=False)
mean, std = inception_score.compute_is(opts, num_gen=50000, num_splits=10)
return dict(is50k_mean=mean, is50k_std=std) | null |
9,302 | import os
import click
import json
import tempfile
import copy
import torch
import dnnlib
import legacy
from metrics import metric_main
from metrics import metric_utils
from torch_utils import training_stats
from torch_utils import custom_ops
from torch_utils import misc
from torch_utils.ops import conv2d_gradfix
def parse_comma_separated_list(s):
if isinstance(s, list):
return s
if s is None or s.lower() == 'none' or s == '':
return []
return s.split(',') | null |
9,303 | import os
import click
import json
import tempfile
import copy
import torch
import dnnlib
import legacy
from metrics import metric_main
from metrics import metric_utils
from torch_utils import training_stats
from torch_utils import custom_ops
from torch_utils import misc
from torch_utils.ops import conv2d_gradfix
def subprocess_fn(rank, args, temp_dir):
dnnlib.util.Logger(should_flush=True)
# Init torch.distributed.
if args.num_gpus > 1:
init_file = os.path.abspath(os.path.join(temp_dir, '.torch_distributed_init'))
if os.name == 'nt':
init_method = 'file:///' + init_file.replace('\\', '/')
torch.distributed.init_process_group(backend='gloo', init_method=init_method, rank=rank, world_size=args.num_gpus)
else:
init_method = f'file://{init_file}'
torch.distributed.init_process_group(backend='nccl', init_method=init_method, rank=rank, world_size=args.num_gpus)
# Init torch_utils.
sync_device = torch.device('cuda', rank) if args.num_gpus > 1 else None
training_stats.init_multiprocessing(rank=rank, sync_device=sync_device)
if rank != 0 or not args.verbose:
custom_ops.verbosity = 'none'
# Configure torch.
device = torch.device('cuda', rank)
torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cudnn.allow_tf32 = False
conv2d_gradfix.enabled = True
# Print network summary.
G = copy.deepcopy(args.G).eval().requires_grad_(False).to(device)
if rank == 0 and args.verbose:
z = torch.empty([1, G.z_dim], device=device)
c = torch.empty([1, G.c_dim], device=device)
misc.print_module_summary(G, [z, c])
# Calculate each metric.
for metric in args.metrics:
if rank == 0 and args.verbose:
print(f'Calculating {metric}...')
progress = metric_utils.ProgressMonitor(verbose=args.verbose)
result_dict = metric_main.calc_metric(metric=metric, G=G, dataset_kwargs=args.dataset_kwargs,
num_gpus=args.num_gpus, rank=rank, device=device, progress=progress)
if rank == 0:
metric_main.report_metric(result_dict, run_dir=args.run_dir, snapshot_pkl=args.network_pkl)
if rank == 0 and args.verbose:
print()
# Done.
if rank == 0 and args.verbose:
print('Exiting...')
The provided code snippet includes necessary dependencies for implementing the `calc_metrics` function. Write a Python function `def calc_metrics(ctx, network_pkl, metrics, data, mirror, gpus, verbose)` to solve the following problem:
Calculate quality metrics for previous training run or pretrained network pickle. Examples: \b # Previous training run: look up options automatically, save result to JSONL file. python calc_metrics.py --metrics=eqt50k_int,eqr50k \\ --network=~/training-runs/00000-stylegan3-r-mydataset/network-snapshot-000000.pkl \b # Pre-trained network pickle: specify dataset explicitly, print result to stdout. python calc_metrics.py --metrics=fid50k_full --data=~/datasets/ffhq-1024x1024.zip --mirror=1 \\ --network=https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-t-ffhq-1024x1024.pkl \b Recommended metrics: fid50k_full Frechet inception distance against the full dataset. kid50k_full Kernel inception distance against the full dataset. pr50k3_full Precision and recall against the full dataset. ppl2_wend Perceptual path length in W, endpoints, full image. eqt50k_int Equivariance w.r.t. integer translation (EQ-T). eqt50k_frac Equivariance w.r.t. fractional translation (EQ-T_frac). eqr50k Equivariance w.r.t. rotation (EQ-R). \b Legacy metrics: fid50k Frechet inception distance against 50k real images. kid50k Kernel inception distance against 50k real images. pr50k3 Precision and recall against 50k real images. is50k Inception score for CIFAR-10.
Here is the function:
def calc_metrics(ctx, network_pkl, metrics, data, mirror, gpus, verbose):
"""Calculate quality metrics for previous training run or pretrained network pickle.
Examples:
\b
# Previous training run: look up options automatically, save result to JSONL file.
python calc_metrics.py --metrics=eqt50k_int,eqr50k \\
--network=~/training-runs/00000-stylegan3-r-mydataset/network-snapshot-000000.pkl
\b
# Pre-trained network pickle: specify dataset explicitly, print result to stdout.
python calc_metrics.py --metrics=fid50k_full --data=~/datasets/ffhq-1024x1024.zip --mirror=1 \\
--network=https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-t-ffhq-1024x1024.pkl
\b
Recommended metrics:
fid50k_full Frechet inception distance against the full dataset.
kid50k_full Kernel inception distance against the full dataset.
pr50k3_full Precision and recall against the full dataset.
ppl2_wend Perceptual path length in W, endpoints, full image.
eqt50k_int Equivariance w.r.t. integer translation (EQ-T).
eqt50k_frac Equivariance w.r.t. fractional translation (EQ-T_frac).
eqr50k Equivariance w.r.t. rotation (EQ-R).
\b
Legacy metrics:
fid50k Frechet inception distance against 50k real images.
kid50k Kernel inception distance against 50k real images.
pr50k3 Precision and recall against 50k real images.
is50k Inception score for CIFAR-10.
"""
dnnlib.util.Logger(should_flush=True)
# Validate arguments.
args = dnnlib.EasyDict(metrics=metrics, num_gpus=gpus, network_pkl=network_pkl, verbose=verbose)
if not all(metric_main.is_valid_metric(metric) for metric in args.metrics):
ctx.fail('\n'.join(['--metrics can only contain the following values:'] + metric_main.list_valid_metrics()))
if not args.num_gpus >= 1:
ctx.fail('--gpus must be at least 1')
# Load network.
if not dnnlib.util.is_url(network_pkl, allow_file_urls=True) and not os.path.isfile(network_pkl):
ctx.fail('--network must point to a file or URL')
if args.verbose:
print(f'Loading network from "{network_pkl}"...')
with dnnlib.util.open_url(network_pkl, verbose=args.verbose) as f:
network_dict = legacy.load_network_pkl(f)
args.G = network_dict['G_ema'] # subclass of torch.nn.Module
# Initialize dataset options.
if data is not None:
args.dataset_kwargs = dnnlib.EasyDict(class_name='training.dataset.ImageFolderDataset', path=data)
elif network_dict['training_set_kwargs'] is not None:
args.dataset_kwargs = dnnlib.EasyDict(network_dict['training_set_kwargs'])
else:
ctx.fail('Could not look up dataset options; please specify --data')
# Finalize dataset options.
args.dataset_kwargs.resolution = args.G.img_resolution
args.dataset_kwargs.use_labels = (args.G.c_dim != 0)
if mirror is not None:
args.dataset_kwargs.xflip = mirror
# Print dataset options.
if args.verbose:
print('Dataset options:')
print(json.dumps(args.dataset_kwargs, indent=2))
# Locate run dir.
args.run_dir = None
if os.path.isfile(network_pkl):
pkl_dir = os.path.dirname(network_pkl)
if os.path.isfile(os.path.join(pkl_dir, 'training_options.json')):
args.run_dir = pkl_dir
# Launch processes.
if args.verbose:
print('Launching processes...')
torch.multiprocessing.set_start_method('spawn')
with tempfile.TemporaryDirectory() as temp_dir:
if args.num_gpus == 1:
subprocess_fn(rank=0, args=args, temp_dir=temp_dir)
else:
torch.multiprocessing.spawn(fn=subprocess_fn, args=(args, temp_dir), nprocs=args.num_gpus) | Calculate quality metrics for previous training run or pretrained network pickle. Examples: \b # Previous training run: look up options automatically, save result to JSONL file. python calc_metrics.py --metrics=eqt50k_int,eqr50k \\ --network=~/training-runs/00000-stylegan3-r-mydataset/network-snapshot-000000.pkl \b # Pre-trained network pickle: specify dataset explicitly, print result to stdout. python calc_metrics.py --metrics=fid50k_full --data=~/datasets/ffhq-1024x1024.zip --mirror=1 \\ --network=https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-t-ffhq-1024x1024.pkl \b Recommended metrics: fid50k_full Frechet inception distance against the full dataset. kid50k_full Kernel inception distance against the full dataset. pr50k3_full Precision and recall against the full dataset. ppl2_wend Perceptual path length in W, endpoints, full image. eqt50k_int Equivariance w.r.t. integer translation (EQ-T). eqt50k_frac Equivariance w.r.t. fractional translation (EQ-T_frac). eqr50k Equivariance w.r.t. rotation (EQ-R). \b Legacy metrics: fid50k Frechet inception distance against 50k real images. kid50k Kernel inception distance against 50k real images. pr50k3 Precision and recall against 50k real images. is50k Inception score for CIFAR-10. |
9,304 | import os
import re
from typing import List, Optional, Tuple, Union
import click
import dnnlib
import imageio
import numpy as np
import scipy.interpolate
import torch
from tqdm import tqdm
import mrcfile
import legacy
from camera_utils import LookAtPoseSampler
from torch_utils import misc
The provided code snippet includes necessary dependencies for implementing the `parse_range` function. Write a Python function `def parse_range(s: Union[str, List[int]]) -> List[int]` to solve the following problem:
Parse a comma separated list of numbers or ranges and return a list of ints. Example: '1,2,5-10' returns [1, 2, 5, 6, 7, 8, 9, 10]
Here is the function:
def parse_range(s: Union[str, List[int]]) -> List[int]:
'''Parse a comma separated list of numbers or ranges and return a list of ints.
Example: '1,2,5-10' returns [1, 2, 5, 6, 7, 8, 9, 10]
'''
if isinstance(s, list): return s
ranges = []
range_re = re.compile(r'^(\d+)-(\d+)$')
for p in s.split(','):
if m := range_re.match(p):
ranges.extend(range(int(m.group(1)), int(m.group(2))+1))
else:
ranges.append(int(p))
return ranges | Parse a comma separated list of numbers or ranges and return a list of ints. Example: '1,2,5-10' returns [1, 2, 5, 6, 7, 8, 9, 10] |
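A minimal usage check of parse_range, assuming the function above is in scope:

assert parse_range('1,2,5-10') == [1, 2, 5, 6, 7, 8, 9, 10]  # '5-10' is inclusive of both ends
assert parse_range([3, 4]) == [3, 4]                          # a list passes through unchanged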
9,305 | import os
import re
from typing import List, Optional, Tuple, Union
import click
import dnnlib
import imageio
import numpy as np
import scipy.interpolate
import torch
from tqdm import tqdm
import mrcfile
import legacy
from camera_utils import LookAtPoseSampler
from torch_utils import misc
The provided code snippet includes necessary dependencies for implementing the `parse_tuple` function. Write a Python function `def parse_tuple(s: Union[str, Tuple[int,int]]) -> Tuple[int, int]` to solve the following problem:
Parse a 'M,N' or 'MxN' integer tuple. Example: '4x2' returns (4,2) '0,1' returns (0,1)
Here is the function:
def parse_tuple(s: Union[str, Tuple[int,int]]) -> Tuple[int, int]:
'''Parse a 'M,N' or 'MxN' integer tuple.
Example:
'4x2' returns (4,2)
'0,1' returns (0,1)
'''
if isinstance(s, tuple): return s
if m := re.match(r'^(\d+)[x,](\d+)$', s):
return (int(m.group(1)), int(m.group(2)))
raise ValueError(f'cannot parse tuple {s}') | Parse a 'M,N' or 'MxN' integer tuple. Example: '4x2' returns (4,2) '0,1' returns (0,1) |
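A minimal usage check of parse_tuple, assuming the function above is in scope:

assert parse_tuple('4x2') == (4, 2)  # 'MxN' syntax
assert parse_tuple('0,1') == (0, 1)  # 'M,N' syntax
try:
    parse_tuple('4x')                # malformed input raises
except ValueError as e:
    print(e)                         # cannot parse tuple 4x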
9,306 | import os
import re
from typing import List, Optional, Tuple, Union
import click
import dnnlib
import imageio
import numpy as np
import scipy.interpolate
import torch
from tqdm import tqdm
import mrcfile
import legacy
from camera_utils import LookAtPoseSampler
from torch_utils import misc
def gen_interp_video(G, mp4: str, seeds, shuffle_seed=None, w_frames=60*4, kind='cubic', grid_dims=(1,1), num_keyframes=None, wraps=2, psi=1, truncation_cutoff=14, cfg='FFHQ', image_mode='image', gen_shapes=False, device=torch.device('cuda'), **video_kwargs):
grid_w = grid_dims[0]
grid_h = grid_dims[1]
if num_keyframes is None:
if len(seeds) % (grid_w*grid_h) != 0:
raise ValueError('Number of input seeds must be divisible by grid W*H')
num_keyframes = len(seeds) // (grid_w*grid_h)
all_seeds = np.zeros(num_keyframes*grid_h*grid_w, dtype=np.int64)
for idx in range(num_keyframes*grid_h*grid_w):
all_seeds[idx] = seeds[idx % len(seeds)]
if shuffle_seed is not None:
rng = np.random.RandomState(seed=shuffle_seed)
rng.shuffle(all_seeds)
camera_lookat_point = torch.tensor(G.rendering_kwargs['avg_camera_pivot'], device=device)
zs = torch.from_numpy(np.stack([np.random.RandomState(seed).randn(G.z_dim) for seed in all_seeds])).to(device)
cam2world_pose = LookAtPoseSampler.sample(3.14/2, 3.14/2, camera_lookat_point, radius=G.rendering_kwargs['avg_camera_radius'], device=device)
focal_length = 4.2647 if cfg != 'Shapenet' else 1.7074 # shapenet has higher FOV
intrinsics = torch.tensor([[focal_length, 0, 0.5], [0, focal_length, 0.5], [0, 0, 1]], device=device)
c = torch.cat([cam2world_pose.reshape(-1, 16), intrinsics.reshape(-1, 9)], 1)
c = c.repeat(len(zs), 1)
ws = G.mapping(z=zs, c=c, truncation_psi=psi, truncation_cutoff=truncation_cutoff)
_ = G.synthesis(ws[:1], c[:1]) # warm up
ws = ws.reshape(grid_h, grid_w, num_keyframes, *ws.shape[1:])
# Interpolation.
grid = []
for yi in range(grid_h):
row = []
for xi in range(grid_w):
x = np.arange(-num_keyframes * wraps, num_keyframes * (wraps + 1))
y = np.tile(ws[yi][xi].cpu().numpy(), [wraps * 2 + 1, 1, 1])
interp = scipy.interpolate.interp1d(x, y, kind=kind, axis=0)
row.append(interp)
grid.append(row)
# Render video.
max_batch = 10000000
voxel_resolution = 512
video_out = imageio.get_writer(mp4, mode='I', fps=60, codec='libx264', **video_kwargs)
if gen_shapes:
outdir = 'interpolation_{}_{}/'.format(all_seeds[0], all_seeds[1])
os.makedirs(outdir, exist_ok=True)
all_poses = []
for frame_idx in tqdm(range(num_keyframes * w_frames)):
imgs = []
for yi in range(grid_h):
for xi in range(grid_w):
pitch_range = 0.25
yaw_range = 0.35
cam2world_pose = LookAtPoseSampler.sample(3.14/2 + yaw_range * np.sin(2 * 3.14 * frame_idx / (num_keyframes * w_frames)),
3.14/2 -0.05 + pitch_range * np.cos(2 * 3.14 * frame_idx / (num_keyframes * w_frames)),
camera_lookat_point, radius=G.rendering_kwargs['avg_camera_radius'], device=device)
all_poses.append(cam2world_pose.squeeze().cpu().numpy())
focal_length = 4.2647 if cfg != 'Shapenet' else 1.7074 # shapenet has higher FOV
intrinsics = torch.tensor([[focal_length, 0, 0.5], [0, focal_length, 0.5], [0, 0, 1]], device=device)
c = torch.cat([cam2world_pose.reshape(-1, 16), intrinsics.reshape(-1, 9)], 1)
interp = grid[yi][xi]
w = torch.from_numpy(interp(frame_idx / w_frames)).to(device)
entangle = 'camera'
if entangle == 'conditioning':
c_forward = torch.cat([LookAtPoseSampler.sample(3.14/2,
3.14/2,
camera_lookat_point,
radius=G.rendering_kwargs['avg_camera_radius'], device=device).reshape(-1, 16), intrinsics.reshape(-1, 9)], 1)
w_c = G.mapping(z=zs[0:1], c=c[0:1], truncation_psi=psi, truncation_cutoff=truncation_cutoff)
img = G.synthesis(ws=w_c, c=c_forward, noise_mode='const')[image_mode][0]
elif entangle == 'camera':
img = G.synthesis(ws=w.unsqueeze(0), c=c[0:1], noise_mode='const')[image_mode][0]
elif entangle == 'both':
w_c = G.mapping(z=zs[0:1], c=c[0:1], truncation_psi=psi, truncation_cutoff=truncation_cutoff)
img = G.synthesis(ws=w_c, c=c[0:1], noise_mode='const')[image_mode][0]
if image_mode == 'image_depth':
img = -img
img = (img - img.min()) / (img.max() - img.min()) * 2 - 1
imgs.append(img)
if gen_shapes:
# generate shapes
print('Generating shape for frame %d / %d ...' % (frame_idx, num_keyframes * w_frames))
samples, voxel_origin, voxel_size = create_samples(N=voxel_resolution, voxel_origin=[0, 0, 0], cube_length=G.rendering_kwargs['box_warp'])
samples = samples.to(device)
sigmas = torch.zeros((samples.shape[0], samples.shape[1], 1), device=device)
transformed_ray_directions_expanded = torch.zeros((samples.shape[0], max_batch, 3), device=device)
transformed_ray_directions_expanded[..., -1] = -1
head = 0
with tqdm(total = samples.shape[1]) as pbar:
with torch.no_grad():
while head < samples.shape[1]:
torch.manual_seed(0)
sigma = G.sample_mixed(samples[:, head:head+max_batch], transformed_ray_directions_expanded[:, :samples.shape[1]-head], w.unsqueeze(0), truncation_psi=psi, noise_mode='const')['sigma']
sigmas[:, head:head+max_batch] = sigma
head += max_batch
pbar.update(max_batch)
sigmas = sigmas.reshape((voxel_resolution, voxel_resolution, voxel_resolution)).cpu().numpy()
sigmas = np.flip(sigmas, 0)
pad = int(30 * voxel_resolution / 256)
pad_top = int(38 * voxel_resolution / 256)
sigmas[:pad] = 0
sigmas[-pad:] = 0
sigmas[:, :pad] = 0
sigmas[:, -pad_top:] = 0
sigmas[:, :, :pad] = 0
sigmas[:, :, -pad:] = 0
output_ply = True
if output_ply:
from shape_utils import convert_sdf_samples_to_ply
convert_sdf_samples_to_ply(np.transpose(sigmas, (2, 1, 0)), [0, 0, 0], 1, os.path.join(outdir, f'{frame_idx:04d}_shape.ply'), level=10)
else: # output mrc
with mrcfile.new_mmap(outdir + f'{frame_idx:04d}_shape.mrc', overwrite=True, shape=sigmas.shape, mrc_mode=2) as mrc:
mrc.data[:] = sigmas
video_out.append_data(layout_grid(torch.stack(imgs), grid_w=grid_w, grid_h=grid_h))
video_out.close()
all_poses = np.stack(all_poses)
if gen_shapes:
print(all_poses.shape)
with open(mp4.replace('.mp4', '_trajectory.npy'), 'wb') as f:
np.save(f, all_poses)
The provided code snippet includes necessary dependencies for implementing the `generate_images` function. Write a Python function `def generate_images( network_pkl: str, seeds: List[int], shuffle_seed: Optional[int], truncation_psi: float, truncation_cutoff: int, grid: Tuple[int,int], num_keyframes: Optional[int], w_frames: int, outdir: str, reload_modules: bool, cfg: str, image_mode: str, sampling_multiplier: float, nrr: Optional[int], shapes: bool, interpolate: bool, )` to solve the following problem:
Render a latent vector interpolation video. Examples: \b # Render a 4x2 grid of interpolations for seeds 0 through 31. python gen_video.py --output=lerp.mp4 --trunc=1 --seeds=0-31 --grid=4x2 \\ --network=https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-r-afhqv2-512x512.pkl Animation length and seed keyframes: The animation length is either determined based on the --seeds value or explicitly specified using the --num-keyframes option. When num keyframes is specified with --num-keyframes, the output video length will be 'num_keyframes*w_frames' frames. If --num-keyframes is not specified, the number of seeds given with --seeds must be divisible by grid size W*H (--grid). In this case the output video length will be '# seeds/(w*h)*w_frames' frames.
Here is the function:
def generate_images(
network_pkl: str,
seeds: List[int],
shuffle_seed: Optional[int],
truncation_psi: float,
truncation_cutoff: int,
grid: Tuple[int,int],
num_keyframes: Optional[int],
w_frames: int,
outdir: str,
reload_modules: bool,
cfg: str,
image_mode: str,
sampling_multiplier: float,
nrr: Optional[int],
shapes: bool,
interpolate: bool,
):
"""Render a latent vector interpolation video.
Examples:
\b
# Render a 4x2 grid of interpolations for seeds 0 through 31.
python gen_video.py --output=lerp.mp4 --trunc=1 --seeds=0-31 --grid=4x2 \\
--network=https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-r-afhqv2-512x512.pkl
Animation length and seed keyframes:
The animation length is either determined based on the --seeds value or explicitly
specified using the --num-keyframes option.
When num keyframes is specified with --num-keyframes, the output video length
will be 'num_keyframes*w_frames' frames.
If --num-keyframes is not specified, the number of seeds given with
--seeds must be divisible by grid size W*H (--grid). In this case the
output video length will be '# seeds/(w*h)*w_frames' frames.
"""
if not os.path.exists(outdir):
os.makedirs(outdir, exist_ok=True)
print('Loading networks from "%s"...' % network_pkl)
device = torch.device('cuda')
with dnnlib.util.open_url(network_pkl) as f:
G = legacy.load_network_pkl(f)['G_ema'].to(device) # type: ignore
G.rendering_kwargs['depth_resolution'] = int(G.rendering_kwargs['depth_resolution'] * sampling_multiplier)
G.rendering_kwargs['depth_resolution_importance'] = int(G.rendering_kwargs['depth_resolution_importance'] * sampling_multiplier)
if nrr is not None: G.neural_rendering_resolution = nrr
if truncation_cutoff == 0:
truncation_psi = 1.0 # truncation cutoff of 0 means no truncation anyway
if truncation_psi == 1.0:
truncation_cutoff = 14 # no truncation, so it doesn't matter where we cut off
if interpolate:
output = os.path.join(outdir, 'interpolation.mp4')
gen_interp_video(G=G, mp4=output, bitrate='10M', grid_dims=grid, num_keyframes=num_keyframes, w_frames=w_frames, seeds=seeds, shuffle_seed=shuffle_seed, psi=truncation_psi, truncation_cutoff=truncation_cutoff, cfg=cfg, image_mode=image_mode, gen_shapes=shapes)
else:
for seed in seeds:
output = os.path.join(outdir, f'{seed}.mp4')
seeds_ = [seed]
gen_interp_video(G=G, mp4=output, bitrate='10M', grid_dims=grid, num_keyframes=num_keyframes, w_frames=w_frames, seeds=seeds_, shuffle_seed=shuffle_seed, psi=truncation_psi, truncation_cutoff=truncation_cutoff, cfg=cfg, image_mode=image_mode) | Render a latent vector interpolation video. Examples: \b # Render a 4x2 grid of interpolations for seeds 0 through 31. python gen_video.py --output=lerp.mp4 --trunc=1 --seeds=0-31 --grid=4x2 \\ --network=https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-r-afhqv2-512x512.pkl Animation length and seed keyframes: The animation length is either determined based on the --seeds value or explicitly specified using the --num-keyframes option. When num keyframes is specified with --num-keyframes, the output video length will be 'num_keyframes*w_frames' frames. If --num-keyframes is not specified, the number of seeds given with --seeds must be divisible by grid size W*H (--grid). In this case the output video length will be '# seeds/(w*h)*w_frames' frames. |
9,307 | import numpy as np
import torch
from torch_utils import misc
from torch_utils import persistence
from torch_utils.ops import conv2d_resample
from torch_utils.ops import upfirdn2d
from torch_utils.ops import bias_act
from torch_utils.ops import fma
def normalize_2nd_moment(x, dim=1, eps=1e-8):
return x * (x.square().mean(dim=dim, keepdim=True) + eps).rsqrt() | null |
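A quick numerical illustration of what normalize_2nd_moment computes, using only torch:

import torch

x = torch.randn(4, 512)
y = x * (x.square().mean(dim=1, keepdim=True) + 1e-8).rsqrt()
print(y.square().mean(dim=1))  # ~tensor([1., 1., 1., 1.]): each row now has unit second moment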
9,308 | import numpy as np
import torch
from torch_utils import misc
from torch_utils import persistence
from torch_utils.ops import conv2d_resample
from torch_utils.ops import upfirdn2d
from torch_utils.ops import bias_act
from torch_utils.ops import fma
def conv2d_resample(x, w, f=None, up=1, down=1, padding=0, groups=1, flip_weight=True, flip_filter=False):
r"""2D convolution with optional up/downsampling.
Padding is performed only once at the beginning, not between the operations.
Args:
x: Input tensor of shape
`[batch_size, in_channels, in_height, in_width]`.
w: Weight tensor of shape
`[out_channels, in_channels//groups, kernel_height, kernel_width]`.
f: Low-pass filter for up/downsampling. Must be prepared beforehand by
calling upfirdn2d.setup_filter(). None = identity (default).
up: Integer upsampling factor (default: 1).
down: Integer downsampling factor (default: 1).
padding: Padding with respect to the upsampled image. Can be a single number
or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
(default: 0).
groups: Split input channels into N groups (default: 1).
flip_weight: False = convolution, True = correlation (default: True).
flip_filter: False = convolution, True = correlation (default: False).
Returns:
Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
"""
# Validate arguments.
assert isinstance(x, torch.Tensor) and (x.ndim == 4)
assert isinstance(w, torch.Tensor) and (w.ndim == 4) and (w.dtype == x.dtype)
assert f is None or (isinstance(f, torch.Tensor) and f.ndim in [1, 2] and f.dtype == torch.float32)
assert isinstance(up, int) and (up >= 1)
assert isinstance(down, int) and (down >= 1)
assert isinstance(groups, int) and (groups >= 1)
out_channels, in_channels_per_group, kh, kw = _get_weight_shape(w)
fw, fh = _get_filter_size(f)
px0, px1, py0, py1 = _parse_padding(padding)
# Adjust padding to account for up/downsampling.
if up > 1:
px0 += (fw + up - 1) // 2
px1 += (fw - up) // 2
py0 += (fh + up - 1) // 2
py1 += (fh - up) // 2
if down > 1:
px0 += (fw - down + 1) // 2
px1 += (fw - down) // 2
py0 += (fh - down + 1) // 2
py1 += (fh - down) // 2
# Fast path: 1x1 convolution with downsampling only => downsample first, then convolve.
if kw == 1 and kh == 1 and (down > 1 and up == 1):
x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, padding=[px0,px1,py0,py1], flip_filter=flip_filter)
x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
return x
# Fast path: 1x1 convolution with upsampling only => convolve first, then upsample.
if kw == 1 and kh == 1 and (up > 1 and down == 1):
x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
x = upfirdn2d.upfirdn2d(x=x, f=f, up=up, padding=[px0,px1,py0,py1], gain=up**2, flip_filter=flip_filter)
return x
# Fast path: downsampling only => use strided convolution.
if down > 1 and up == 1:
x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0,px1,py0,py1], flip_filter=flip_filter)
x = _conv2d_wrapper(x=x, w=w, stride=down, groups=groups, flip_weight=flip_weight)
return x
# Fast path: upsampling with optional downsampling => use transpose strided convolution.
if up > 1:
if groups == 1:
w = w.transpose(0, 1)
else:
w = w.reshape(groups, out_channels // groups, in_channels_per_group, kh, kw)
w = w.transpose(1, 2)
w = w.reshape(groups * in_channels_per_group, out_channels // groups, kh, kw)
px0 -= kw - 1
px1 -= kw - up
py0 -= kh - 1
py1 -= kh - up
pxt = max(min(-px0, -px1), 0)
pyt = max(min(-py0, -py1), 0)
x = _conv2d_wrapper(x=x, w=w, stride=up, padding=[pyt,pxt], groups=groups, transpose=True, flip_weight=(not flip_weight))
x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0+pxt,px1+pxt,py0+pyt,py1+pyt], gain=up**2, flip_filter=flip_filter)
if down > 1:
x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter)
return x
# Fast path: no up/downsampling, padding supported by the underlying implementation => use plain conv2d.
if up == 1 and down == 1:
if px0 == px1 and py0 == py1 and px0 >= 0 and py0 >= 0:
return _conv2d_wrapper(x=x, w=w, padding=[py0,px0], groups=groups, flip_weight=flip_weight)
# Fallback: Generic reference implementation.
x = upfirdn2d.upfirdn2d(x=x, f=(f if up > 1 else None), up=up, padding=[px0,px1,py0,py1], gain=up**2, flip_filter=flip_filter)
x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
if down > 1:
x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter)
return x
def fma(a, b, c): # => a * b + c
return _FusedMultiplyAdd.apply(a, b, c)
def modulated_conv2d(
x, # Input tensor of shape [batch_size, in_channels, in_height, in_width].
weight, # Weight tensor of shape [out_channels, in_channels, kernel_height, kernel_width].
styles, # Modulation coefficients of shape [batch_size, in_channels].
noise = None, # Optional noise tensor to add to the output activations.
up = 1, # Integer upsampling factor.
down = 1, # Integer downsampling factor.
padding = 0, # Padding with respect to the upsampled image.
resample_filter = None, # Low-pass filter to apply when resampling activations. Must be prepared beforehand by calling upfirdn2d.setup_filter().
demodulate = True, # Apply weight demodulation?
flip_weight = True, # False = convolution, True = correlation (matches torch.nn.functional.conv2d).
fused_modconv = True, # Perform modulation, convolution, and demodulation as a single fused operation?
):
batch_size = x.shape[0]
out_channels, in_channels, kh, kw = weight.shape
misc.assert_shape(weight, [out_channels, in_channels, kh, kw]) # [OIkk]
misc.assert_shape(x, [batch_size, in_channels, None, None]) # [NIHW]
misc.assert_shape(styles, [batch_size, in_channels]) # [NI]
# Pre-normalize inputs to avoid FP16 overflow.
if x.dtype == torch.float16 and demodulate:
weight = weight * (1 / np.sqrt(in_channels * kh * kw) / weight.norm(float('inf'), dim=[1,2,3], keepdim=True)) # max_Ikk
styles = styles / styles.norm(float('inf'), dim=1, keepdim=True) # max_I
# Calculate per-sample weights and demodulation coefficients.
w = None
dcoefs = None
if demodulate or fused_modconv:
w = weight.unsqueeze(0) # [NOIkk]
w = w * styles.reshape(batch_size, 1, -1, 1, 1) # [NOIkk]
if demodulate:
dcoefs = (w.square().sum(dim=[2,3,4]) + 1e-8).rsqrt() # [NO]
if demodulate and fused_modconv:
w = w * dcoefs.reshape(batch_size, -1, 1, 1, 1) # [NOIkk]
# Execute by scaling the activations before and after the convolution.
if not fused_modconv:
x = x * styles.to(x.dtype).reshape(batch_size, -1, 1, 1)
x = conv2d_resample.conv2d_resample(x=x, w=weight.to(x.dtype), f=resample_filter, up=up, down=down, padding=padding, flip_weight=flip_weight)
if demodulate and noise is not None:
x = fma.fma(x, dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1), noise.to(x.dtype))
elif demodulate:
x = x * dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1)
elif noise is not None:
x = x.add_(noise.to(x.dtype))
return x
# Execute as one fused op using grouped convolution.
with misc.suppress_tracer_warnings(): # this value will be treated as a constant
batch_size = int(batch_size)
misc.assert_shape(x, [batch_size, in_channels, None, None])
x = x.reshape(1, -1, *x.shape[2:])
w = w.reshape(-1, in_channels, kh, kw)
x = conv2d_resample.conv2d_resample(x=x, w=w.to(x.dtype), f=resample_filter, up=up, down=down, padding=padding, groups=batch_size, flip_weight=flip_weight)
x = x.reshape(batch_size, -1, *x.shape[2:])
if noise is not None:
x = x.add_(noise)
return x | null |
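As a concrete illustration of the modulate/demodulate idea above, here is a self-contained sketch of the fused grouped-convolution path using plain torch.nn.functional.conv2d. It is a simplification (no up/down-sampling, no noise, 'same' padding), and modconv_sketch is a hypothetical name, not part of the codebase:

import torch
import torch.nn.functional as F

def modconv_sketch(x, weight, styles, demodulate=True):
    # x: [N, I, H, W], weight: [O, I, kh, kw], styles: [N, I]
    N = x.shape[0]
    O, I, kh, kw = weight.shape
    w = weight.unsqueeze(0) * styles.reshape(N, 1, I, 1, 1)  # per-sample modulation
    if demodulate:
        d = (w.square().sum(dim=[2, 3, 4]) + 1e-8).rsqrt()   # [N, O]
        w = w * d.reshape(N, O, 1, 1, 1)                     # demodulation
    x = x.reshape(1, N * I, *x.shape[2:])                    # fold batch into channels
    w = w.reshape(N * O, I, kh, kw)
    y = F.conv2d(x, w, padding=kh // 2, groups=N)            # one conv per sample via groups
    return y.reshape(N, O, *y.shape[2:])

x = torch.randn(2, 8, 16, 16)
weight = torch.randn(4, 8, 3, 3)
styles = torch.randn(2, 8)
print(modconv_sketch(x, weight, styles).shape)  # torch.Size([2, 4, 16, 16])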
9,309 | import torch
def sample_cross_section(G, ws, resolution=256, w=1.2):
axis=0
A, B = torch.meshgrid(torch.linspace(w/2, -w/2, resolution, device=ws.device), torch.linspace(-w/2, w/2, resolution, device=ws.device), indexing='ij')
A, B = A.reshape(-1, 1), B.reshape(-1, 1)
C = torch.zeros_like(A)
coordinates = [A, B]
coordinates.insert(axis, C)
coordinates = torch.cat(coordinates, dim=-1).expand(ws.shape[0], -1, -1)
sigma = G.sample_mixed(coordinates, torch.randn_like(coordinates), ws)['sigma']
return sigma.reshape(-1, 1, resolution, resolution) | null |
9,310 | import numpy as np
import torch
from torch_utils import persistence
from torch_utils.ops import upfirdn2d
from training.networks_stylegan2 import DiscriminatorBlock, MappingNetwork, DiscriminatorEpilogue
def upfirdn2d(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1, impl='cuda'):
r"""Pad, upsample, filter, and downsample a batch of 2D images.
Performs the following sequence of operations for each channel:
1. Upsample the image by inserting N-1 zeros after each pixel (`up`).
2. Pad the image with the specified number of zeros on each side (`padding`).
Negative padding corresponds to cropping the image.
3. Convolve the image with the specified 2D FIR filter (`f`), shrinking it
so that the footprint of all output pixels lies within the input image.
4. Downsample the image by keeping every Nth pixel (`down`).
This sequence of operations bears close resemblance to scipy.signal.upfirdn().
The fused op is considerably more efficient than performing the same calculation
using standard PyTorch ops. It supports gradients of arbitrary order.
Args:
x: Float32/float64/float16 input tensor of the shape
`[batch_size, num_channels, in_height, in_width]`.
f: Float32 FIR filter of the shape
`[filter_height, filter_width]` (non-separable),
`[filter_taps]` (separable), or
`None` (identity).
up: Integer upsampling factor. Can be a single int or a list/tuple
`[x, y]` (default: 1).
down: Integer downsampling factor. Can be a single int or a list/tuple
`[x, y]` (default: 1).
padding: Padding with respect to the upsampled image. Can be a single number
or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
(default: 0).
flip_filter: False = convolution, True = correlation (default: False).
gain: Overall scaling factor for signal magnitude (default: 1).
impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`).
Returns:
Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
"""
assert isinstance(x, torch.Tensor)
assert impl in ['ref', 'cuda']
if impl == 'cuda' and x.device.type == 'cuda' and _init():
return _upfirdn2d_cuda(up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain).apply(x, f)
return _upfirdn2d_ref(x, f, up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain)
def filtered_resizing(image_orig_tensor, size, f, filter_mode='antialiased'):
if filter_mode == 'antialiased':
ada_filtered_64 = torch.nn.functional.interpolate(image_orig_tensor, size=(size, size), mode='bilinear', align_corners=False, antialias=True)
elif filter_mode == 'classic':
ada_filtered_64 = upfirdn2d.upsample2d(image_orig_tensor, f, up=2)
ada_filtered_64 = torch.nn.functional.interpolate(ada_filtered_64, size=(size * 2 + 2, size * 2 + 2), mode='bilinear', align_corners=False)
ada_filtered_64 = upfirdn2d.downsample2d(ada_filtered_64, f, down=2, flip_filter=True, padding=-1)
elif filter_mode == 'none':
ada_filtered_64 = torch.nn.functional.interpolate(image_orig_tensor, size=(size, size), mode='bilinear', align_corners=False)
elif type(filter_mode) == float:
assert 0 < filter_mode < 1
filtered = torch.nn.functional.interpolate(image_orig_tensor, size=(size, size), mode='bilinear', align_corners=False, antialias=True)
aliased = torch.nn.functional.interpolate(image_orig_tensor, size=(size, size), mode='bilinear', align_corners=False, antialias=False)
ada_filtered_64 = (1 - filter_mode) * aliased + (filter_mode) * filtered
return ada_filtered_64 | null |
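When filter_mode is a float in (0, 1), filtered_resizing blends an aliased and an antialiased bilinear resize; a minimal sketch of that branch (assumes a PyTorch version where F.interpolate supports the antialias flag):

import torch
import torch.nn.functional as F

img = torch.randn(1, 3, 128, 128)
t = 0.7  # the float filter_mode
filtered = F.interpolate(img, size=(64, 64), mode='bilinear', align_corners=False, antialias=True)
aliased = F.interpolate(img, size=(64, 64), mode='bilinear', align_corners=False, antialias=False)
out = (1 - t) * aliased + t * filtered
print(out.shape)  # torch.Size([1, 3, 64, 64])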
9,311 | import numpy as np
import scipy.signal
import scipy.optimize
import torch
from torch_utils import misc
from torch_utils import persistence
from torch_utils.ops import conv2d_gradfix
from torch_utils.ops import filtered_lrelu
from torch_utils.ops import bias_act
def modulated_conv2d(
x, # Input tensor: [batch_size, in_channels, in_height, in_width]
w, # Weight tensor: [out_channels, in_channels, kernel_height, kernel_width]
s, # Style tensor: [batch_size, in_channels]
demodulate = True, # Apply weight demodulation?
padding = 0, # Padding: int or [padH, padW]
input_gain = None, # Optional scale factors for the input channels: [], [in_channels], or [batch_size, in_channels]
):
with misc.suppress_tracer_warnings(): # this value will be treated as a constant
batch_size = int(x.shape[0])
out_channels, in_channels, kh, kw = w.shape
misc.assert_shape(w, [out_channels, in_channels, kh, kw]) # [OIkk]
misc.assert_shape(x, [batch_size, in_channels, None, None]) # [NIHW]
misc.assert_shape(s, [batch_size, in_channels]) # [NI]
# Pre-normalize inputs.
if demodulate:
w = w * w.square().mean([1,2,3], keepdim=True).rsqrt()
s = s * s.square().mean().rsqrt()
# Modulate weights.
w = w.unsqueeze(0) # [NOIkk]
w = w * s.unsqueeze(1).unsqueeze(3).unsqueeze(4) # [NOIkk]
# Demodulate weights.
if demodulate:
dcoefs = (w.square().sum(dim=[2,3,4]) + 1e-8).rsqrt() # [NO]
w = w * dcoefs.unsqueeze(2).unsqueeze(3).unsqueeze(4) # [NOIkk]
# Apply input scaling.
if input_gain is not None:
input_gain = input_gain.expand(batch_size, in_channels) # [NI]
w = w * input_gain.unsqueeze(1).unsqueeze(3).unsqueeze(4) # [NOIkk]
# Execute as one fused op using grouped convolution.
x = x.reshape(1, -1, *x.shape[2:])
w = w.reshape(-1, in_channels, kh, kw)
x = conv2d_gradfix.conv2d(input=x, weight=w.to(x.dtype), padding=padding, groups=batch_size)
x = x.reshape(batch_size, -1, *x.shape[2:])
return x | null |
9,312 | import math
import torch
import torch.nn as nn
from training.volumetric_rendering.ray_marcher import MipRayMarcher2
from training.volumetric_rendering import math_utils
The provided code snippet includes necessary dependencies for implementing the `generate_planes` function. Write a Python function `def generate_planes()` to solve the following problem:
Defines planes by the three vectors that form the "axes" of the plane. Should work with an arbitrary number of planes and planes of arbitrary orientation.
Here is the function:
def generate_planes():
"""
Defines planes by the three vectors that form the "axes" of the
plane. Should work with an arbitrary number of planes and planes of
arbitrary orientation.
"""
return torch.tensor([[[1, 0, 0],
[0, 1, 0],
[0, 0, 1]],
[[1, 0, 0],
[0, 0, 1],
[0, 1, 0]],
[[0, 0, 1],
[1, 0, 0],
[0, 1, 0]]], dtype=torch.float32) | Defines planes by the three vectors that form the "axes" of the plane. Should work with an arbitrary number of planes and planes of arbitrary orientation. |
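A quick sanity check, assuming generate_planes above is in scope; each plane is described by a 3x3 matrix of axis vectors:

import torch

planes = generate_planes()
print(planes.shape)  # torch.Size([3, 3, 3])
print(planes[0])     # identity: the canonical plane spanned by the x and y axes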
9,313 | import math
import torch
import torch.nn as nn
from training.volumetric_rendering.ray_marcher import MipRayMarcher2
from training.volumetric_rendering import math_utils
def project_onto_planes(planes, coordinates):
def sample_from_planes(plane_axes, plane_features, coordinates, mode='bilinear', padding_mode='zeros', box_warp=None):
assert padding_mode == 'zeros'
N, n_planes, C, H, W = plane_features.shape
_, M, _ = coordinates.shape
plane_features = plane_features.view(N*n_planes, C, H, W)
coordinates = (2/box_warp) * coordinates # TODO: add specific box bounds
projected_coordinates = project_onto_planes(plane_axes, coordinates).unsqueeze(1)
output_features = torch.nn.functional.grid_sample(plane_features, projected_coordinates.float(), mode=mode, padding_mode=padding_mode, align_corners=False).permute(0, 3, 2, 1).reshape(N, n_planes, M, C)
return output_features | null |
9,314 | import math
import torch
import torch.nn as nn
from training.volumetric_rendering.ray_marcher import MipRayMarcher2
from training.volumetric_rendering import math_utils
The provided code snippet includes necessary dependencies for implementing the `sample_from_3dgrid` function. Write a Python function `def sample_from_3dgrid(grid, coordinates)` to solve the following problem:
Expects coordinates in shape (batch_size, num_points_per_batch, 3) Expects grid in shape (1, channels, H, W, D) (Also works if grid has batch size) Returns sampled features of shape (batch_size, num_points_per_batch, feature_channels)
Here is the function:
def sample_from_3dgrid(grid, coordinates):
"""
Expects coordinates in shape (batch_size, num_points_per_batch, 3)
Expects grid in shape (1, channels, H, W, D)
(Also works if grid has batch size)
Returns sampled features of shape (batch_size, num_points_per_batch, feature_channels)
"""
batch_size, n_coords, n_dims = coordinates.shape
sampled_features = torch.nn.functional.grid_sample(grid.expand(batch_size, -1, -1, -1, -1),
coordinates.reshape(batch_size, 1, 1, -1, n_dims),
mode='bilinear', padding_mode='zeros', align_corners=False)
N, C, H, W, D = sampled_features.shape
sampled_features = sampled_features.permute(0, 4, 3, 2, 1).reshape(N, H*W*D, C)
return sampled_features | Expects coordinates in shape (batch_size, num_points_per_batch, 3) Expects grid in shape (1, channels, H, W, D) (Also works if grid has batch size) Returns sampled features of shape (batch_size, num_points_per_batch, feature_channels) |
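A shape-level usage example, assuming sample_from_3dgrid above is in scope (grid_sample expects coordinates normalized to [-1, 1]):

import torch

grid = torch.randn(1, 32, 8, 8, 8)      # [1, channels, H, W, D]
coords = torch.rand(4, 100, 3) * 2 - 1  # 100 query points per batch element
feats = sample_from_3dgrid(grid, coords)
print(feats.shape)                      # torch.Size([4, 100, 32])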
9,315 | import torch
The provided code snippet includes necessary dependencies for implementing the `transform_vectors` function. Write a Python function `def transform_vectors(matrix: torch.Tensor, vectors4: torch.Tensor) -> torch.Tensor` to solve the following problem:
Applies an MxM matrix to each row of an NxM stack of vectors (left-multiplies each vector by the matrix). Returns NxM.
Here is the function:
def transform_vectors(matrix: torch.Tensor, vectors4: torch.Tensor) -> torch.Tensor:
"""
Applies an MxM matrix to each row of an NxM stack of vectors (left-multiplies each vector by the matrix). Returns NxM.
"""
res = torch.matmul(vectors4, matrix.T)
return res | Applies an MxM matrix to each row of an NxM stack of vectors (left-multiplies each vector by the matrix). Returns NxM. |
9,316 | import torch
The provided code snippet includes necessary dependencies for implementing the `torch_dot` function. Write a Python function `def torch_dot(x: torch.Tensor, y: torch.Tensor)` to solve the following problem:
Dot product of two tensors along their last dimension.
Here is the function:
def torch_dot(x: torch.Tensor, y: torch.Tensor):
"""
Dot product of two tensors along their last dimension.
"""
return (x * y).sum(-1) | Dot product of two tensors along their last dimension. |
9,317 | import torch
The provided code snippet includes necessary dependencies for implementing the `get_ray_limits_box` function. Write a Python function `def get_ray_limits_box(rays_o: torch.Tensor, rays_d: torch.Tensor, box_side_length)` to solve the following problem:
Author: Petr Kellnhofer Intersects rays with the [-1, 1] NDC volume. Returns the entry (tmin) and exit (tmax) distance along each ray; rays that miss the box get tmin = -1 and tmax = -2. https://www.scratchapixel.com/lessons/3d-basic-rendering/minimal-ray-tracer-rendering-simple-shapes/ray-box-intersection
Here is the function:
def get_ray_limits_box(rays_o: torch.Tensor, rays_d: torch.Tensor, box_side_length):
"""
Author: Petr Kellnhofer
Intersects rays with the [-1, 1] NDC volume.
Returns the entry (tmin) and exit (tmax) distance along each ray.
Rays that miss the box get tmin = -1 and tmax = -2.
https://www.scratchapixel.com/lessons/3d-basic-rendering/minimal-ray-tracer-rendering-simple-shapes/ray-box-intersection
"""
o_shape = rays_o.shape
rays_o = rays_o.detach().reshape(-1, 3)
rays_d = rays_d.detach().reshape(-1, 3)
bb_min = [-1*(box_side_length/2), -1*(box_side_length/2), -1*(box_side_length/2)]
bb_max = [1*(box_side_length/2), 1*(box_side_length/2), 1*(box_side_length/2)]
bounds = torch.tensor([bb_min, bb_max], dtype=rays_o.dtype, device=rays_o.device)
is_valid = torch.ones(rays_o.shape[:-1], dtype=bool, device=rays_o.device)
# Precompute inverse for stability.
invdir = 1 / rays_d
sign = (invdir < 0).long()
# Intersect with YZ plane.
tmin = (bounds.index_select(0, sign[..., 0])[..., 0] - rays_o[..., 0]) * invdir[..., 0]
tmax = (bounds.index_select(0, 1 - sign[..., 0])[..., 0] - rays_o[..., 0]) * invdir[..., 0]
# Intersect with XZ plane.
tymin = (bounds.index_select(0, sign[..., 1])[..., 1] - rays_o[..., 1]) * invdir[..., 1]
tymax = (bounds.index_select(0, 1 - sign[..., 1])[..., 1] - rays_o[..., 1]) * invdir[..., 1]
# Resolve parallel rays.
is_valid[torch.logical_or(tmin > tymax, tymin > tmax)] = False
# Use the shortest intersection.
tmin = torch.max(tmin, tymin)
tmax = torch.min(tmax, tymax)
# Intersect with XY plane.
tzmin = (bounds.index_select(0, sign[..., 2])[..., 2] - rays_o[..., 2]) * invdir[..., 2]
tzmax = (bounds.index_select(0, 1 - sign[..., 2])[..., 2] - rays_o[..., 2]) * invdir[..., 2]
# Resolve parallel rays.
is_valid[torch.logical_or(tmin > tzmax, tzmin > tmax)] = False
# Use the shortest intersection.
tmin = torch.max(tmin, tzmin)
tmax = torch.min(tmax, tzmax)
# Mark invalid.
tmin[torch.logical_not(is_valid)] = -1
tmax[torch.logical_not(is_valid)] = -2
return tmin.reshape(*o_shape[:-1], 1), tmax.reshape(*o_shape[:-1], 1) | Author: Petr Kellnhofer Intersects rays with the [-1, 1] NDC volume. Returns the entry (tmin) and exit (tmax) distance along each ray; rays that miss the box get tmin = -1 and tmax = -2. https://www.scratchapixel.com/lessons/3d-basic-rendering/minimal-ray-tracer-rendering-simple-shapes/ray-box-intersection |
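A worked example, assuming get_ray_limits_box above is in scope; with box_side_length=2 the box spans [-1, 1] on every axis:

import torch

rays_o = torch.tensor([[-2.0, 0.0, 0.0]])  # one ray starting left of the box
rays_d = torch.tensor([[1.0, 0.0, 0.0]])   # marching along +x
tmin, tmax = get_ray_limits_box(rays_o, rays_d, box_side_length=2.0)
print(tmin.item(), tmax.item())  # 1.0 3.0: the ray enters at x=-1 and exits at x=+1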
9,318 | import torch
The provided code snippet includes necessary dependencies for implementing the `linspace` function. Write a Python function `def linspace(start: torch.Tensor, stop: torch.Tensor, num: int)` to solve the following problem:
Creates a tensor of shape [num, *start.shape] whose values are evenly spaced from start to end, inclusive. Replicates the multi-dimensional behaviour of numpy.linspace in PyTorch.
Here is the function:
def linspace(start: torch.Tensor, stop: torch.Tensor, num: int):
"""
Creates a tensor of shape [num, *start.shape] whose values are evenly spaced from start to end, inclusive.
Replicates the multi-dimensional behaviour of numpy.linspace in PyTorch.
"""
# create a tensor of 'num' steps from 0 to 1
steps = torch.arange(num, dtype=torch.float32, device=start.device) / (num - 1)
# reshape the 'steps' tensor to [-1, *([1]*start.ndim)] to allow for broadcasting
# - using 'steps.reshape([-1, *([1]*start.ndim)])' would be nice here but torchscript
# "cannot statically infer the expected size of a list in this context", hence the code below
for i in range(start.ndim):
steps = steps.unsqueeze(-1)
# the output starts at 'start' and increments until 'stop' in each dimension
out = start[None] + steps * (stop - start)[None]
return out | Creates a tensor of shape [num, *start.shape] whose values are evenly spaced from start to end, inclusive. Replicates the multi-dimensional behaviour of numpy.linspace in PyTorch. |
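A quick usage check, assuming linspace above is in scope:

import torch

start = torch.zeros(2, 3)
stop = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
out = linspace(start, stop, num=5)
print(out.shape)  # torch.Size([5, 2, 3])
print(torch.allclose(out[0], start), torch.allclose(out[-1], stop))  # True True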
9,319 | import numpy as np
import scipy.signal
import torch
from torch_utils import persistence
from torch_utils import misc
from torch_utils.ops import upfirdn2d
from torch_utils.ops import grid_sample_gradfix
from torch_utils.ops import conv2d_gradfix
def matrix(*rows, device=None):
assert all(len(row) == len(rows[0]) for row in rows)
elems = [x for row in rows for x in row]
ref = [x for x in elems if isinstance(x, torch.Tensor)]
if len(ref) == 0:
return misc.constant(np.asarray(rows), device=device)
assert device is None or device == ref[0].device
elems = [x if isinstance(x, torch.Tensor) else misc.constant(x, shape=ref[0].shape, device=ref[0].device) for x in elems]
return torch.stack(elems, dim=-1).reshape(ref[0].shape + (len(rows), -1))
def translate3d(tx, ty, tz, **kwargs):
return matrix(
[1, 0, 0, tx],
[0, 1, 0, ty],
[0, 0, 1, tz],
[0, 0, 0, 1],
**kwargs) | null |
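These helpers assemble standard 4x4 homogeneous transforms; a plain-NumPy sketch of the convention, independent of the matrix() helper (values chosen for illustration):

import numpy as np

T = np.array([[1, 0, 0, 2],   # what translate3d(2, 0, 0) produces for plain floats
              [0, 1, 0, 0],
              [0, 0, 1, 0],
              [0, 0, 0, 1]], dtype=np.float32)
p = np.array([1, 1, 1, 1], dtype=np.float32)  # the point (1, 1, 1) in homogeneous coordinates
print(T @ p)  # [3. 1. 1. 1.]: translated by +2 along x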
9,320 | import numpy as np
import scipy.signal
import torch
from torch_utils import persistence
from torch_utils import misc
from torch_utils.ops import upfirdn2d
from torch_utils.ops import grid_sample_gradfix
from torch_utils.ops import conv2d_gradfix
def matrix(*rows, device=None):
assert all(len(row) == len(rows[0]) for row in rows)
elems = [x for row in rows for x in row]
ref = [x for x in elems if isinstance(x, torch.Tensor)]
if len(ref) == 0:
return misc.constant(np.asarray(rows), device=device)
assert device is None or device == ref[0].device
elems = [x if isinstance(x, torch.Tensor) else misc.constant(x, shape=ref[0].shape, device=ref[0].device) for x in elems]
return torch.stack(elems, dim=-1).reshape(ref[0].shape + (len(rows), -1))
def scale3d(sx, sy, sz, **kwargs):
return matrix(
[sx, 0, 0, 0],
[0, sy, 0, 0],
[0, 0, sz, 0],
[0, 0, 0, 1],
**kwargs) | null |
9,321 | import numpy as np
import scipy.signal
import torch
from torch_utils import persistence
from torch_utils import misc
from torch_utils.ops import upfirdn2d
from torch_utils.ops import grid_sample_gradfix
from torch_utils.ops import conv2d_gradfix
def matrix(*rows, device=None):
assert all(len(row) == len(rows[0]) for row in rows)
elems = [x for row in rows for x in row]
ref = [x for x in elems if isinstance(x, torch.Tensor)]
if len(ref) == 0:
return misc.constant(np.asarray(rows), device=device)
assert device is None or device == ref[0].device
elems = [x if isinstance(x, torch.Tensor) else misc.constant(x, shape=ref[0].shape, device=ref[0].device) for x in elems]
return torch.stack(elems, dim=-1).reshape(ref[0].shape + (len(rows), -1))
def rotate3d(v, theta, **kwargs):
vx = v[..., 0]; vy = v[..., 1]; vz = v[..., 2]
s = torch.sin(theta); c = torch.cos(theta); cc = 1 - c
return matrix(
[vx*vx*cc+c, vx*vy*cc-vz*s, vx*vz*cc+vy*s, 0],
[vy*vx*cc+vz*s, vy*vy*cc+c, vy*vz*cc-vx*s, 0],
[vz*vx*cc-vy*s, vz*vy*cc+vx*s, vz*vz*cc+c, 0],
[0, 0, 0, 1],
**kwargs) | null |
9,322 | import numpy as np
import scipy.signal
import torch
from torch_utils import persistence
from torch_utils import misc
from torch_utils.ops import upfirdn2d
from torch_utils.ops import grid_sample_gradfix
from torch_utils.ops import conv2d_gradfix
def translate2d(tx, ty, **kwargs):
return matrix(
[1, 0, tx],
[0, 1, ty],
[0, 0, 1],
**kwargs)
def translate2d_inv(tx, ty, **kwargs):
return translate2d(-tx, -ty, **kwargs) | null |
9,323 | import numpy as np
import scipy.signal
import torch
from torch_utils import persistence
from torch_utils import misc
from torch_utils.ops import upfirdn2d
from torch_utils.ops import grid_sample_gradfix
from torch_utils.ops import conv2d_gradfix
def scale2d(sx, sy, **kwargs):
return matrix(
[sx, 0, 0],
[0, sy, 0],
[0, 0, 1],
**kwargs)
def scale2d_inv(sx, sy, **kwargs):
return scale2d(1 / sx, 1 / sy, **kwargs) | null |
9,324 | import numpy as np
import scipy.signal
import torch
from torch_utils import persistence
from torch_utils import misc
from torch_utils.ops import upfirdn2d
from torch_utils.ops import grid_sample_gradfix
from torch_utils.ops import conv2d_gradfix
def rotate2d(theta, **kwargs):
def rotate2d_inv(theta, **kwargs):
return rotate2d(-theta, **kwargs) | null |
9,325 | import glob
import os
import re
import dnnlib
import imgui
import numpy as np
from gui_utils import imgui_utils
from . import renderer
def _locate_results(pattern):
return pattern | null |
9,326 | import sys
import copy
import traceback
import numpy as np
import torch
import torch.fft
import torch.nn
import matplotlib.cm
import dnnlib
from torch_utils.ops import upfirdn2d
import legacy
from camera_utils import LookAtPoseSampler
def _construct_affine_bandlimit_filter(mat, a=3, amax=16, aflt=64, up=4, cutoff_in=1, cutoff_out=1):
def upfirdn2d(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1, impl='cuda'):
def _apply_affine_transformation(x, mat, up=4, **filter_kwargs):
_N, _C, H, W = x.shape
mat = torch.as_tensor(mat).to(dtype=torch.float32, device=x.device)
# Construct filter.
f = _construct_affine_bandlimit_filter(mat, up=up, **filter_kwargs)
assert f.ndim == 2 and f.shape[0] == f.shape[1] and f.shape[0] % 2 == 1
p = f.shape[0] // 2
# Construct sampling grid.
theta = mat.inverse()
theta[:2, 2] *= 2
theta[0, 2] += 1 / up / W
theta[1, 2] += 1 / up / H
theta[0, :] *= W / (W + p / up * 2)
theta[1, :] *= H / (H + p / up * 2)
theta = theta[:2, :3].unsqueeze(0).repeat([x.shape[0], 1, 1])
g = torch.nn.functional.affine_grid(theta, x.shape, align_corners=False)
# Resample image.
y = upfirdn2d.upsample2d(x=x, f=f, up=up, padding=p)
z = torch.nn.functional.grid_sample(y, g, mode='bilinear', padding_mode='zeros', align_corners=False)
# Form mask.
m = torch.zeros_like(y)
c = p * 2 + 1
m[:, :, c:-c, c:-c] = 1
m = torch.nn.functional.grid_sample(m, g, mode='nearest', padding_mode='zeros', align_corners=False)
return z, m | null |
9,327 | import time
import plyfile
import glob
import logging
import numpy as np
import os
import random
import torch
import torch.utils.data
import trimesh
import skimage.measure
import argparse
import mrcfile
from tqdm import tqdm
def convert_sdf_samples_to_ply(
numpy_3d_sdf_tensor,
voxel_grid_origin,
voxel_size,
ply_filename_out,
offset=None,
scale=None,
level=0.0
):
def convert_mrc(input_filename, output_filename, isosurface_level=1):
with mrcfile.open(input_filename) as mrc:
convert_sdf_samples_to_ply(np.transpose(mrc.data, (2, 1, 0)), [0, 0, 0], 1, output_filename, level=isosurface_level) | null |
9,328 | import functools
from typing import Optional
import dnnlib
import numpy as np
import PIL.Image
import PIL.ImageFont
import scipy.ndimage
from . import gl_utils
def get_array(string, *, dropshadow_radius: int=None, **kwargs):
if dropshadow_radius is not None:
offset_x = int(np.ceil(dropshadow_radius*2/3))
offset_y = int(np.ceil(dropshadow_radius*2/3))
return _get_array_priv(string, dropshadow_radius=dropshadow_radius, offset_x=offset_x, offset_y=offset_y, **kwargs)
else:
return _get_array_priv(string, **kwargs)
def get_texture(string, bilinear=True, mipmap=True, **kwargs):
return gl_utils.Texture(image=get_array(string, **kwargs), bilinear=bilinear, mipmap=mipmap) | null |
9,329 | import contextlib
import imgui
def set_default_style(color_scheme='dark', spacing=9, indent=23, scrollbar=27):
s = imgui.get_style()
s.window_padding = [spacing, spacing]
s.item_spacing = [spacing, spacing]
s.item_inner_spacing = [spacing, spacing]
s.columns_min_spacing = spacing
s.indent_spacing = indent
s.scrollbar_size = scrollbar
s.frame_padding = [4, 3]
s.window_border_size = 1
s.child_border_size = 1
s.popup_border_size = 1
s.frame_border_size = 1
s.window_rounding = 0
s.child_rounding = 0
s.popup_rounding = 3
s.frame_rounding = 3
s.scrollbar_rounding = 3
s.grab_rounding = 3
getattr(imgui, f'style_colors_{color_scheme}')(s)
c0 = s.colors[imgui.COLOR_MENUBAR_BACKGROUND]
c1 = s.colors[imgui.COLOR_FRAME_BACKGROUND]
s.colors[imgui.COLOR_POPUP_BACKGROUND] = [x * 0.7 + y * 0.3 for x, y in zip(c0, c1)][:3] + [1] | null |
9,330 | import contextlib
import imgui
def scoped_by_object_id(method):
def decorator(self, *args, **kwargs):
imgui.push_id(str(id(self)))
res = method(self, *args, **kwargs)
imgui.pop_id()
return res
return decorator | null |
9,331 | import contextlib
import imgui
def grayed_out(cond=True):
if cond:
s = imgui.get_style()
text = s.colors[imgui.COLOR_TEXT_DISABLED]
grab = s.colors[imgui.COLOR_SCROLLBAR_GRAB]
back = s.colors[imgui.COLOR_MENUBAR_BACKGROUND]
imgui.push_style_color(imgui.COLOR_TEXT, *text)
imgui.push_style_color(imgui.COLOR_CHECK_MARK, *grab)
imgui.push_style_color(imgui.COLOR_SLIDER_GRAB, *grab)
imgui.push_style_color(imgui.COLOR_SLIDER_GRAB_ACTIVE, *grab)
imgui.push_style_color(imgui.COLOR_FRAME_BACKGROUND, *back)
imgui.push_style_color(imgui.COLOR_FRAME_BACKGROUND_HOVERED, *back)
imgui.push_style_color(imgui.COLOR_FRAME_BACKGROUND_ACTIVE, *back)
imgui.push_style_color(imgui.COLOR_BUTTON, *back)
imgui.push_style_color(imgui.COLOR_BUTTON_HOVERED, *back)
imgui.push_style_color(imgui.COLOR_BUTTON_ACTIVE, *back)
imgui.push_style_color(imgui.COLOR_HEADER, *back)
imgui.push_style_color(imgui.COLOR_HEADER_HOVERED, *back)
imgui.push_style_color(imgui.COLOR_HEADER_ACTIVE, *back)
imgui.push_style_color(imgui.COLOR_POPUP_BACKGROUND, *back)
yield
imgui.pop_style_color(14)
else:
yield
def collapsing_header(text, visible=None, flags=0, default=False, enabled=True, show=True):
expanded = False
if show:
if default:
flags |= imgui.TREE_NODE_DEFAULT_OPEN
if not enabled:
flags |= imgui.TREE_NODE_LEAF
with grayed_out(not enabled):
expanded, visible = imgui.collapsing_header(text, visible=visible, flags=flags)
expanded = expanded and enabled
return expanded, visible | null |
9,332 | import contextlib
import imgui
def button(label, width=0, enabled=True):
def popup_button(label, width=0, enabled=True):
if button(label, width, enabled):
imgui.open_popup(label)
opened = imgui.begin_popup(label)
return opened | null |
9,333 | import contextlib
import imgui
def item_width(width=None):
if width is not None:
imgui.push_item_width(width)
yield
imgui.pop_item_width()
else:
yield
def input_text(label, value, buffer_length, flags, width=None, help_text=''):
old_value = value
color = list(imgui.get_style().colors[imgui.COLOR_TEXT])
if value == '':
color[-1] *= 0.5
with item_width(width):
imgui.push_style_color(imgui.COLOR_TEXT, *color)
value = value if value != '' else help_text
changed, value = imgui.input_text(label, value, buffer_length, flags)
value = value if value != help_text else ''
imgui.pop_style_color(1)
if not flags & imgui.INPUT_TEXT_ENTER_RETURNS_TRUE:
changed = (value != old_value)
return changed, value | null |
9,334 | import contextlib
import imgui
def button(label, width=0, enabled=True):
with grayed_out(not enabled):
clicked = imgui.button(label, width=width)
clicked = clicked and enabled
return clicked
def drag_previous_control(enabled=True):
dragging = False
dx = 0
dy = 0
if imgui.begin_drag_drop_source(imgui.DRAG_DROP_SOURCE_NO_PREVIEW_TOOLTIP):
if enabled:
dragging = True
dx, dy = imgui.get_mouse_drag_delta()
imgui.reset_mouse_drag_delta()
imgui.end_drag_drop_source()
return dragging, dx, dy
def drag_button(label, width=0, enabled=True):
clicked = button(label, width=width, enabled=enabled)
dragging, dx, dy = drag_previous_control(enabled=enabled)
return clicked, dragging, dx, dy | null |
9,335 | import contextlib
import imgui
def drag_previous_control(enabled=True):
dragging = False
dx = 0
dy = 0
if imgui.begin_drag_drop_source(imgui.DRAG_DROP_SOURCE_NO_PREVIEW_TOOLTIP):
if enabled:
dragging = True
dx, dy = imgui.get_mouse_drag_delta()
imgui.reset_mouse_drag_delta()
imgui.end_drag_drop_source()
return dragging, dx, dy
def drag_hidden_window(label, x, y, width, height, enabled=True):
imgui.push_style_color(imgui.COLOR_WINDOW_BACKGROUND, 0, 0, 0, 0)
imgui.push_style_color(imgui.COLOR_BORDER, 0, 0, 0, 0)
imgui.set_next_window_position(x, y)
imgui.set_next_window_size(width, height)
imgui.begin(label, closable=False, flags=(imgui.WINDOW_NO_TITLE_BAR | imgui.WINDOW_NO_RESIZE | imgui.WINDOW_NO_MOVE))
dragging, dx, dy = drag_previous_control(enabled=enabled)
imgui.end()
imgui.pop_style_color(2)
return dragging, dx, dy | null |
9,336 | import os
import functools
import contextlib
import numpy as np
import OpenGL.GL as gl
import OpenGL.GL.ARB.texture_float
import dnnlib
def init_egl():
assert os.environ['PYOPENGL_PLATFORM'] == 'egl' # Must be set before importing OpenGL.
import OpenGL.EGL as egl
import ctypes
# Initialize EGL.
display = egl.eglGetDisplay(egl.EGL_DEFAULT_DISPLAY)
assert display != egl.EGL_NO_DISPLAY
major = ctypes.c_int32()
minor = ctypes.c_int32()
ok = egl.eglInitialize(display, major, minor)
assert ok
assert major.value * 10 + minor.value >= 14
# Choose config.
config_attribs = [
egl.EGL_RENDERABLE_TYPE, egl.EGL_OPENGL_BIT,
egl.EGL_SURFACE_TYPE, egl.EGL_PBUFFER_BIT,
egl.EGL_NONE
]
configs = (ctypes.c_int32 * 1)()
num_configs = ctypes.c_int32()
ok = egl.eglChooseConfig(display, config_attribs, configs, 1, num_configs)
assert ok
assert num_configs.value == 1
config = configs[0]
# Create dummy pbuffer surface.
surface_attribs = [
egl.EGL_WIDTH, 1,
egl.EGL_HEIGHT, 1,
egl.EGL_NONE
]
surface = egl.eglCreatePbufferSurface(display, config, surface_attribs)
assert surface != egl.EGL_NO_SURFACE
# Setup GL context.
ok = egl.eglBindAPI(egl.EGL_OPENGL_API)
assert ok
context = egl.eglCreateContext(display, config, egl.EGL_NO_CONTEXT, None)
assert context != egl.EGL_NO_CONTEXT
ok = egl.eglMakeCurrent(display, surface, surface, context)
assert ok | null |
9,337 | import os
import functools
import contextlib
import numpy as np
import OpenGL.GL as gl
import OpenGL.GL.ARB.texture_float
import dnnlib
def get_texture_format(dtype, channels):
return _texture_formats[(np.dtype(dtype).name, int(channels))]
def prepare_texture_data(image):
image = np.asarray(image)
if image.ndim == 2:
image = image[:, :, np.newaxis]
if image.dtype.name == 'float64':
image = image.astype('float32')
return image
def draw_pixels(image, *, pos=0, zoom=1, align=0, rint=True):
pos = np.broadcast_to(np.asarray(pos, dtype='float32'), [2])
zoom = np.broadcast_to(np.asarray(zoom, dtype='float32'), [2])
align = np.broadcast_to(np.asarray(align, dtype='float32'), [2])
image = prepare_texture_data(image)
height, width, channels = image.shape
size = zoom * [width, height]
pos = pos - size * align
if rint:
pos = np.rint(pos)
fmt = get_texture_format(image.dtype, channels)
gl.glPushAttrib(gl.GL_CURRENT_BIT | gl.GL_PIXEL_MODE_BIT)
gl.glPushClientAttrib(gl.GL_CLIENT_PIXEL_STORE_BIT)
gl.glRasterPos2f(pos[0], pos[1])
gl.glPixelZoom(zoom[0], -zoom[1])
gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT, 1)
gl.glDrawPixels(width, height, fmt.format, fmt.type, image)
gl.glPopClientAttrib()
gl.glPopAttrib() | null |
9,338 | import os
import functools
import contextlib
import numpy as np
import OpenGL.GL as gl
import OpenGL.GL.ARB.texture_float
import dnnlib
def get_texture_format(dtype, channels):
return _texture_formats[(np.dtype(dtype).name, int(channels))]
def read_pixels(width, height, *, pos=0, dtype='uint8', channels=3):
pos = np.broadcast_to(np.asarray(pos, dtype='float32'), [2])
dtype = np.dtype(dtype)
fmt = get_texture_format(dtype, channels)
image = np.empty([height, width, channels], dtype=dtype)
gl.glPushClientAttrib(gl.GL_CLIENT_PIXEL_STORE_BIT)
gl.glPixelStorei(gl.GL_PACK_ALIGNMENT, 1)
gl.glReadPixels(int(np.round(pos[0])), int(np.round(pos[1])), width, height, fmt.format, fmt.type, image)
gl.glPopClientAttrib()
return np.flipud(image) | null |
9,339 | import os
import functools
import contextlib
import numpy as np
import OpenGL.GL as gl
import OpenGL.GL.ARB.texture_float
import dnnlib
def draw_shape(vertices, *, mode=gl.GL_TRIANGLE_FAN, pos=0, size=1, color=1, alpha=1):
assert vertices.ndim == 2 and vertices.shape[1] == 2
pos = np.broadcast_to(np.asarray(pos, dtype='float32'), [2])
size = np.broadcast_to(np.asarray(size, dtype='float32'), [2])
color = np.broadcast_to(np.asarray(color, dtype='float32'), [3])
alpha = np.clip(np.broadcast_to(np.asarray(alpha, dtype='float32'), []), 0, 1)
gl.glPushClientAttrib(gl.GL_CLIENT_VERTEX_ARRAY_BIT)
gl.glPushAttrib(gl.GL_CURRENT_BIT | gl.GL_TRANSFORM_BIT)
gl.glMatrixMode(gl.GL_MODELVIEW)
gl.glPushMatrix()
gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
gl.glEnableClientState(gl.GL_TEXTURE_COORD_ARRAY)
gl.glVertexPointer(2, gl.GL_FLOAT, 0, vertices)
gl.glTexCoordPointer(2, gl.GL_FLOAT, 0, vertices)
gl.glTranslate(pos[0], pos[1], 0)
gl.glScale(size[0], size[1], 1)
gl.glColor4f(color[0] * alpha, color[1] * alpha, color[2] * alpha, alpha)
gl.glDrawArrays(mode, 0, vertices.shape[0])
gl.glPopMatrix()
gl.glPopAttrib()
gl.glPopClientAttrib()
def _setup_rect(rx, ry):
t = np.linspace(0, np.pi / 2, 1 if max(rx, ry) == 0 else 64)
s = 1 - np.sin(t); c = 1 - np.cos(t)
x = [c * rx, 1 - s * rx, 1 - c * rx, s * rx]
y = [s * ry, c * ry, 1 - s * ry, 1 - c * ry]
v = np.stack([x, y], axis=-1).reshape(-1, 2)
return v.astype('float32')
def draw_rect(*, pos=0, pos2=None, size=None, align=0, rint=False, color=1, alpha=1, rounding=0):
assert pos2 is None or size is None
pos = np.broadcast_to(np.asarray(pos, dtype='float32'), [2])
pos2 = np.broadcast_to(np.asarray(pos2, dtype='float32'), [2]) if pos2 is not None else None
size = np.broadcast_to(np.asarray(size, dtype='float32'), [2]) if size is not None else None
size = size if size is not None else pos2 - pos if pos2 is not None else np.array([1, 1], dtype='float32')
pos = pos - size * align
if rint:
pos = np.rint(pos)
rounding = np.broadcast_to(np.asarray(rounding, dtype='float32'), [2])
rounding = np.minimum(np.abs(rounding) / np.maximum(np.abs(size), 1e-8), 0.5)
if np.min(rounding) == 0:
rounding *= 0
vertices = _setup_rect(float(rounding[0]), float(rounding[1]))
draw_shape(vertices, mode=gl.GL_TRIANGLE_FAN, pos=pos, size=size, color=color, alpha=alpha) | null |
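A minimal usage sketch for draw_rect, with hypothetical coordinates: a translucent rounded panel, 200x40 px at (100, 50):
draw_rect(pos=[100, 50], size=[200, 40], color=[0.2, 0.2, 0.2], alpha=0.9, rounding=8)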
9,340 | import os
import functools
import contextlib
import numpy as np
import OpenGL.GL as gl
import OpenGL.GL.ARB.texture_float
import dnnlib
def draw_shape(vertices, *, mode=gl.GL_TRIANGLE_FAN, pos=0, size=1, color=1, alpha=1):
def _setup_circle(hole):
def draw_circle(*, center=0, radius=100, hole=0, color=1, alpha=1):
hole = np.broadcast_to(np.asarray(hole, dtype='float32'), [])
vertices = _setup_circle(float(hole))
draw_shape(vertices, mode=gl.GL_TRIANGLE_STRIP, pos=center, size=radius, color=color, alpha=alpha) | null |
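The `_setup_circle` body is stubbed above; a plausible reconstruction, consistent with the GL_TRIANGLE_STRIP draw (alternating outer/inner ring vertices, where `hole=0` collapses the inner ring into a filled disc):
@functools.lru_cache(maxsize=None)  # assumption: cached per hole value, like the _setup_rect helper
def _setup_circle(hole):
    t = np.linspace(0, np.pi * 2, 128)
    s = np.sin(t); c = np.cos(t)
    v = np.stack([c, s, c * hole, s * hole], axis=-1).reshape(-1, 2)  # outer, inner, outer, inner, ...
    return v.astype('float32')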
9,341 | import re
import numpy as np
import torch
import dnnlib
from . import misc
_num_moments = 3
_counter_dtype = torch.float64
_sync_device = None
_sync_called = False
_counters = dict()
_cumulative = dict()
The provided code snippet includes necessary dependencies for implementing the `_sync` function. Write a Python function `def _sync(names)` to solve the following problem:
r"""Synchronize the global cumulative counters across devices and processes. Called internally by `Collector.update()`.
Here is the function:
def _sync(names):
r"""Synchronize the global cumulative counters across devices and
processes. Called internally by `Collector.update()`.
"""
if len(names) == 0:
return []
global _sync_called
_sync_called = True
# Collect deltas within current rank.
deltas = []
device = _sync_device if _sync_device is not None else torch.device('cpu')
for name in names:
delta = torch.zeros([_num_moments], dtype=_counter_dtype, device=device)
for counter in _counters[name].values():
delta.add_(counter.to(device))
counter.copy_(torch.zeros_like(counter))
deltas.append(delta)
deltas = torch.stack(deltas)
# Sum deltas across ranks.
if _sync_device is not None:
torch.distributed.all_reduce(deltas)
# Update cumulative values.
deltas = deltas.cpu()
for idx, name in enumerate(names):
if name not in _cumulative:
_cumulative[name] = torch.zeros([_num_moments], dtype=_counter_dtype)
_cumulative[name].add_(deltas[idx])
# Return name-value pairs.
return [(name, _cumulative[name]) for name in names] | r"""Synchronize the global cumulative counters across devices and processes. Called internally by `Collector.update()`. |
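Assuming the usual training_stats layout of the three moments as [count, sum, sum of squares] (the report side is not shown in this snippet), mean and std can be recovered from a cumulative counter like so (hypothetical stat name):
n, sx, sx2 = _cumulative['Loss/G/loss'].tolist()
mean = sx / max(n, 1)
std = max(sx2 / max(n, 1) - mean ** 2, 0) ** 0.5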
9,342 | import sys
import pickle
import io
import inspect
import copy
import uuid
import types
import dnnlib
_import_hooks = []
The provided code snippet includes necessary dependencies for implementing the `import_hook` function. Write a Python function `def import_hook(hook)` to solve the following problem:
r"""Register an import hook that is called whenever a persistent object is being unpickled. A typical use case is to patch the pickled source code to avoid errors and inconsistencies when the API of some imported module has changed. The hook should have the following signature: hook(meta) -> modified meta `meta` is an instance of `dnnlib.EasyDict` with the following fields: type: Type of the persistent object, e.g. `'class'`. version: Internal version number of `torch_utils.persistence`. module_src Original source code of the Python module. class_name: Class name in the original Python module. state: Internal state of the object. Example: @persistence.import_hook def wreck_my_network(meta): if meta.class_name == 'MyNetwork': print('MyNetwork is being imported. I will wreck it!') meta.module_src = meta.module_src.replace("True", "False") return meta
Here is the function:
def import_hook(hook):
r"""Register an import hook that is called whenever a persistent object
is being unpickled. A typical use case is to patch the pickled source
code to avoid errors and inconsistencies when the API of some imported
module has changed.
The hook should have the following signature:
hook(meta) -> modified meta
`meta` is an instance of `dnnlib.EasyDict` with the following fields:
type: Type of the persistent object, e.g. `'class'`.
version: Internal version number of `torch_utils.persistence`.
module_src Original source code of the Python module.
class_name: Class name in the original Python module.
state: Internal state of the object.
Example:
@persistence.import_hook
def wreck_my_network(meta):
if meta.class_name == 'MyNetwork':
print('MyNetwork is being imported. I will wreck it!')
meta.module_src = meta.module_src.replace("True", "False")
return meta
"""
assert callable(hook)
_import_hooks.append(hook) | r"""Register an import hook that is called whenever a persistent object is being unpickled. A typical use case is to patch the pickled source code to avoid errors and inconsistencies when the API of some imported module has changed. The hook should have the following signature: hook(meta) -> modified meta `meta` is an instance of `dnnlib.EasyDict` with the following fields: type: Type of the persistent object, e.g. `'class'`. version: Internal version number of `torch_utils.persistence`. module_src Original source code of the Python module. class_name: Class name in the original Python module. state: Internal state of the object. Example: @persistence.import_hook def wreck_my_network(meta): if meta.class_name == 'MyNetwork': print('MyNetwork is being imported. I will wreck it!') meta.module_src = meta.module_src.replace("True", "False") return meta |
9,343 | import torch
def _should_use_custom_op():
class _GridSample2dForward(torch.autograd.Function):
def forward(ctx, input, grid):
def backward(ctx, grad_output):
def grid_sample(input, grid):
if _should_use_custom_op():
return _GridSample2dForward.apply(input, grid)
return torch.nn.functional.grid_sample(input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False) | null |
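A minimal usage sketch, assuming the stubbed helpers above behave as in the upstream grid_sample_gradfix module: an identity warp reproduces the input.
x = torch.arange(16, dtype=torch.float32).reshape(1, 1, 4, 4)
theta = torch.tensor([[[1., 0., 0.], [0., 1., 0.]]])                 # identity affine
grid = torch.nn.functional.affine_grid(theta, x.shape, align_corners=False)
y = grid_sample(x, grid)                                             # equals x up to float error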
9,344 | import os
import numpy as np
import torch
from .. import custom_ops
from .. import misc
from . import conv2d_gradfix
The provided code snippet includes necessary dependencies for implementing the `setup_filter` function. Write a Python function `def setup_filter(f, device=torch.device('cpu'), normalize=True, flip_filter=False, gain=1, separable=None)` to solve the following problem:
r"""Convenience function to setup 2D FIR filter for `upfirdn2d()`. Args: f: Torch tensor, numpy array, or python list of the shape `[filter_height, filter_width]` (non-separable), `[filter_taps]` (separable), `[]` (impulse), or `None` (identity). device: Result device (default: cpu). normalize: Normalize the filter so that it retains the magnitude for constant input signal (DC)? (default: True). flip_filter: Flip the filter? (default: False). gain: Overall scaling factor for signal magnitude (default: 1). separable: Return a separable filter? (default: select automatically). Returns: Float32 tensor of the shape `[filter_height, filter_width]` (non-separable) or `[filter_taps]` (separable).
Here is the function:
def setup_filter(f, device=torch.device('cpu'), normalize=True, flip_filter=False, gain=1, separable=None):
r"""Convenience function to setup 2D FIR filter for `upfirdn2d()`.
Args:
f: Torch tensor, numpy array, or python list of the shape
`[filter_height, filter_width]` (non-separable),
`[filter_taps]` (separable),
`[]` (impulse), or
`None` (identity).
device: Result device (default: cpu).
normalize: Normalize the filter so that it retains the magnitude
for constant input signal (DC)? (default: True).
flip_filter: Flip the filter? (default: False).
gain: Overall scaling factor for signal magnitude (default: 1).
separable: Return a separable filter? (default: select automatically).
Returns:
Float32 tensor of the shape
`[filter_height, filter_width]` (non-separable) or
`[filter_taps]` (separable).
"""
# Validate.
if f is None:
f = 1
f = torch.as_tensor(f, dtype=torch.float32)
assert f.ndim in [0, 1, 2]
assert f.numel() > 0
if f.ndim == 0:
f = f[np.newaxis]
# Separable?
if separable is None:
separable = (f.ndim == 1 and f.numel() >= 8)
if f.ndim == 1 and not separable:
f = f.ger(f)
assert f.ndim == (1 if separable else 2)
# Apply normalize, flip, gain, and device.
if normalize:
f /= f.sum()
if flip_filter:
f = f.flip(list(range(f.ndim)))
f = f * (gain ** (f.ndim / 2))
f = f.to(device=device)
return f | r"""Convenience function to setup 2D FIR filter for `upfirdn2d()`. Args: f: Torch tensor, numpy array, or python list of the shape `[filter_height, filter_width]` (non-separable), `[filter_taps]` (separable), `[]` (impulse), or `None` (identity). device: Result device (default: cpu). normalize: Normalize the filter so that it retains the magnitude for constant input signal (DC)? (default: True). flip_filter: Flip the filter? (default: False). gain: Overall scaling factor for signal magnitude (default: 1). separable: Return a separable filter? (default: select automatically). Returns: Float32 tensor of the shape `[filter_height, filter_width]` (non-separable) or `[filter_taps]` (separable). |
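A quick usage sketch; [1, 3, 3, 1] is the binomial filter commonly used with upfirdn2d:
f = setup_filter([1, 3, 3, 1])                      # 4 taps < 8, so non-separable: shape [4, 4]
f_sep = setup_filter([1, 3, 3, 1], separable=True)  # forced separable: shape [4]
assert abs(float(f.sum()) - 1.0) < 1e-6             # normalized to unit DC gain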
9,345 | import contextlib
import torch
weight_gradients_disabled = False
@contextlib.contextmanager
def no_weight_gradients(disable=True):
global weight_gradients_disabled
old = weight_gradients_disabled
if disable:
weight_gradients_disabled = True
yield
weight_gradients_disabled = old | null |
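Usage sketch with a hypothetical `net` and input `x`; inside the block, convolutions routed through conv2d_gradfix skip their weight gradients:
with no_weight_gradients():
    out = net(x)
    out.sum().backward()   # activation grads flow, conv weight grads are suppressed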
9,346 | import torch
def _unbroadcast(x, shape):
extra_dims = x.ndim - len(shape)
assert extra_dims >= 0
dim = [i for i in range(x.ndim) if x.shape[i] > 1 and (i < extra_dims or shape[i - extra_dims] == 1)]
if len(dim):
x = x.sum(dim=dim, keepdim=True)
if extra_dims:
x = x.reshape(-1, *x.shape[extra_dims+1:])
assert x.shape == shape
return x | null |
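A small demo of what _unbroadcast undoes: the gradient of `a + torch.zeros(2, 3, 4)` with respect to a (3, 1) tensor `a` sums over the broadcast dims:
grad_out = torch.ones(2, 3, 4)
grad_a = _unbroadcast(grad_out, (3, 1))   # sums dims 0 and 2
assert tuple(grad_a.shape) == (3, 1) and float(grad_a[0, 0]) == 8.0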
9,347 | import re
import contextlib
import numpy as np
import torch
import warnings
import dnnlib
def profiled_function(fn):
def decorator(*args, **kwargs):
with torch.autograd.profiler.record_function(fn.__name__):
return fn(*args, **kwargs)
decorator.__name__ = fn.__name__
return decorator | null |
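Usage sketch: the wrapped call shows up under its own name in the autograd profiler (hypothetical function and sizes):
@profiled_function
def heavy_step(x):
    return x @ x.t()
with torch.autograd.profiler.profile() as prof:
    heavy_step(torch.randn(256, 256))
print(prof.key_averages().table(sort_by='cpu_time_total'))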
9,348 | import re
import contextlib
import numpy as np
import torch
import warnings
import dnnlib
@contextlib.contextmanager
def ddp_sync(module, sync):
assert isinstance(module, torch.nn.Module)
if sync or not isinstance(module, torch.nn.parallel.DistributedDataParallel):
yield
else:
with module.no_sync():
yield | null |
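Typical gradient-accumulation usage, with hypothetical `ddp_module` and `batches`: sync gradients only on the last microbatch to cut communication.
for i in range(num_microbatches):
    with ddp_sync(ddp_module, sync=(i == num_microbatches - 1)):
        ddp_module(batches[i]).sum().backward()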
9,349 | import functools
import gzip
import io
import json
import os
import pickle
import re
import sys
import tarfile
import zipfile
from pathlib import Path
from typing import Callable, Optional, Tuple, Union
import click
import numpy as np
import PIL.Image
from tqdm import tqdm
The provided code snippet includes necessary dependencies for implementing the `parse_tuple` function. Write a Python function `def parse_tuple(s: str) -> Tuple[int, int]` to solve the following problem:
Parse a 'M,N' or 'MxN' integer tuple. Example: '4x2' returns (4,2) '0,1' returns (0,1)
Here is the function:
def parse_tuple(s: str) -> Tuple[int, int]:
'''Parse a 'M,N' or 'MxN' integer tuple.
Example:
'4x2' returns (4,2)
'0,1' returns (0,1)
'''
if m := re.match(r'^(\d+)[x,](\d+)$', s):
return (int(m.group(1)), int(m.group(2)))
raise ValueError(f'cannot parse tuple {s}') | Parse a 'M,N' or 'MxN' integer tuple. Example: '4x2' returns (4,2) '0,1' returns (0,1) |
9,350 | import functools
import gzip
import io
import json
import os
import pickle
import re
import sys
import tarfile
import zipfile
from pathlib import Path
from typing import Callable, Optional, Tuple, Union
import click
import numpy as np
import PIL.Image
from tqdm import tqdm
def error(msg):
print('Error: ' + msg)
sys.exit(1)
def make_transform(
transform: Optional[str],
output_width: Optional[int],
output_height: Optional[int]
) -> Callable[[np.ndarray], Optional[np.ndarray]]:
def scale(width, height, img):
w = img.shape[1]
h = img.shape[0]
if width == w and height == h:
return img
img = PIL.Image.fromarray(img)
ww = width if width is not None else w
hh = height if height is not None else h
img = img.resize((ww, hh), PIL.Image.LANCZOS)
return np.array(img)
def center_crop(width, height, img):
crop = np.min(img.shape[:2])
img = img[(img.shape[0] - crop) // 2 : (img.shape[0] + crop) // 2, (img.shape[1] - crop) // 2 : (img.shape[1] + crop) // 2]
img = PIL.Image.fromarray(img, 'RGB')
img = img.resize((width, height), PIL.Image.LANCZOS)
return np.array(img)
def center_crop_wide(width, height, img):
ch = int(np.round(width * img.shape[0] / img.shape[1]))
if img.shape[1] < width or ch < height:
return None
img = img[(img.shape[0] - ch) // 2 : (img.shape[0] + ch) // 2]
img = PIL.Image.fromarray(img, 'RGB')
img = img.resize((width, height), PIL.Image.LANCZOS)
img = np.array(img)
canvas = np.zeros([width, width, 3], dtype=np.uint8)
canvas[(width - height) // 2 : (width + height) // 2, :] = img
return canvas
if transform is None:
return functools.partial(scale, output_width, output_height)
if transform == 'center-crop':
if (output_width is None) or (output_height is None):
error('must specify --resolution=WxH when using ' + transform + ' transform')
return functools.partial(center_crop, output_width, output_height)
if transform == 'center-crop-wide':
if (output_width is None) or (output_height is None):
error('must specify --resolution=WxH when using ' + transform + ' transform')
return functools.partial(center_crop_wide, output_width, output_height)
assert False, 'unknown transform'
def open_dataset(source, *, max_images: Optional[int]):
if os.path.isdir(source):
if source.rstrip('/').endswith('_lmdb'):
return open_lmdb(source, max_images=max_images)
else:
return open_image_folder(source, max_images=max_images)
elif os.path.isfile(source):
if os.path.basename(source) == 'cifar-10-python.tar.gz':
return open_cifar10(source, max_images=max_images)
elif os.path.basename(source) == 'train-images-idx3-ubyte.gz':
return open_mnist(source, max_images=max_images)
elif file_ext(source) == 'zip':
return open_image_zip(source, max_images=max_images)
else:
assert False, 'unknown archive type'
else:
error(f'Missing input file or directory: {source}')
def open_dest(dest: str) -> Tuple[str, Callable[[str, Union[bytes, str]], None], Callable[[], None]]:
dest_ext = file_ext(dest)
if dest_ext == 'zip':
if os.path.dirname(dest) != '':
os.makedirs(os.path.dirname(dest), exist_ok=True)
zf = zipfile.ZipFile(file=dest, mode='w', compression=zipfile.ZIP_STORED)
def zip_write_bytes(fname: str, data: Union[bytes, str]):
zf.writestr(fname, data)
return '', zip_write_bytes, zf.close
else:
# If the output folder already exists, check that it is
# empty.
#
# Note: creating the output directory is not strictly
# necessary as folder_write_bytes() also mkdirs, but it's better
# to give an error message earlier in case the dest folder
# somehow cannot be created.
if os.path.isdir(dest) and len(os.listdir(dest)) != 0:
error('--dest folder must be empty')
os.makedirs(dest, exist_ok=True)
def folder_write_bytes(fname: str, data: Union[bytes, str]):
os.makedirs(os.path.dirname(fname), exist_ok=True)
with open(fname, 'wb') as fout:
if isinstance(data, str):
data = data.encode('utf8')
fout.write(data)
return dest, folder_write_bytes, lambda: None
The provided code snippet includes necessary dependencies for implementing the `convert_dataset` function. Write a Python function `def convert_dataset( ctx: click.Context, source: str, dest: str, max_images: Optional[int], transform: Optional[str], resolution: Optional[Tuple[int, int]] )` to solve the following problem:
Convert an image dataset into a dataset archive usable with StyleGAN2 ADA PyTorch. The input dataset format is guessed from the --source argument: \b --source *_lmdb/ Load LSUN dataset --source cifar-10-python.tar.gz Load CIFAR-10 dataset --source train-images-idx3-ubyte.gz Load MNIST dataset --source path/ Recursively load all images from path/ --source dataset.zip Recursively load all images from dataset.zip Specifying the output format and path: \b --dest /path/to/dir Save output files under /path/to/dir --dest /path/to/dataset.zip Save output files into /path/to/dataset.zip The output dataset format can be either an image folder or an uncompressed zip archive. Zip archives makes it easier to move datasets around file servers and clusters, and may offer better training performance on network file systems. Images within the dataset archive will be stored as uncompressed PNG. Uncompressed PNGs can be efficiently decoded in the training loop. Class labels are stored in a file called 'dataset.json' that is stored at the dataset root folder. This file has the following structure: \b { "labels": [ ["00000/img00000000.png",6], ["00000/img00000001.png",9], ... repeated for every image in the dataset ["00049/img00049999.png",1] ] } If the 'dataset.json' file cannot be found, the dataset is interpreted as not containing class labels. Image scale/crop and resolution requirements: Output images must be square-shaped and they must all have the same power-of-two dimensions. To scale arbitrary input image size to a specific width and height, use the --resolution option. Output resolution will be either the original input resolution (if resolution was not specified) or the one specified with --resolution option. Use the --transform=center-crop or --transform=center-crop-wide options to apply a center crop transform on the input image. These options should be used with the --resolution option. For example: \b python dataset_tool.py --source LSUN/raw/cat_lmdb --dest /tmp/lsun_cat \\ --transform=center-crop-wide --resolution=512x384
Here is the function:
def convert_dataset(
ctx: click.Context,
source: str,
dest: str,
max_images: Optional[int],
transform: Optional[str],
resolution: Optional[Tuple[int, int]]
):
"""Convert an image dataset into a dataset archive usable with StyleGAN2 ADA PyTorch.
The input dataset format is guessed from the --source argument:
\b
--source *_lmdb/ Load LSUN dataset
--source cifar-10-python.tar.gz Load CIFAR-10 dataset
--source train-images-idx3-ubyte.gz Load MNIST dataset
--source path/ Recursively load all images from path/
--source dataset.zip Recursively load all images from dataset.zip
Specifying the output format and path:
\b
--dest /path/to/dir Save output files under /path/to/dir
--dest /path/to/dataset.zip Save output files into /path/to/dataset.zip
The output dataset format can be either an image folder or an uncompressed zip archive.
Zip archives make it easier to move datasets around file servers and clusters, and may
offer better training performance on network file systems.
Images within the dataset archive will be stored as uncompressed PNG.
Uncompressed PNGs can be efficiently decoded in the training loop.
Class labels are stored in a file called 'dataset.json' that is stored at the
dataset root folder. This file has the following structure:
\b
{
"labels": [
["00000/img00000000.png",6],
["00000/img00000001.png",9],
... repeated for every image in the dataset
["00049/img00049999.png",1]
]
}
If the 'dataset.json' file cannot be found, the dataset is interpreted as
not containing class labels.
Image scale/crop and resolution requirements:
Output images must be square-shaped and they must all have the same power-of-two
dimensions.
To scale arbitrary input image size to a specific width and height, use the
--resolution option. Output resolution will be either the original
input resolution (if resolution was not specified) or the one specified with
--resolution option.
Use the --transform=center-crop or --transform=center-crop-wide options to apply a
center crop transform on the input image. These options should be used with the
--resolution option. For example:
\b
python dataset_tool.py --source LSUN/raw/cat_lmdb --dest /tmp/lsun_cat \\
--transform=center-crop-wide --resolution=512x384
"""
PIL.Image.init() # type: ignore
if dest == '':
ctx.fail('--dest output filename or directory must not be an empty string')
num_files, input_iter = open_dataset(source, max_images=max_images)
archive_root_dir, save_bytes, close_dest = open_dest(dest)
if resolution is None: resolution = (None, None)
transform_image = make_transform(transform, *resolution)
dataset_attrs = None
labels = []
for idx, image in tqdm(enumerate(input_iter), total=num_files):
idx_str = f'{idx:08d}'
archive_fname = f'{idx_str[:5]}/img{idx_str}.png'
# Apply crop and resize.
img = transform_image(image['img'])
# Transform may drop images.
if img is None:
continue
# Error check to require uniform image attributes across
# the whole dataset.
channels = img.shape[2] if img.ndim == 3 else 1
cur_image_attrs = {
'width': img.shape[1],
'height': img.shape[0],
'channels': channels
}
if dataset_attrs is None:
dataset_attrs = cur_image_attrs
width = dataset_attrs['width']
height = dataset_attrs['height']
if width != height:
error(f'Image dimensions after scale and crop are required to be square. Got {width}x{height}')
if dataset_attrs['channels'] not in [1, 3, 4]:
error('Input images must be stored as RGB or grayscale')
if width != 2 ** int(np.floor(np.log2(width))):
error('Image width/height after scale and crop are required to be power-of-two')
elif dataset_attrs != cur_image_attrs:
err = [f' dataset {k}/cur image {k}: {dataset_attrs[k]}/{cur_image_attrs[k]}' for k in dataset_attrs.keys()] # pylint: disable=unsubscriptable-object
error(f'Image {archive_fname} attributes must be equal across all images of the dataset. Got:\n' + '\n'.join(err))
# Save the image as an uncompressed PNG.
img = PIL.Image.fromarray(img, { 1: 'L', 3: 'RGB', 4: 'RGBA'}[channels])
if channels == 4: img = img.convert('RGB')
image_bits = io.BytesIO()
img.save(image_bits, format='png', compress_level=0, optimize=False)
save_bytes(os.path.join(archive_root_dir, archive_fname), image_bits.getbuffer())
labels.append([archive_fname, image['label']] if image['label'] is not None else None)
metadata = {
'labels': labels if all(x is not None for x in labels) else None
}
save_bytes(os.path.join(archive_root_dir, 'dataset.json'), json.dumps(metadata))
close_dest() | Convert an image dataset into a dataset archive usable with StyleGAN2 ADA PyTorch. The input dataset format is guessed from the --source argument: \b --source *_lmdb/ Load LSUN dataset --source cifar-10-python.tar.gz Load CIFAR-10 dataset --source train-images-idx3-ubyte.gz Load MNIST dataset --source path/ Recursively load all images from path/ --source dataset.zip Recursively load all images from dataset.zip Specifying the output format and path: \b --dest /path/to/dir Save output files under /path/to/dir --dest /path/to/dataset.zip Save output files into /path/to/dataset.zip The output dataset format can be either an image folder or an uncompressed zip archive. Zip archives makes it easier to move datasets around file servers and clusters, and may offer better training performance on network file systems. Images within the dataset archive will be stored as uncompressed PNG. Uncompressed PNGs can be efficiently decoded in the training loop. Class labels are stored in a file called 'dataset.json' that is stored at the dataset root folder. This file has the following structure: \b { "labels": [ ["00000/img00000000.png",6], ["00000/img00000001.png",9], ... repeated for every image in the dataset ["00049/img00049999.png",1] ] } If the 'dataset.json' file cannot be found, the dataset is interpreted as not containing class labels. Image scale/crop and resolution requirements: Output images must be square-shaped and they must all have the same power-of-two dimensions. To scale arbitrary input image size to a specific width and height, use the --resolution option. Output resolution will be either the original input resolution (if resolution was not specified) or the one specified with --resolution option. Use the --transform=center-crop or --transform=center-crop-wide options to apply a center crop transform on the input image. These options should be used with the --resolution option. For example: \b python dataset_tool.py --source LSUN/raw/cat_lmdb --dest /tmp/lsun_cat \\ --transform=center-crop-wide --resolution=512x384 |
9,351 | import json
import numpy as np
import os
from tqdm import tqdm
import argparse
def list_recursive(folderpath):
return [os.path.join(folderpath, filename) for filename in os.listdir(folderpath)] | null |
9,352 | import json
import numpy as np
from PIL import Image, ImageOps
import os
from tqdm import tqdm
import argparse
def gen_pose(rot_mat):
rot_mat = np.array(rot_mat).copy()
forward = rot_mat[:, 2]
translation = forward * -2.7
pose = np.array([
[rot_mat[0, 0], rot_mat[0, 1], rot_mat[0, 2], translation[0]],
[rot_mat[1, 0], rot_mat[1, 1], rot_mat[1, 2], translation[1]],
[rot_mat[2, 0], rot_mat[2, 1], rot_mat[2, 2], translation[2]],
[0, 0, 0, 1],
])
return pose | null |
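Sanity check: with the identity rotation, forward is +z, so the camera lands 2.7 units along -z looking toward the origin:
pose = gen_pose(np.eye(3))
assert np.allclose(pose[:3, 3], [0, 0, -2.7])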
9,353 | import json
import numpy as np
from PIL import Image, ImageOps
import os
from tqdm import tqdm
import argparse
def flip_yaw(pose_matrix):
flipped = pose_matrix.copy()
flipped[0, 1] *= -1
flipped[0, 2] *= -1
flipped[1, 0] *= -1
flipped[2, 0] *= -1
flipped[0, 3] *= -1
return flipped | null |
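flip_yaw mirrors a cam2world pose across the yz-plane, e.g. to pair with horizontally flipped images; a quick check:
pose = np.eye(4); pose[0, 3] = 1.5
assert flip_yaw(pose)[0, 3] == -1.5   # camera x position is negated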
9,355 | import os
import sys
import requests
import html
import hashlib
import PIL.Image
import PIL.ImageFile
import numpy as np
import scipy.ndimage
import threading
import queue
import time
import json
import uuid
import glob
import argparse
import itertools
import shutil
from collections import OrderedDict, defaultdict
import cv2
def run(tasks, start_index, **download_kwargs):
if not os.path.isfile(json_spec['file_path']) or not os.path.isfile('LICENSE.txt'):
print('Downloading JSON metadata...')
download_files([json_spec, license_specs['json']], **download_kwargs)
print('Parsing JSON metadata...')
with open(json_spec['file_path'], 'rb') as f:
json_data = json.load(f, object_pairs_hook=OrderedDict)
if 'stats' in tasks:
print_statistics(json_data)
specs = []
if 'images' in tasks:
specs += [item['image'] for item in json_data.values()] + [license_specs['images']]
if 'thumbs' in tasks:
specs += [item['thumbnail'] for item in json_data.values()] + [license_specs['thumbs']]
if 'wilds' in tasks:
specs += [item['in_the_wild'] for item in json_data.values()] + [license_specs['wilds']]
if 'tfrecords' in tasks:
specs += tfrecords_specs + [license_specs['tfrecords']]
if len(specs):
print('Downloading %d files...' % len(specs))
np.random.shuffle(specs) # to make the workload more homogeneous
download_files(specs, **download_kwargs)
if 'align' in tasks:
recreate_aligned_images_fast(json_data, dst_dir="realign1500", output_size=1500, start_index=start_index)
def run_cmdline(argv):
parser = argparse.ArgumentParser(prog=argv[0], description='Download Flickr-Face-HQ (FFHQ) dataset to current working directory.')
parser.add_argument('-j', '--json', help='download metadata as JSON (254 MB)', dest='tasks', action='append_const', const='json')
parser.add_argument('-s', '--stats', help='print statistics about the dataset', dest='tasks', action='append_const', const='stats')
parser.add_argument('-i', '--images', help='download 1024x1024 images as PNG (89.1 GB)', dest='tasks', action='append_const', const='images')
parser.add_argument('-t', '--thumbs', help='download 128x128 thumbnails as PNG (1.95 GB)', dest='tasks', action='append_const', const='thumbs')
parser.add_argument('-w', '--wilds', help='download in-the-wild images as PNG (955 GB)', dest='tasks', action='append_const', const='wilds')
parser.add_argument('-r', '--tfrecords', help='download multi-resolution TFRecords (273 GB)', dest='tasks', action='append_const', const='tfrecords')
parser.add_argument('-a', '--align', help='recreate 1024x1024 images from in-the-wild images', dest='tasks', action='append_const', const='align')
parser.add_argument('--num_threads', help='number of concurrent download threads (default: 32)', type=int, default=32, metavar='NUM')
parser.add_argument('--status_delay', help='time between download status prints (default: 0.2)', type=float, default=0.2, metavar='SEC')
parser.add_argument('--timing_window', help='samples for estimating download eta (default: 50)', type=int, default=50, metavar='LEN')
parser.add_argument('--chunk_size', help='chunk size for each download thread (default: 128)', type=int, default=128, metavar='KB')
parser.add_argument('--num_attempts', help='number of download attempts per file (default: 10)', type=int, default=10, metavar='NUM')
parser.add_argument('--start_index', help='start index for alignment (default: 0)', type=int, default=0, metavar='NUM')
args = parser.parse_args()
if not args.tasks:
print('No tasks specified. Please see "-h" for help.')
exit(1)
run(**vars(args)) | null |
9,356 | import multiprocessing
import os
import re
import sys
import requests
import html
import hashlib
import PIL.Image
import PIL.ImageFile
import numpy as np
import scipy.ndimage
import threading
import queue
import time
import json
import uuid
import glob
import argparse
import itertools
import shutil
from collections import OrderedDict, defaultdict
import cv2
from tqdm import tqdm
import multiprocessing
def process_image(kwargs):
def recreate_aligned_images_fast(json_data, src_dir='.', dst_dir='realign1024x1024', output_size=1024, transform_size=4096, enable_padding=True, n_threads=12):
print('Recreating aligned images...')
if dst_dir:
os.makedirs(dst_dir, exist_ok=True)
shutil.copyfile(os.path.join(src_dir, 'LICENSE.txt'), os.path.join(dst_dir, 'LICENSE.txt'))
print(len(json_data))
inputs = [{'item_idx': item_idx, 'item': item, 'src_dir': src_dir, 'dst_dir': dst_dir, 'output_size': output_size, 'transform_size': transform_size, 'enable_padding': enable_padding} for item_idx, item in enumerate(json_data.values())]
with multiprocessing.Pool(n_threads) as p:
results = list(tqdm(p.imap(process_image, inputs), total=len(json_data), smoothing=0.1))
# for input in tqdm(inputs):
# process_image(input)
# All done.
print('\r%d / %d ... done' % (len(json_data), len(json_data))) | null |
9,357 | import json
import numpy as np
from PIL import Image, ImageOps
import os
from tqdm import tqdm
import argparse
import torch
import sys
from camera_utils import create_cam2world_matrix
def fix_intrinsics(intrinsics):
intrinsics = np.array(intrinsics).copy()
assert intrinsics.shape == (3, 3), intrinsics
intrinsics[0,0] = 2985.29/700
intrinsics[1,1] = 2985.29/700
intrinsics[0,2] = 1/2
intrinsics[1,2] = 1/2
assert intrinsics[0,1] == 0
assert intrinsics[2,2] == 1
assert intrinsics[1,0] == 0
assert intrinsics[2,0] == 0
assert intrinsics[2,1] == 0
return intrinsics | null |
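This pins any 3x3 intrinsics to a fixed convention: focal length 2985.29/700 and principal point at the image center in [0, 1] coordinates. For example:
K = fix_intrinsics(np.eye(3))
assert np.isclose(K[0, 0], 2985.29 / 700) and K[0, 2] == 0.5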
9,358 | import json
import numpy as np
from PIL import Image, ImageOps
import os
from tqdm import tqdm
import argparse
import torch
import sys
from camera_utils import create_cam2world_matrix
def fix_pose(pose):
COR = np.array([0, 0, 0.175])
pose = np.array(pose).copy()
location = pose[:3, 3]
direction = (location - COR) / np.linalg.norm(location - COR)
pose[:3, 3] = direction * 2.7 + COR
return pose | null |
9,359 | import json
import numpy as np
from PIL import Image, ImageOps
import os
from tqdm import tqdm
import argparse
import torch
import sys
from camera_utils import create_cam2world_matrix
def fix_pose_orig(pose):
pose = np.array(pose).copy()
location = pose[:3, 3]
radius = np.linalg.norm(location)
pose[:3, 3] = pose[:3, 3]/radius * 2.7
return pose | null |
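fix_pose_orig rescales the camera position to the canonical radius 2.7 while keeping its direction; a quick check:
pose = np.eye(4); pose[:3, 3] = [0, 0, 5.4]
assert np.isclose(np.linalg.norm(fix_pose_orig(pose)[:3, 3]), 2.7)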
9,360 | import json
import numpy as np
from PIL import Image, ImageOps
import os
from tqdm import tqdm
import argparse
import torch
import sys
from camera_utils import create_cam2world_matrix
def create_cam2world_matrix(forward_vector, origin):
def fix_pose_simplify(pose):
cam_location = torch.tensor(pose).clone()[:3, 3]
normalized_cam_location = torch.nn.functional.normalize(cam_location - torch.tensor([0, 0, 0.175]), dim=0)
camera_view_dir = - normalized_cam_location
camera_pos = 2.7 * normalized_cam_location + np.array([0, 0, 0.175])
simple_pose_matrix = create_cam2world_matrix(camera_view_dir.unsqueeze(0), camera_pos.unsqueeze(0))[0]
return simple_pose_matrix.numpy() | null |
9,361 | import json
import numpy as np
from PIL import Image, ImageOps
import os
from tqdm import tqdm
import argparse
import torch
import sys
from camera_utils import create_cam2world_matrix
def flip_yaw(pose_matrix):
flipped = pose_matrix.copy()
flipped[0, 1] *= -1
flipped[0, 2] *= -1
flipped[1, 0] *= -1
flipped[2, 0] *= -1
flipped[0, 3] *= -1
return flipped | null |
9,362 | from share import *
import config
import cv2
import einops
import gradio as gr
import numpy as np
import torch
import random
from pytorch_lightning import seed_everything
from annotator.util import resize_image, HWC3
from cldm.model import create_model, load_state_dict
from cldm.ddim_hacked import DDIMSampler
model = create_model('./models/cldm_v15.yaml').cpu()
model.load_state_dict(load_state_dict('./models/control_sd15_scribble.pth', location='cuda'))
model = model.cuda()
ddim_sampler = DDIMSampler(model)
def HWC3(x):
assert x.dtype == np.uint8
if x.ndim == 2:
x = x[:, :, None]
assert x.ndim == 3
H, W, C = x.shape
assert C == 1 or C == 3 or C == 4
if C == 3:
return x
if C == 1:
return np.concatenate([x, x, x], axis=2)
if C == 4:
color = x[:, :, 0:3].astype(np.float32)
alpha = x[:, :, 3:4].astype(np.float32) / 255.0
y = color * alpha + 255.0 * (1.0 - alpha)
y = y.clip(0, 255).astype(np.uint8)
return y
def resize_image(input_image, resolution):
H, W, C = input_image.shape
H = float(H)
W = float(W)
k = float(resolution) / min(H, W)
H *= k
W *= k
H = int(np.round(H / 64.0)) * 64
W = int(np.round(W / 64.0)) * 64
img = cv2.resize(input_image, (W, H), interpolation=cv2.INTER_LANCZOS4 if k > 1 else cv2.INTER_AREA)
return img
def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, ddim_steps, guess_mode, strength, scale, seed, eta):
with torch.no_grad():
img = resize_image(HWC3(input_image['mask'][:, :, 0]), image_resolution)
H, W, C = img.shape
detected_map = np.zeros_like(img, dtype=np.uint8)
detected_map[np.min(img, axis=2) > 127] = 255
control = torch.from_numpy(detected_map.copy()).float().cuda() / 255.0
control = torch.stack([control for _ in range(num_samples)], dim=0)
control = einops.rearrange(control, 'b h w c -> b c h w').clone()
if seed == -1:
seed = random.randint(0, 65535)
seed_everything(seed)
if config.save_memory:
model.low_vram_shift(is_diffusing=False)
cond = {"c_concat": [control], "c_crossattn": [model.get_learned_conditioning([prompt + ', ' + a_prompt] * num_samples)]}
un_cond = {"c_concat": None if guess_mode else [control], "c_crossattn": [model.get_learned_conditioning([n_prompt] * num_samples)]}
shape = (4, H // 8, W // 8)
if config.save_memory:
model.low_vram_shift(is_diffusing=True)
model.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)] if guess_mode else ([strength] * 13) # Magic number. IDK why. Perhaps because 0.825**12<0.01 but 0.826**12>0.01
samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
shape, cond, verbose=False, eta=eta,
unconditional_guidance_scale=scale,
unconditional_conditioning=un_cond)
if config.save_memory:
model.low_vram_shift(is_diffusing=False)
x_samples = model.decode_first_stage(samples)
x_samples = (einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + 127.5).cpu().numpy().clip(0, 255).astype(np.uint8)
results = [x_samples[i] for i in range(num_samples)]
return [255 - detected_map] + results | null |
9,363 | from share import *
import config
import cv2
import einops
import gradio as gr
import numpy as np
import torch
import random
from pytorch_lightning import seed_everything
from annotator.util import resize_image, HWC3
from cldm.model import create_model, load_state_dict
from cldm.ddim_hacked import DDIMSampler
def create_canvas(w, h):
return np.zeros(shape=(h, w, 3), dtype=np.uint8) + 255 | null |
9,364 | from share import *
import config
import cv2
import einops
import gradio as gr
import numpy as np
import torch
import random
from pytorch_lightning import seed_everything
from annotator.util import resize_image, HWC3
from annotator.canny import CannyDetector
from cldm.model import create_model, load_state_dict
from cldm.ddim_hacked import DDIMSampler
apply_canny = CannyDetector()
model = create_model('./models/cldm_v15.yaml').cpu()
model.load_state_dict(load_state_dict('./models/control_sd15_canny.pth', location='cuda'))
model = model.cuda()
ddim_sampler = DDIMSampler(model)
def HWC3(x):
assert x.dtype == np.uint8
if x.ndim == 2:
x = x[:, :, None]
assert x.ndim == 3
H, W, C = x.shape
assert C == 1 or C == 3 or C == 4
if C == 3:
return x
if C == 1:
return np.concatenate([x, x, x], axis=2)
if C == 4:
color = x[:, :, 0:3].astype(np.float32)
alpha = x[:, :, 3:4].astype(np.float32) / 255.0
y = color * alpha + 255.0 * (1.0 - alpha)
y = y.clip(0, 255).astype(np.uint8)
return y
def resize_image(input_image, resolution):
H, W, C = input_image.shape
H = float(H)
W = float(W)
k = float(resolution) / min(H, W)
H *= k
W *= k
H = int(np.round(H / 64.0)) * 64
W = int(np.round(W / 64.0)) * 64
img = cv2.resize(input_image, (W, H), interpolation=cv2.INTER_LANCZOS4 if k > 1 else cv2.INTER_AREA)
return img
def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, ddim_steps, guess_mode, strength, scale, seed, eta, low_threshold, high_threshold):
with torch.no_grad():
img = resize_image(HWC3(input_image), image_resolution)
H, W, C = img.shape
detected_map = apply_canny(img, low_threshold, high_threshold)
detected_map = HWC3(detected_map)
control = torch.from_numpy(detected_map.copy()).float().cuda() / 255.0
control = torch.stack([control for _ in range(num_samples)], dim=0)
control = einops.rearrange(control, 'b h w c -> b c h w').clone()
if seed == -1:
seed = random.randint(0, 65535)
seed_everything(seed)
if config.save_memory:
model.low_vram_shift(is_diffusing=False)
cond = {"c_concat": [control], "c_crossattn": [model.get_learned_conditioning([prompt + ', ' + a_prompt] * num_samples)]}
un_cond = {"c_concat": None if guess_mode else [control], "c_crossattn": [model.get_learned_conditioning([n_prompt] * num_samples)]}
shape = (4, H // 8, W // 8)
if config.save_memory:
model.low_vram_shift(is_diffusing=True)
model.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)] if guess_mode else ([strength] * 13) # Magic number. IDK why. Perhaps because 0.825**12<0.01 but 0.826**12>0.01
samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
shape, cond, verbose=False, eta=eta,
unconditional_guidance_scale=scale,
unconditional_conditioning=un_cond)
if config.save_memory:
model.low_vram_shift(is_diffusing=False)
x_samples = model.decode_first_stage(samples)
x_samples = (einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + 127.5).cpu().numpy().clip(0, 255).astype(np.uint8)
results = [x_samples[i] for i in range(num_samples)]
return [255 - detected_map] + results | null |
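A hypothetical standalone call outside the Gradio UI (file name and prompts assumed):
image = cv2.imread('input.png')[:, :, ::-1].copy()   # BGR -> RGB uint8, contiguous
outs = process(image, 'a photo of a cat', 'best quality', 'lowres, bad anatomy',
               num_samples=1, image_resolution=512, ddim_steps=20, guess_mode=False,
               strength=1.0, scale=9.0, seed=42, eta=0.0,
               low_threshold=100, high_threshold=200)
inverted_edges, samples = outs[0], outs[1:]          # [255 - detected_map] + results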
9,365 | from share import *
import config
import cv2
import einops
import gradio as gr
import numpy as np
import torch
import random
from pytorch_lightning import seed_everything
from annotator.util import resize_image, HWC3
from annotator.hed import HEDdetector, nms
from cldm.model import create_model, load_state_dict
from cldm.ddim_hacked import DDIMSampler
apply_hed = HEDdetector()
model = create_model('./models/cldm_v15.yaml').cpu()
model.load_state_dict(load_state_dict('./models/control_sd15_scribble.pth', location='cuda'))
model = model.cuda()
ddim_sampler = DDIMSampler(model)
def HWC3(x):
def resize_image(input_image, resolution):
def nms(x, t, s):
def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, guess_mode, strength, scale, seed, eta):
with torch.no_grad():
input_image = HWC3(input_image)
detected_map = apply_hed(resize_image(input_image, detect_resolution))
detected_map = HWC3(detected_map)
img = resize_image(input_image, image_resolution)
H, W, C = img.shape
detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_LINEAR)
detected_map = nms(detected_map, 127, 3.0)
detected_map = cv2.GaussianBlur(detected_map, (0, 0), 3.0)
detected_map[detected_map > 4] = 255
detected_map[detected_map < 255] = 0
control = torch.from_numpy(detected_map.copy()).float().cuda() / 255.0
control = torch.stack([control for _ in range(num_samples)], dim=0)
control = einops.rearrange(control, 'b h w c -> b c h w').clone()
if seed == -1:
seed = random.randint(0, 65535)
seed_everything(seed)
if config.save_memory:
model.low_vram_shift(is_diffusing=False)
cond = {"c_concat": [control], "c_crossattn": [model.get_learned_conditioning([prompt + ', ' + a_prompt] * num_samples)]}
un_cond = {"c_concat": None if guess_mode else [control], "c_crossattn": [model.get_learned_conditioning([n_prompt] * num_samples)]}
shape = (4, H // 8, W // 8)
if config.save_memory:
model.low_vram_shift(is_diffusing=True)
model.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)] if guess_mode else ([strength] * 13) # Magic number. IDK why. Perhaps because 0.825**12<0.01 but 0.826**12>0.01
samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
shape, cond, verbose=False, eta=eta,
unconditional_guidance_scale=scale,
unconditional_conditioning=un_cond)
if config.save_memory:
model.low_vram_shift(is_diffusing=False)
x_samples = model.decode_first_stage(samples)
x_samples = (einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + 127.5).cpu().numpy().clip(0, 255).astype(np.uint8)
results = [x_samples[i] for i in range(num_samples)]
return [255 - detected_map] + results | null |