Dataset schema (one record per example):

  repo_name            string (7 to 71 chars)
  file_path            string (5 to 118 chars)
  context              list of { identifier, path, snippet } objects
  import_statement     string (45 to 12.5k chars)
  token_num            int64 (641 to 99.4k)
  cropped_code         string (44 to 17k chars)
  all_code             string (43 to 754k chars)
  next_line            string (2 to 330 chars)
  gold_snippet_index   int64 (0 to 68)
  created_at           string (25 chars, UTC timestamp)
  level                string (9 distinct values)
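A minimal sketch of reading records with this schema, assuming the data has been exported to a local JSONL file; the file name below is a placeholder, not the dataset's actual distribution format:

import json

with open("repo_completion_examples.jsonl") as f:  # placeholder path, assumed JSONL export
    for line in f:
        record = json.loads(line)
        # Scalar fields come straight out of the record.
        print(record["repo_name"], record["file_path"], record["token_num"])
        # "context" is a list of {identifier, path, snippet} objects;
        # gold_snippet_index presumably indexes the gold entry in that list.
        gold = record["context"][record["gold_snippet_index"]]
        print(gold["identifier"], gold["path"])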
repo_name: ielab/llm-rankers
file_path: run.py
[ { "identifier": "SearchResult", "path": "rankers/rankers.py", "snippet": "class SearchResult:\n docid: str\n score: float\n text: str" }, { "identifier": "PointwiseLlmRanker", "path": "rankers/pointwise.py", "snippet": "class PointwiseLlmRanker(LlmRanker):\n\n def __init__(se...
import_statement:
import logging
import ir_datasets
import argparse
import sys
import json
import time
import random
from pyserini.search.lucene import LuceneSearcher
from pyserini.search._base import get_topics
from rankers.rankers import SearchResult
from rankers.pointwise import PointwiseLlmRanker, MonoT5LlmRanker
from rankers.setwise import SetwiseLlmRanker, OpenAiSetwiseLlmRanker
from rankers.pairwise import PairwiseLlmRanker, DuoT5LlmRanker, OpenAiPairwiseLlmRanker
from rankers.listwise import OpenAiListwiseLlmRanker, ListwiseLlmRanker
from tqdm import tqdm
token_num: 13,420
cropped_code:
random.seed(929)

logger = logging.getLogger(__name__)


def parse_args(parser, commands):
    # Divide argv by commands
    split_argv = [[]]
    for c in sys.argv[1:]:
        if c in commands.choices:
            split_argv.append([c])
        else:
            split_argv[-1].append(c)

    # Initialize namespace
    args = argparse.Namespace()
    for c in commands.choices:
        setattr(args, c, None)

    # Parse each command
    parser.parse_args(split_argv[0], namespace=args)  # Without command
    for argv in split_argv[1:]:  # Commands
        n = argparse.Namespace()
        setattr(args, argv[0], n)
        parser.parse_args(argv, namespace=n)
    return args


def write_run_file(path, results, tag):
    with open(path, 'w') as f:
        for qid, _, ranking in results:
            rank = 1
            for doc in ranking:
                docid = doc.docid
                score = doc.score
                f.write(f"{qid}\tQ0\t{docid}\t{rank}\t{score}\t{tag}\n")
                rank += 1


def main(args):
    if args.pointwise:
        if 'monot5' in args.run.model_name_or_path:
            ranker = MonoT5LlmRanker(model_name_or_path=args.run.model_name_or_path,
                                     tokenizer_name_or_path=args.run.tokenizer_name_or_path,
                                     device=args.run.device,
                                     cache_dir=args.run.cache_dir,
                                     method=args.pointwise.method,
                                     batch_size=args.pointwise.batch_size)
        else:
all_code:
random.seed(929)

logger = logging.getLogger(__name__)


def parse_args(parser, commands):
    # Divide argv by commands
    split_argv = [[]]
    for c in sys.argv[1:]:
        if c in commands.choices:
            split_argv.append([c])
        else:
            split_argv[-1].append(c)

    # Initialize namespace
    args = argparse.Namespace()
    for c in commands.choices:
        setattr(args, c, None)

    # Parse each command
    parser.parse_args(split_argv[0], namespace=args)  # Without command
    for argv in split_argv[1:]:  # Commands
        n = argparse.Namespace()
        setattr(args, argv[0], n)
        parser.parse_args(argv, namespace=n)
    return args


def write_run_file(path, results, tag):
    with open(path, 'w') as f:
        for qid, _, ranking in results:
            rank = 1
            for doc in ranking:
                docid = doc.docid
                score = doc.score
                f.write(f"{qid}\tQ0\t{docid}\t{rank}\t{score}\t{tag}\n")
                rank += 1


def main(args):
    if args.pointwise:
        if 'monot5' in args.run.model_name_or_path:
            ranker = MonoT5LlmRanker(model_name_or_path=args.run.model_name_or_path,
                                     tokenizer_name_or_path=args.run.tokenizer_name_or_path,
                                     device=args.run.device,
                                     cache_dir=args.run.cache_dir,
                                     method=args.pointwise.method,
                                     batch_size=args.pointwise.batch_size)
        else:
next_line: ranker = PointwiseLlmRanker(model_name_or_path=args.run.model_name_or_path,
gold_snippet_index: 1
created_at: 2023-10-14 01:39:38+00:00
level: 16k
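To make the record above concrete, a hedged sketch of how such a record plausibly becomes a next-line completion example; the prompt layout is illustrative, not the benchmark's actual template:

def build_prompt(record):
    # Concatenate retrieved context snippets, the file's imports, and the
    # cropped code; the model is then asked to produce record["next_line"].
    context_block = "\n\n".join(s["snippet"] for s in record["context"])
    return f'{context_block}\n\n{record["import_statement"]}\n\n{record["cropped_code"]}'


def next_line_correct(prediction, record):
    # Exact match on stripped text; a real evaluation may normalize whitespace
    # or tokenize differently.
    return prediction.strip() == record["next_line"].strip()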
repo_name: amazon-science/tabsyn
file_path: baselines/tabddpm/train.py
[ { "identifier": "make_dataset", "path": "utils_train.py", "snippet": "def make_dataset(\n data_path: str,\n T: src.Transformations,\n task_type,\n change_val: bool,\n concat = True,\n):\n\n # classification\n if task_type == 'binclass' or task_type == 'multiclass':\n X_cat = ...
import_statement:
import os
import sys
import time
import torch
import numpy as np
import pandas as pd
import src
from copy import deepcopy
from utils_train import make_dataset, update_ema
from baselines.tabddpm.models.modules import MLPDiffusion
from baselines.tabddpm.models.gaussian_multinomial_distribution import GaussianMultinomialDiffusion
token_num: 11,647
cropped_code:
def get_model(
    model_name,
    model_params,
    n_num_features,
    category_sizes
):
    print(model_name)
    if model_name == 'mlp':
all_code:
def get_model(
    model_name,
    model_params,
    n_num_features,
    category_sizes
):
    print(model_name)
    if model_name == 'mlp':
next_line: model = MLPDiffusion(**model_params)
gold_snippet_index: 2
created_at: 2023-10-10 18:06:31+00:00
level: 16k
repo_name: ThomasMrY/DisDiff
file_path: ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n ...
import_statement:
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
import pytorch_lightning as pl
import copy
import os
import pandas as pd
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.modules.diffusionmodules.util import return_wrap
token_num: 11,455
cropped_code:
def apply_model(self, x_noisy, t, cond, return_ids=False, sampled_concept= None, sampled_index= None): if isinstance(cond, dict): # hybrid case, cond is exptected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' cond = {key: cond} if hasattr(self, "split_input_params"): assert len(cond) == 1 # todo can only deal with one conditioning atm assert not return_ids ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) h, w = x_noisy.shape[-2:] fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride) z = unfold(x_noisy) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] if self.cond_stage_key in ["image", "LR_image", "segmentation", 'bbox_img'] and self.model.conditioning_key: # todo check for completeness c_key = next(iter(cond.keys())) # get key c = next(iter(cond.values())) # get value assert (len(c) == 1) # todo extend to list with more than one elem c = c[0] # get element c = unfold(c) c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L ) cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])] elif self.cond_stage_key == 'coordinates_bbox': assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size' # assuming padding of unfold is always 0 and its dilation is always 1 n_patches_per_row = int((w - ks[0]) / stride[0] + 1) full_img_h, full_img_w = self.split_input_params['original_image_size'] # as we are operating on latents, we need the factor from the original image size to the # spatial latent size to properly rescale the crops for regenerating the bbox annotations num_downs = self.first_stage_model.encoder.num_resolutions - 1 rescale_latent = 2 ** (num_downs) # get top left postions of patches as conforming for the bbbox tokenizer, therefore we # need to rescale the tl patch coordinates to be in between (0,1) tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h) for patch_nr in range(z.shape[-1])] # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) patch_limits = [(x_tl, y_tl, rescale_latent * ks[0] / full_img_w, rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates] # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] # tokenize crop coordinates for the bounding boxes of the respective patches patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device) for bbox in patch_limits] # list of length l with tensors of shape (1, 2) print(patch_limits_tknzd[0].shape) # cut tknzd crop position from conditioning assert isinstance(cond, dict), 'cond must be dict to be fed into model' cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device) print(cut_cond.shape) adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd]) adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n') print(adapted_cond.shape) adapted_cond = self.get_learned_conditioning(adapted_cond) print(adapted_cond.shape) adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1]) 
print(adapted_cond.shape) cond_list = [{'c_crossattn': [e]} for e in adapted_cond] else: cond_list = [cond for i in range(z.shape[-1])] # Todo make this more efficient # apply model by loop over crops output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])] assert not isinstance(output_list[0], tuple) # todo cant deal with multiple model outputs check this never happens o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together x_recon = fold(o) / normalization else: x_recon = self.model(x_noisy, t, sampled_concept = sampled_concept, sampled_index = sampled_index, **cond) # if isinstance(x_recon, tuple) and not return_ids: # return x_recon[0] # else: # return x_recon return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., ): super().__init__() assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) self.ce_loss = nn.CrossEntropyLoss(reduction = "none") if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. 
- betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) self.register_buffer("shift_coef", - to_torch(np.sqrt(alphas)) * (1. - self.alphas_cumprod_prev) / torch.sqrt(1. - self.alphas_cumprod)) self.register_buffer("ddim_coef", -self.sqrt_one_minus_alphas_cumprod) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") self.load_epoch = sd['epoch'] self.load_step = sd["global_step"] if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) eps_pred = return_wrap(model_out, extract_into_tensor(self.ddim_coef, t, x.shape)) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=eps_pred) elif self.parameterization == "x0": x_recon = eps_pred if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) eps_pred = return_wrap(model_out, extract_into_tensor(self.shift_coef, t, x_start.shape)) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start else: raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported") loss = self.get_loss(eps_pred, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = 
self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log("global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): pass # _, loss_dict_no_ema = self.shared_step(batch) # with self.ema_scope(): # _, loss_dict_ema = self.shared_step(batch) # loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} # self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) # self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, dis_loss_flag = False, detach_flag = False, train_enc_flag = False, dis_weight = 1.0, dis_loss_type = "IM", *args, **kwargs): self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config == '__is_unconditional__': conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = 
concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key self.dis_loss_flag = dis_loss_flag self.detach_flag = detach_flag self.train_enc_flag = train_enc_flag self.dis_weight = dis_weight self.dis_loss_type = dis_loss_type try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True def make_cond_schedule(self, ): self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # def on_train_batch_start(self, batch, batch_idx): # only for very first batch if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if hasattr(self.model.diffusion_model,"scale_factor"): del self.scale_factor self.register_buffer('scale_factor', self.model.diffusion_model.scale_factor) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING Pre-Trained STD-RESCALING ###") else: del self.scale_factor self.register_buffer('scale_factor', 1. 
/ z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != '__is_first_stage__' assert config != '__is_unconditional__' model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append(self.decode_first_stage(zd.to(self.device), force_not_quantize=force_no_decoder_quantization)) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, 
device): weighting = self.delta_border(h, w) weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip(L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"]) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf)) fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) elif df > 1 and uf == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df)) fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None): x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if self.model.conditioning_key is not None: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox']: xc = batch[cond_key] elif cond_key == 'class_label': xc = batch else: xc = super().get_input(batch, cond_key).to(self.device) else: xc = x if not self.cond_stage_trainable or force_c_encode: if isinstance(xc, dict) or isinstance(xc, list): # import pudb; 
pudb.set_trace() c = self.get_learned_conditioning(xc) else: c = self.get_learned_conditioning(xc.to(self.device)) else: c = xc if bs is not None: c = c[:bs] else: c = None xc = None out = [z, c] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [self.first_stage_model.decode(z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize) for i in range(z.shape[-1])] else: output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) # same as above but without decorator def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) # 2. 
apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [self.first_stage_model.decode(z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize) for i in range(z.shape[-1])] else: output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) @torch.no_grad() def encode_first_stage(self, x): if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) df = self.split_input_params["vqf"] self.split_input_params['original_image_size'] = x.shape[-2:] bs, nc, h, w = x.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df) z = unfold(x) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) output_list = [self.first_stage_model.encode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization return decoded else: return self.first_stage_model.encode(x) else: return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): x, c = self.get_input(batch, self.first_stage_key) loss = self(x, c) return loss def forward(self, x, c, *args, **kwargs): t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() if self.model.conditioning_key is not None: assert c is not None if self.cond_stage_trainable: c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset def rescale_bbox(bbox): x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) w = min(bbox[2] / crop_coordinates[2], 1 - x0) h = min(bbox[3] / crop_coordinates[3], 1 - y0) return x0, y0, w, h return [rescale_bbox(b) for b in bboxes] def apply_model(self, x_noisy, t, cond, return_ids=False, sampled_concept= None, sampled_index= None): if isinstance(cond, dict): # hybrid case, cond is exptected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 
'concat' else 'c_crossattn' cond = {key: cond} if hasattr(self, "split_input_params"): assert len(cond) == 1 # todo can only deal with one conditioning atm assert not return_ids ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) h, w = x_noisy.shape[-2:] fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride) z = unfold(x_noisy) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] if self.cond_stage_key in ["image", "LR_image", "segmentation", 'bbox_img'] and self.model.conditioning_key: # todo check for completeness c_key = next(iter(cond.keys())) # get key c = next(iter(cond.values())) # get value assert (len(c) == 1) # todo extend to list with more than one elem c = c[0] # get element c = unfold(c) c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L ) cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])] elif self.cond_stage_key == 'coordinates_bbox': assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size' # assuming padding of unfold is always 0 and its dilation is always 1 n_patches_per_row = int((w - ks[0]) / stride[0] + 1) full_img_h, full_img_w = self.split_input_params['original_image_size'] # as we are operating on latents, we need the factor from the original image size to the # spatial latent size to properly rescale the crops for regenerating the bbox annotations num_downs = self.first_stage_model.encoder.num_resolutions - 1 rescale_latent = 2 ** (num_downs) # get top left postions of patches as conforming for the bbbox tokenizer, therefore we # need to rescale the tl patch coordinates to be in between (0,1) tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h) for patch_nr in range(z.shape[-1])] # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) patch_limits = [(x_tl, y_tl, rescale_latent * ks[0] / full_img_w, rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates] # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] # tokenize crop coordinates for the bounding boxes of the respective patches patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device) for bbox in patch_limits] # list of length l with tensors of shape (1, 2) print(patch_limits_tknzd[0].shape) # cut tknzd crop position from conditioning assert isinstance(cond, dict), 'cond must be dict to be fed into model' cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device) print(cut_cond.shape) adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd]) adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n') print(adapted_cond.shape) adapted_cond = self.get_learned_conditioning(adapted_cond) print(adapted_cond.shape) adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1]) print(adapted_cond.shape) cond_list = [{'c_crossattn': [e]} for e in adapted_cond] else: cond_list = [cond for i in range(z.shape[-1])] # Todo make this more efficient # apply model by loop over crops output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])] assert not 
isinstance(output_list[0], tuple) # todo cant deal with multiple model outputs check this never happens o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together x_recon = fold(o) / normalization else: x_recon = self.model(x_noisy, t, sampled_concept = sampled_concept, sampled_index = sampled_index, **cond) # if isinstance(x_recon, tuple) and not return_ids: # return x_recon[0] # else: # return x_recon return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
next_line: return mean_flat(kl_prior) / np.log(2.0)
gold_snippet_index: 5
created_at: 2023-10-07 09:58:07+00:00
level: 16k
repo_name: wiio12/LEGO-Prover
file_path: lego_prover/prover.py
[ { "identifier": "IsabelleEnv", "path": "lego_prover/env/isa_bridge.py", "snippet": "class IsabelleEnv(gym.Env):\n def __init__(\n self,\n logger=None,\n isabelle_path=\"/Users/wiio/Isabelle2022\",\n working_dir=\"miniF2F\",\n interactive_file=\"miniF2F/interactive.t...
import_statement:
import os
import random
import re
import time
import multiprocessing as mp
import tiktoken
import lego_prover.utils as U
import logging
from lego_prover.env.isa_bridge import IsabelleEnv
from .agents import ActionAgent
from .agents import CurriculumAgent
from .agents import SkillManager
from langchain.schema import HumanMessage
token_num: 11,622
cropped_code:
class Prover:
    def __init__(
        self,
        rank: int = None,
        isabelle_path: str = None,
        server_port: int = 8000,
        model_name: str = "gpt-4",
        temperature: int = 0,
        action_agent_task_max_retries: int = 4,
        curriculum_task_type: str = "simple_curriculum",
        curriculum_agent_lock = U.WithEmpty(),
        skill_manager_lock = U.WithEmpty(),
        chroma_bridge = None,
        openai_api_request_timeout: int = 6000,
        ckpt_dir: str = "ckpt",
        resume: bool = False,
        miniF2F_tasks: mp.Queue = None,
    ):
        """
        Initializes a new instance of the Prover class.

        Args:
            rank (int): The rank of the prover process.
            isabelle_path (str): The path to the Isabelle directory.
            server_port (int): The port number for the server.
            model_name (str): The name of the OpenAI model to use.
            temperature (int): The temperature for sampling the LLM.
            action_agent_task_max_retries (int): The maximum number of retries for an action agent task.
            curriculum_task_type (str): The type of curriculum task to use.
            curriculum_agent_lock: The lock for the curriculum agent.
            skill_manager_lock: The lock for the skill manager.
            chroma_bridge: The ChromaBridge object for controlling the keyboard and mouse.
            openai_api_request_timeout (int): The timeout for OpenAI API requests.
            ckpt_dir (str): The directory for saving checkpoints.
            resume (bool): Whether to resume from the checkpoint.
            miniF2F_tasks (mp.Queue): The queue for miniF2F tasks.
        """
        # init env
        self.rank = rank
        self.logger = logging.getLogger(f'prover-{rank}')
        self.logger.info(f"lego_prover running in rank {rank}")
        self.model_name = model_name
        self.env = IsabelleEnv(
            logger=self.logger,
            isabelle_path=isabelle_path,
            server_port=server_port
        )
        self.action_agent_model_name = model_name
        self.tokenizer_encoder = tiktoken.encoding_for_model(
            self.action_agent_model_name)
        self.ckpt_dir = ckpt_dir
        self.temperature = temperature

        # init agents
        self.action_agent = ActionAgent(
            logger=self.logger,
            model_name=model_name,
            temperature=temperature,
            request_timeout=openai_api_request_timeout,
            ckpt_dir=ckpt_dir,
        )
        self.action_agent_task_max_retries = action_agent_task_max_retries
        self.curriculum_agent = CurriculumAgent(
            logger=self.logger,
            ckpt_dir=ckpt_dir,
            resume=resume,
            miniF2F_tasks=miniF2F_tasks,
            curriculum_task_type=curriculum_task_type,
            curriculum_agent_lock=curriculum_agent_lock,
        )
all_code:
class Prover:
    def __init__(
        self,
        rank: int = None,
        isabelle_path: str = None,
        server_port: int = 8000,
        model_name: str = "gpt-4",
        temperature: int = 0,
        action_agent_task_max_retries: int = 4,
        curriculum_task_type: str = "simple_curriculum",
        curriculum_agent_lock = U.WithEmpty(),
        skill_manager_lock = U.WithEmpty(),
        chroma_bridge = None,
        openai_api_request_timeout: int = 6000,
        ckpt_dir: str = "ckpt",
        resume: bool = False,
        miniF2F_tasks: mp.Queue = None,
    ):
        """
        Initializes a new instance of the Prover class.

        Args:
            rank (int): The rank of the prover process.
            isabelle_path (str): The path to the Isabelle directory.
            server_port (int): The port number for the server.
            model_name (str): The name of the OpenAI model to use.
            temperature (int): The temperature for sampling the LLM.
            action_agent_task_max_retries (int): The maximum number of retries for an action agent task.
            curriculum_task_type (str): The type of curriculum task to use.
            curriculum_agent_lock: The lock for the curriculum agent.
            skill_manager_lock: The lock for the skill manager.
            chroma_bridge: The ChromaBridge object for controlling the keyboard and mouse.
            openai_api_request_timeout (int): The timeout for OpenAI API requests.
            ckpt_dir (str): The directory for saving checkpoints.
            resume (bool): Whether to resume from the checkpoint.
            miniF2F_tasks (mp.Queue): The queue for miniF2F tasks.
        """
        # init env
        self.rank = rank
        self.logger = logging.getLogger(f'prover-{rank}')
        self.logger.info(f"lego_prover running in rank {rank}")
        self.model_name = model_name
        self.env = IsabelleEnv(
            logger=self.logger,
            isabelle_path=isabelle_path,
            server_port=server_port
        )
        self.action_agent_model_name = model_name
        self.tokenizer_encoder = tiktoken.encoding_for_model(
            self.action_agent_model_name)
        self.ckpt_dir = ckpt_dir
        self.temperature = temperature

        # init agents
        self.action_agent = ActionAgent(
            logger=self.logger,
            model_name=model_name,
            temperature=temperature,
            request_timeout=openai_api_request_timeout,
            ckpt_dir=ckpt_dir,
        )
        self.action_agent_task_max_retries = action_agent_task_max_retries
        self.curriculum_agent = CurriculumAgent(
            logger=self.logger,
            ckpt_dir=ckpt_dir,
            resume=resume,
            miniF2F_tasks=miniF2F_tasks,
            curriculum_task_type=curriculum_task_type,
            curriculum_agent_lock=curriculum_agent_lock,
        )
next_line: self.skill_manager = SkillManager(
gold_snippet_index: 3
created_at: 2023-10-09 04:23:43+00:00
level: 16k
repo_name: YingqingHe/ScaleCrafter-ptl
file_path: ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n ...
import_statement:
import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
import itertools
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager, nullcontext
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from omegaconf import ListConfig
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler
token_num: 12,845
cropped_code:
@torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def predict_start_from_z_and_v(self, x_t, t, v): # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. 
- alphas_cumprod))) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v ) def predict_eps_from_z_and_v(self, x_t, t, v): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised)
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if reset_ema: assert exists(ckpt_path) if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) if reset_ema: assert self.use_ema print(f"Resetting ema to pure model weights. 
This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) elif self.parameterization == "v": lvlb_weights = torch.ones_like(self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))) else: raise NotImplementedError("mu not supported") lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
""" mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def predict_start_from_z_and_v(self, x_t, t, v): # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v ) def predict_eps_from_z_and_v(self, x_t, t, v): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised)
noise = noise_like(x.shape, device, repeat_noise)
15
2023-10-11 10:57:55+00:00
16k
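A minimal sketch of the forward process q(x_t | x_0) implied by the buffers this record registers in register_schedule. The plain torch.linspace schedule and the body of extract_into_tensor are assumptions for self-containment; the repo's make_beta_schedule and ldm.modules.diffusionmodules.util versions may differ.

import torch

def extract_into_tensor(a, t, x_shape):
    # Gather one coefficient per timestep index and reshape to broadcast over x.
    b = t.shape[0]
    return a.gather(-1, t).reshape(b, *((1,) * (len(x_shape) - 1)))

T = 1000
betas = torch.linspace(1e-4, 2e-2, T)  # stand-in for make_beta_schedule("linear", ...)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
sqrt_alphas_cumprod = alphas_cumprod.sqrt()
sqrt_one_minus_alphas_cumprod = (1.0 - alphas_cumprod).sqrt()

def q_sample(x_start, t, noise):
    # x_t = sqrt(abar_t) * x_0 + sqrt(1 - abar_t) * eps, matching the registered buffers.
    return (extract_into_tensor(sqrt_alphas_cumprod, t, x_start.shape) * x_start
            + extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)

x0 = torch.randn(2, 3, 8, 8)
t = torch.tensor([10, 500])
xt = q_sample(x0, t, torch.randn_like(x0))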
bilibini/Lovely_Image_Downloader
dist/py/Python38/site-packages/charset_normalizer/cd.py
[ { "identifier": "FREQUENCIES", "path": "dist/py/Python38/site-packages/charset_normalizer/constant.py", "snippet": "FREQUENCIES: Dict[str, List[str]] = {\n \"English\": [\n \"e\",\n \"a\",\n \"t\",\n \"i\",\n \"o\",\n \"n\",\n \"s\",\n \"r\",\n ...
import importlib from codecs import IncrementalDecoder from collections import Counter from functools import lru_cache from typing import Counter as TypeCounter, Dict, List, Optional, Tuple from .constant import ( FREQUENCIES, KO_NAMES, LANGUAGE_SUPPORTED_COUNT, TOO_SMALL_SEQUENCE, ZH_NAMES, ) from .md import is_suspiciously_successive_range from .models import CoherenceMatches from .utils import ( is_accentuated, is_latin, is_multi_byte_encoding, is_unicode_range_secondary, unicode_range, )
11,265
if target_have_accents is False and source_have_accents: continue character_count: int = len(language_characters) character_match_count: int = len( [c for c in language_characters if c in characters] ) ratio: float = character_match_count / character_count if ratio >= 0.2: languages.append((language, ratio)) languages = sorted(languages, key=lambda x: x[1], reverse=True) return [compatible_language[0] for compatible_language in languages] def characters_popularity_compare( language: str, ordered_characters: List[str] ) -> float: """ Determine if a ordered characters list (by occurrence from most appearance to rarest) match a particular language. The result is a ratio between 0. (absolutely no correspondence) and 1. (near perfect fit). Beware that is function is not strict on the match in order to ease the detection. (Meaning close match is 1.) """ if language not in FREQUENCIES: raise ValueError("{} not available".format(language)) character_approved_count: int = 0 FREQUENCIES_language_set = set(FREQUENCIES[language]) ordered_characters_count: int = len(ordered_characters) target_language_characters_count: int = len(FREQUENCIES[language]) large_alphabet: bool = target_language_characters_count > 26 for character, character_rank in zip( ordered_characters, range(0, ordered_characters_count) ): if character not in FREQUENCIES_language_set: continue character_rank_in_language: int = FREQUENCIES[language].index(character) expected_projection_ratio: float = ( target_language_characters_count / ordered_characters_count ) character_rank_projection: int = int(character_rank * expected_projection_ratio) if ( large_alphabet is False and abs(character_rank_projection - character_rank_in_language) > 4 ): continue if ( large_alphabet is True and abs(character_rank_projection - character_rank_in_language) < target_language_characters_count / 3 ): character_approved_count += 1 continue characters_before_source: List[str] = FREQUENCIES[language][ 0:character_rank_in_language ] characters_after_source: List[str] = FREQUENCIES[language][ character_rank_in_language: ] characters_before: List[str] = ordered_characters[0:character_rank] characters_after: List[str] = ordered_characters[character_rank:] before_match_count: int = len( set(characters_before) & set(characters_before_source) ) after_match_count: int = len( set(characters_after) & set(characters_after_source) ) if len(characters_before_source) == 0 and before_match_count <= 4: character_approved_count += 1 continue if len(characters_after_source) == 0 and after_match_count <= 4: character_approved_count += 1 continue if ( before_match_count / len(characters_before_source) >= 0.4 or after_match_count / len(characters_after_source) >= 0.4 ): character_approved_count += 1 continue return character_approved_count / len(ordered_characters) def alpha_unicode_split(decoded_sequence: str) -> List[str]: """ Given a decoded text sequence, return a list of str. Unicode range / alphabet separation. Ex. a text containing English/Latin with a bit a Hebrew will return two items in the resulting list; One containing the latin letters and the other hebrew. """ layers: Dict[str, str] = {} for character in decoded_sequence: if character.isalpha() is False: continue character_range: Optional[str] = unicode_range(character) if character_range is None: continue layer_target_range: Optional[str] = None for discovered_range in layers: if (
def encoding_unicode_range(iana_name: str) -> List[str]: """ Return the unicode ranges associated with a single-byte code page. """ if is_multi_byte_encoding(iana_name): raise IOError("Function not supported on multi-byte code page") decoder = importlib.import_module( "encodings.{}".format(iana_name) ).IncrementalDecoder p: IncrementalDecoder = decoder(errors="ignore") seen_ranges: Dict[str, int] = {} character_count: int = 0 for i in range(0x40, 0xFF): chunk: str = p.decode(bytes([i])) if chunk: character_range: Optional[str] = unicode_range(chunk) if character_range is None: continue if is_unicode_range_secondary(character_range) is False: if character_range not in seen_ranges: seen_ranges[character_range] = 0 seen_ranges[character_range] += 1 character_count += 1 return sorted( [ character_range for character_range in seen_ranges if seen_ranges[character_range] / character_count >= 0.15 ] ) def unicode_range_languages(primary_range: str) -> List[str]: """ Return inferred languages used with a unicode range. """ languages: List[str] = [] for language, characters in FREQUENCIES.items(): for character in characters: if unicode_range(character) == primary_range: languages.append(language) break return languages @lru_cache() def encoding_languages(iana_name: str) -> List[str]: """ Single-byte encoding language association. Some code pages are heavily linked to particular language(s). This function resolves that correspondence. """ unicode_ranges: List[str] = encoding_unicode_range(iana_name) primary_range: Optional[str] = None for specified_range in unicode_ranges: if "Latin" not in specified_range: primary_range = specified_range break if primary_range is None: return ["Latin Based"] return unicode_range_languages(primary_range) @lru_cache() def mb_encoding_languages(iana_name: str) -> List[str]: """ Multi-byte encoding language association. Some code pages are heavily linked to particular language(s). This function resolves that correspondence. """ if ( iana_name.startswith("shift_") or iana_name.startswith("iso2022_jp") or iana_name.startswith("euc_j") or iana_name == "cp932" ): return ["Japanese"] if iana_name.startswith("gb") or iana_name in ZH_NAMES: return ["Chinese"] if iana_name.startswith("iso2022_kr") or iana_name in KO_NAMES: return ["Korean"] return [] @lru_cache(maxsize=LANGUAGE_SUPPORTED_COUNT) def get_target_features(language: str) -> Tuple[bool, bool]: """ Determine the main aspects of a supported language: whether it contains accents and whether it is pure Latin. """ target_have_accents: bool = False target_pure_latin: bool = True for character in FREQUENCIES[language]: if not target_have_accents and is_accentuated(character): target_have_accents = True if target_pure_latin and is_latin(character) is False: target_pure_latin = False return target_have_accents, target_pure_latin def alphabet_languages( characters: List[str], ignore_non_latin: bool = False ) -> List[str]: """ Return the languages associated with the given characters. 
""" languages: List[Tuple[str, float]] = [] source_have_accents = any(is_accentuated(character) for character in characters) for language, language_characters in FREQUENCIES.items(): target_have_accents, target_pure_latin = get_target_features(language) if ignore_non_latin and target_pure_latin is False: continue if target_have_accents is False and source_have_accents: continue character_count: int = len(language_characters) character_match_count: int = len( [c for c in language_characters if c in characters] ) ratio: float = character_match_count / character_count if ratio >= 0.2: languages.append((language, ratio)) languages = sorted(languages, key=lambda x: x[1], reverse=True) return [compatible_language[0] for compatible_language in languages] def characters_popularity_compare( language: str, ordered_characters: List[str] ) -> float: """ Determine if a ordered characters list (by occurrence from most appearance to rarest) match a particular language. The result is a ratio between 0. (absolutely no correspondence) and 1. (near perfect fit). Beware that is function is not strict on the match in order to ease the detection. (Meaning close match is 1.) """ if language not in FREQUENCIES: raise ValueError("{} not available".format(language)) character_approved_count: int = 0 FREQUENCIES_language_set = set(FREQUENCIES[language]) ordered_characters_count: int = len(ordered_characters) target_language_characters_count: int = len(FREQUENCIES[language]) large_alphabet: bool = target_language_characters_count > 26 for character, character_rank in zip( ordered_characters, range(0, ordered_characters_count) ): if character not in FREQUENCIES_language_set: continue character_rank_in_language: int = FREQUENCIES[language].index(character) expected_projection_ratio: float = ( target_language_characters_count / ordered_characters_count ) character_rank_projection: int = int(character_rank * expected_projection_ratio) if ( large_alphabet is False and abs(character_rank_projection - character_rank_in_language) > 4 ): continue if ( large_alphabet is True and abs(character_rank_projection - character_rank_in_language) < target_language_characters_count / 3 ): character_approved_count += 1 continue characters_before_source: List[str] = FREQUENCIES[language][ 0:character_rank_in_language ] characters_after_source: List[str] = FREQUENCIES[language][ character_rank_in_language: ] characters_before: List[str] = ordered_characters[0:character_rank] characters_after: List[str] = ordered_characters[character_rank:] before_match_count: int = len( set(characters_before) & set(characters_before_source) ) after_match_count: int = len( set(characters_after) & set(characters_after_source) ) if len(characters_before_source) == 0 and before_match_count <= 4: character_approved_count += 1 continue if len(characters_after_source) == 0 and after_match_count <= 4: character_approved_count += 1 continue if ( before_match_count / len(characters_before_source) >= 0.4 or after_match_count / len(characters_after_source) >= 0.4 ): character_approved_count += 1 continue return character_approved_count / len(ordered_characters) def alpha_unicode_split(decoded_sequence: str) -> List[str]: """ Given a decoded text sequence, return a list of str. Unicode range / alphabet separation. Ex. a text containing English/Latin with a bit a Hebrew will return two items in the resulting list; One containing the latin letters and the other hebrew. 
""" layers: Dict[str, str] = {} for character in decoded_sequence: if character.isalpha() is False: continue character_range: Optional[str] = unicode_range(character) if character_range is None: continue layer_target_range: Optional[str] = None for discovered_range in layers: if (
is_suspiciously_successive_range(discovered_range, character_range)
5
2023-10-11 09:08:57+00:00
16k
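A toy, self-contained rendering of the 0.2 match-ratio rule that the record's alphabet_languages applies; the two-entry FREQUENCIES table here is an illustrative stand-in for the package's real constant.

from typing import Dict, List, Tuple

FREQUENCIES: Dict[str, List[str]] = {  # stand-in for charset_normalizer's table
    "English": ["e", "a", "t", "i", "o", "n", "s", "r", "h", "l"],
    "French": ["e", "a", "s", "i", "t", "n", "r", "u", "o", "l"],
}

def alphabet_languages(characters: List[str]) -> List[str]:
    # Keep a language when at least 20% of its most frequent characters appear.
    scored: List[Tuple[str, float]] = []
    for language, language_characters in FREQUENCIES.items():
        ratio = sum(c in characters for c in language_characters) / len(language_characters)
        if ratio >= 0.2:
            scored.append((language, ratio))
    scored.sort(key=lambda pair: pair[1], reverse=True)
    return [language for language, _ in scored]

print(alphabet_languages(list("the rain in spain")))  # ['English', 'French']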
MTgeophysics/mtpy-v2
mtpy/modeling/modem/residual.py
[ { "identifier": "Data", "path": "mtpy/modeling/modem/data.py", "snippet": "class Data:\n \"\"\"\n Data will read and write .dat files for ModEM and convert a WS data file\n to ModEM format.\n\n ..note: :: the data is interpolated onto the given periods such that all\n stations ...
from pathlib import Path from .data import Data from mtpy.modeling.plots import PlotRMS import numpy as np import pandas as pd
14,083
""" ================== ModEM ================== residuals class to contain RMS information revised by JP 2017 revised by AK 2017 to bring across functionality from ak branch """ # ============================================================================= # Imports # ============================================================================= # =============================================================================
""" ================== ModEM ================== residuals class to contain RMS information revised by JP 2017 revised by AK 2017 to bring across functionality from ak branch """ # ============================================================================= # Imports # ============================================================================= # =============================================================================
class Residual(Data):
0
2023-10-11 22:24:50+00:00
16k
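The record's next_line shows Residual subclassing the modem Data class; a minimal sketch of what such a subclass could look like, with a stub Data standing in for mtpy's and the residual arithmetic stated as an assumption rather than the package's implementation.

import numpy as np

class Data:
    # Minimal stand-in for mtpy.modeling.modem.Data.
    def __init__(self, **kwargs):
        for key, value in kwargs.items():
            setattr(self, key, value)

class Residual(Data):
    """Hold observed-minus-modelled values for RMS bookkeeping."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.residual_array = None

    def calculate_residual(self, data_array, response_array):
        # Assumed definition: residual = observation - model response, elementwise.
        self.residual_array = np.asarray(data_array) - np.asarray(response_array)
        return self.residual_array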
Jacoo-ai/HIC-Yolov5
detect.py
[ { "identifier": "attempt_load", "path": "models/experimental.py", "snippet": "def attempt_load(weights, map_location=None, inplace=True, fuse=True):\n from models.yolo import Detect, Model\n\n # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a\n model = Ens...
import argparse import os import sys import cv2 import numpy as np import torch import torch.backends.cudnn as cudnn import onnxruntime import tensorflow as tf from pathlib import Path from models.experimental import attempt_load from utils.datasets import LoadImages, LoadStreams from utils.general import apply_classifier, check_img_size, check_imshow, check_requirements, check_suffix, colorstr, \ increment_path, non_max_suppression, print_args, save_one_box, scale_coords, set_logging, \ strip_optimizer, xyxy2xywh from utils.plots import Annotator, colors from utils.torch_utils import load_classifier, select_device, time_sync
11,144
if len(img.shape) == 3: img = img[None] # expand for batch dim t2 = time_sync() dt[0] += t2 - t1 # Inference if pt: visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False pred = model(img, augment=augment, visualize=visualize)[0] elif onnx: if dnn: net.setInput(img) pred = torch.tensor(net.forward()) else: pred = torch.tensor(session.run([session.get_outputs()[0].name], {session.get_inputs()[0].name: img})) else: # tensorflow model (tflite, pb, saved_model) imn = img.permute(0, 2, 3, 1).cpu().numpy() # image in numpy if pb: pred = frozen_func(x=tf.constant(imn)).numpy() elif saved_model: pred = model(imn, training=False).numpy() elif tflite: if int8: scale, zero_point = input_details[0]['quantization'] imn = (imn / scale + zero_point).astype(np.uint8) # de-scale interpreter.set_tensor(input_details[0]['index'], imn) interpreter.invoke() pred = interpreter.get_tensor(output_details[0]['index']) if int8: scale, zero_point = output_details[0]['quantization'] pred = (pred.astype(np.float32) - zero_point) * scale # re-scale pred[..., 0] *= imgsz[1] # x pred[..., 1] *= imgsz[0] # y pred[..., 2] *= imgsz[1] # w pred[..., 3] *= imgsz[0] # h pred = torch.tensor(pred) t3 = time_sync() dt[1] += t3 - t2 # NMS pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det) dt[2] += time_sync() - t3 # Second-stage classifier (optional) if classify: pred = apply_classifier(pred, modelc, img, im0s) # Process predictions for i, det in enumerate(pred): # per image seen += 1 if webcam: # batch_size >= 1 p, s, im0, frame = path[i], f'{i}: ', im0s[i].copy(), dataset.count else: p, s, im0, frame = path, '', im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # img.jpg txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt s += '%gx%g ' % img.shape[2:] # print string gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): # Rescale boxes from img_size to im0 size det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round() # Print results for c in det[:, -1].unique(): n = (det[:, -1] == c).sum() # detections per class s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Write results for *xyxy, conf, cls in reversed(det): if save_txt: # Write to file xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(txt_path + '.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') if save_img or save_crop or view_img: # Add bbox to image c = int(cls) # integer class # label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') label = None annotator.box_label(xyxy, label, color=colors(c, True)) if save_crop: save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) # Print time (inference-only) print(f'{s}Done. 
({t3 - t2:.3f}s)') # Stream results im0 = annotator.result() if view_img: cv2.imshow(str(p), im0) cv2.waitKey(1) # 1 millisecond # Save results (image with detections) if save_img: if dataset.mode == 'image': cv2.imwrite(save_path, im0) else: # 'video' or 'stream' if vid_path[i] != save_path: # new video vid_path[i] = save_path if isinstance(vid_writer[i], cv2.VideoWriter): vid_writer[i].release() # release previous video writer if vid_cap: # video fps = vid_cap.get(cv2.CAP_PROP_FPS) w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) else: # stream fps, w, h = 30, im0.shape[1], im0.shape[0] save_path += '.mp4' vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) vid_writer[i].write(im0) # Print results t = tuple(x / seen * 1E3 for x in dt) # speeds per image print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) if save_txt or save_img: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Run inference on images, videos, directories, streams, etc. Usage: $ python path/to/detect.py --source path/to/img.jpg --weights yolov5s.pt --img 640 """ FILE = Path(__file__).resolve() ROOT = FILE.parents[0] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative @torch.no_grad() def run(weights=ROOT / 'yolov5m.pt', # model.pt path(s) source=ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam imgsz=640, # inference size (pixels) conf_thres=0.25, # confidence threshold iou_thres=0.45, # NMS IOU threshold max_det=1000, # maximum detections per image device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu view_img=False, # show results save_txt=False, # save results to *.txt save_conf=False, # save confidences in --save-txt labels save_crop=False, # save cropped prediction boxes nosave=False, # do not save images/videos classes=None, # filter by class: --class 0, or --class 0 2 3 agnostic_nms=False, # class-agnostic NMS augment=False, # augmented inference visualize=False, # visualize features update=False, # update all models project=ROOT / 'runs/detect', # save results to project/name name='exp', # save results to project/name exist_ok=False, # existing project/name ok, do not increment line_thickness=3, # bounding box thickness (pixels) hide_labels=False, # hide labels hide_conf=False, # hide confidences half=False, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference ): source = str(source) save_img = not nosave and not source.endswith('.txt') # save inference images webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith( ('rtsp://', 'rtmp://', 'http://', 'https://')) # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Initialize set_logging() device = select_device(device) half &= device.type != 'cpu' # half precision only supported on CUDA # Load model w = str(weights[0] if isinstance(weights, list) else weights) classify, suffix, suffixes = False, Path(w).suffix.lower(), ['.pt', '.onnx', '.tflite', '.pb', ''] check_suffix(w, suffixes) # check weights have acceptable suffix pt, onnx, tflite, pb, saved_model = (suffix == x for x in suffixes) # backend booleans stride, names = 64, [f'class{i}' for i in range(1000)] # assign defaults if pt: model = torch.jit.load(w) if 'torchscript' in w else attempt_load(weights, map_location=device) stride = int(model.stride.max()) # model stride names = model.module.names if hasattr(model, 'module') else model.names # get class names if half: model.half() # to FP16 if classify: # second-stage classifier modelc = load_classifier(name='resnet50', n=2) # initialize modelc.load_state_dict(torch.load('resnet50.pt', map_location=device)['model']).to(device).eval() elif onnx: if dnn: # check_requirements(('opencv-python>=4.5.4',)) net = cv2.dnn.readNetFromONNX(w) else: check_requirements(('onnx', 'onnxruntime')) session = onnxruntime.InferenceSession(w, None) else: # TensorFlow models check_requirements(('tensorflow>=2.4.1',)) if pb: # https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt def wrap_frozen_graph(gd, inputs, outputs): x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), []) # wrapped import return x.prune(tf.nest.map_structure(x.graph.as_graph_element, inputs), 
tf.nest.map_structure(x.graph.as_graph_element, outputs)) graph_def = tf.Graph().as_graph_def() graph_def.ParseFromString(open(w, 'rb').read()) frozen_func = wrap_frozen_graph(gd=graph_def, inputs="x:0", outputs="Identity:0") elif saved_model: model = tf.keras.models.load_model(w) elif tflite: interpreter = tf.lite.Interpreter(model_path=w) # load TFLite model interpreter.allocate_tensors() # allocate input_details = interpreter.get_input_details() # inputs output_details = interpreter.get_output_details() # outputs int8 = input_details[0]['dtype'] == np.uint8 # is TFLite quantized uint8 model imgsz = check_img_size(imgsz, s=stride) # check image size # Dataloader if webcam: view_img = check_imshow() cudnn.benchmark = True # set True to speed up constant image size inference dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt) bs = len(dataset) # batch_size else: dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt) bs = 1 # batch_size vid_path, vid_writer = [None] * bs, [None] * bs # Run inference if pt and device.type != 'cpu': model(torch.zeros(1, 3, *imgsz).to(device).type_as(next(model.parameters()))) # run once dt, seen = [0.0, 0.0, 0.0], 0 for path, img, im0s, vid_cap in dataset: t1 = time_sync() if onnx: img = img.astype('float32') else: img = torch.from_numpy(img).to(device) img = img.half() if half else img.float() # uint8 to fp16/32 img = img / 255.0 # 0 - 255 to 0.0 - 1.0 if len(img.shape) == 3: img = img[None] # expand for batch dim t2 = time_sync() dt[0] += t2 - t1 # Inference if pt: visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False pred = model(img, augment=augment, visualize=visualize)[0] elif onnx: if dnn: net.setInput(img) pred = torch.tensor(net.forward()) else: pred = torch.tensor(session.run([session.get_outputs()[0].name], {session.get_inputs()[0].name: img})) else: # tensorflow model (tflite, pb, saved_model) imn = img.permute(0, 2, 3, 1).cpu().numpy() # image in numpy if pb: pred = frozen_func(x=tf.constant(imn)).numpy() elif saved_model: pred = model(imn, training=False).numpy() elif tflite: if int8: scale, zero_point = input_details[0]['quantization'] imn = (imn / scale + zero_point).astype(np.uint8) # de-scale interpreter.set_tensor(input_details[0]['index'], imn) interpreter.invoke() pred = interpreter.get_tensor(output_details[0]['index']) if int8: scale, zero_point = output_details[0]['quantization'] pred = (pred.astype(np.float32) - zero_point) * scale # re-scale pred[..., 0] *= imgsz[1] # x pred[..., 1] *= imgsz[0] # y pred[..., 2] *= imgsz[1] # w pred[..., 3] *= imgsz[0] # h pred = torch.tensor(pred) t3 = time_sync() dt[1] += t3 - t2 # NMS pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det) dt[2] += time_sync() - t3 # Second-stage classifier (optional) if classify: pred = apply_classifier(pred, modelc, img, im0s) # Process predictions for i, det in enumerate(pred): # per image seen += 1 if webcam: # batch_size >= 1 p, s, im0, frame = path[i], f'{i}: ', im0s[i].copy(), dataset.count else: p, s, im0, frame = path, '', im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # img.jpg txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt s += '%gx%g ' % img.shape[2:] # print string gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, 
line_width=line_thickness, example=str(names)) if len(det): # Rescale boxes from img_size to im0 size det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round() # Print results for c in det[:, -1].unique(): n = (det[:, -1] == c).sum() # detections per class s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Write results for *xyxy, conf, cls in reversed(det): if save_txt: # Write to file xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(txt_path + '.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') if save_img or save_crop or view_img: # Add bbox to image c = int(cls) # integer class # label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') label = None annotator.box_label(xyxy, label, color=colors(c, True)) if save_crop: save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) # Print time (inference-only) print(f'{s}Done. ({t3 - t2:.3f}s)') # Stream results im0 = annotator.result() if view_img: cv2.imshow(str(p), im0) cv2.waitKey(1) # 1 millisecond # Save results (image with detections) if save_img: if dataset.mode == 'image': cv2.imwrite(save_path, im0) else: # 'video' or 'stream' if vid_path[i] != save_path: # new video vid_path[i] = save_path if isinstance(vid_writer[i], cv2.VideoWriter): vid_writer[i].release() # release previous video writer if vid_cap: # video fps = vid_cap.get(cv2.CAP_PROP_FPS) w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) else: # stream fps, w, h = 30, im0.shape[1], im0.shape[0] save_path += '.mp4' vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) vid_writer[i].write(im0) # Print results t = tuple(x / seen * 1E3 for x in dt) # speeds per image print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) if save_txt or save_img: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
print(f"Results saved to {colorstr('bold', save_dir)}{s}")
8
2023-10-12 08:52:01+00:00
16k
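A self-contained sketch of the int8 scale/zero-point arithmetic the record's TFLite branch performs (de-scale on input, re-scale on output); shapes and quantization parameters are illustrative.

import numpy as np

def quantize(x: np.ndarray, scale: float, zero_point: int) -> np.ndarray:
    # Matches the record's input step: imn = (imn / scale + zero_point).astype(np.uint8)
    return (x / scale + zero_point).astype(np.uint8)

def dequantize(q: np.ndarray, scale: float, zero_point: int) -> np.ndarray:
    # Matches the record's output step: (pred.astype(np.float32) - zero_point) * scale
    return (q.astype(np.float32) - zero_point) * scale

x = np.random.rand(1, 64, 64, 3).astype(np.float32)
q = quantize(x, scale=1 / 255, zero_point=0)
assert np.allclose(x, dequantize(q, scale=1 / 255, zero_point=0), atol=1 / 255)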
OmicsML/scDiff
scdiff/ext/gears/gears.py
[ { "identifier": "GEARS_Model", "path": "scdiff/ext/gears/model.py", "snippet": "class GEARS_Model(torch.nn.Module):\n \"\"\"\n GEARS model\n\n \"\"\"\n\n def __init__(self, args):\n \"\"\"\n :param args: arguments dictionary\n \"\"\"\n\n super(GEARS_Model, self)._...
from copy import deepcopy from torch.optim.lr_scheduler import StepLR from .model import GEARS_Model from .inference import ( compute_metrics, deeper_analysis, evaluate, non_dropout_analysis, ) from .utils import ( GeneSimNetwork, create_cell_graph_dataset_for_prediction, get_GI_genes_idx, get_GI_params, get_mean_control, get_similarity_network, loss_fct, print_sys, uncertainty_loss_fct, ) from collections import OrderedDict from torch_geometric.data import DataLoader import os import pickle import warnings import numpy as np import torch import torch.nn as nn import torch.optim as optim import wandb import seaborn as sns import matplotlib.pyplot as plt
14,147
path: str path to save the model Returns ------- None """ if not os.path.exists(path): os.mkdir(path) if self.config is None: raise ValueError('No model is initialized...') with open(os.path.join(path, 'config.pkl'), 'wb') as f: pickle.dump(self.config, f) torch.save(self.best_model.state_dict(), os.path.join(path, 'model.pt')) def predict(self, pert_list): """ Predict the transcriptome given a list of genes/gene combinations being perturbed Parameters ---------- pert_list: list list of genes/gene combiantions to be perturbed Returns ------- results_pred: dict dictionary of predicted transcriptome results_logvar: dict dictionary of uncertainty score """ # given a list of single/combo genes, return the transcriptome # if uncertainty mode is on, also return uncertainty score. self.ctrl_adata = self.adata[self.adata.obs['condition'] == 'ctrl'] for pert in pert_list: for i in pert: if i not in self.pert_list: raise ValueError(i + " is not in the perturbation graph. " "Please select from GEARS.pert_list!") if self.config['uncertainty']: results_logvar = {} self.best_model = self.best_model.to(self.device) self.best_model.eval() results_pred = {} results_logvar_sum = {} for pert in pert_list: try: # If prediction is already saved, then skip inference results_pred['_'.join(pert)] = self.saved_pred['_'.join(pert)] if self.config['uncertainty']: results_logvar_sum['_'.join(pert)] = self.saved_logvar_sum['_'.join(pert)] continue except: pass cg = create_cell_graph_dataset_for_prediction(pert, self.ctrl_adata, self.pert_list, self.device) loader = DataLoader(cg, 300, shuffle=False) batch = next(iter(loader)) batch.to(self.device) with torch.no_grad(): if self.config['uncertainty']: p, unc = self.best_model(batch) results_logvar['_'.join(pert)] = np.mean(unc.detach().cpu().numpy(), axis=0) results_logvar_sum['_'.join(pert)] = np.exp(-np.mean(results_logvar['_'.join(pert)])) else: p = self.best_model(batch) results_pred['_'.join(pert)] = np.mean(p.detach().cpu().numpy(), axis=0) self.saved_pred.update(results_pred) if self.config['uncertainty']: self.saved_logvar_sum.update(results_logvar_sum) return results_pred, results_logvar_sum else: return results_pred def GI_predict(self, combo, GI_genes_file='./genes_with_hi_mean.npy'): """ Predict the GI scores following perturbation of a given gene combination Parameters ---------- combo: list list of genes to be perturbed GI_genes_file: str path to the file containing genes with high mean expression Returns ------- GI scores for the given combinatorial perturbation based on GEARS predictions """ # if uncertainty mode is on, also return uncertainty score. try: # If prediction is already saved, then skip inference pred = {} pred[combo[0]] = self.saved_pred[combo[0]] pred[combo[1]] = self.saved_pred[combo[1]] pred['_'.join(combo)] = self.saved_pred['_'.join(combo)] except: if self.config['uncertainty']: pred = self.predict([[combo[0]], [combo[1]], combo])[0] else: pred = self.predict([[combo[0]], [combo[1]], combo])
warnings.filterwarnings("ignore") class GEARS: """ GEARS base model class """ def __init__(self, pert_data, device='cuda', weight_bias_track=False, proj_name='GEARS', exp_name='GEARS'): """ Initialize GEARS model Parameters ---------- pert_data: PertData object dataloader for perturbation data device: str Device to run the model on. Default: 'cuda' weight_bias_track: bool Whether to track performance on wandb. Default: False proj_name: str Project name for wandb. Default: 'GEARS' exp_name: str Experiment name for wandb. Default: 'GEARS' Returns ------- None """ self.weight_bias_track = weight_bias_track if self.weight_bias_track: wandb.init(project=proj_name, name=exp_name) self.wandb = wandb else: self.wandb = None self.device = device self.config = None self.dataloader = pert_data.dataloader self.adata = pert_data.adata self.node_map = pert_data.node_map self.node_map_pert = pert_data.node_map_pert self.data_path = pert_data.data_path self.dataset_name = pert_data.dataset_name self.split = pert_data.split self.seed = pert_data.seed self.train_gene_set_size = pert_data.train_gene_set_size self.set2conditions = pert_data.set2conditions self.subgroup = pert_data.subgroup self.gene_list = pert_data.gene_names.values.tolist() self.pert_list = pert_data.pert_names.tolist() self.num_genes = len(self.gene_list) self.num_perts = len(self.pert_list) self.default_pert_graph = pert_data.default_pert_graph self.saved_pred = {} self.saved_logvar_sum = {} self.ctrl_expression = torch.tensor( np.mean(self.adata.X[self.adata.obs.condition == 'ctrl'], axis=0)).reshape(-1, ).to(self.device) pert_full_id2pert = dict(self.adata.obs[['condition_name', 'condition']].values) self.dict_filter = {pert_full_id2pert[i]: j for i, j in self.adata.uns['non_zeros_gene_idx'].items() if i in pert_full_id2pert} self.ctrl_adata = self.adata[self.adata.obs['condition'] == 'ctrl'] gene_dict = {g: i for i, g in enumerate(self.gene_list)} self.pert2gene = {p: gene_dict[pert] for p, pert in enumerate(self.pert_list) if pert in self.gene_list} def tunable_parameters(self): """ Return the tunable parameters of the model Returns ------- dict Tunable parameters of the model """ return {'hidden_size': 'hidden dimension, default 64', 'num_go_gnn_layers': 'number of GNN layers for GO graph, default 1', 'num_gene_gnn_layers': 'number of GNN layers for co-expression gene graph, default 1', 'decoder_hidden_size': 'hidden dimension for gene-specific decoder, default 16', 'num_similar_genes_go_graph': 'number of maximum similar K genes in the GO graph, default 20', 'num_similar_genes_co_express_graph': 'number of maximum similar K genes in the co expression graph, default 20', 'coexpress_threshold': 'pearson correlation threshold when constructing coexpression graph, default 0.4', 'uncertainty': 'whether or not to turn on uncertainty mode, default False', 'uncertainty_reg': 'regularization term to balance uncertainty loss and prediction loss, default 1', 'direction_lambda': 'regularization term to balance direction loss and prediction loss, default 1' } def model_initialize(self, hidden_size=64, num_go_gnn_layers=1, num_gene_gnn_layers=1, decoder_hidden_size=16, num_similar_genes_go_graph=20, num_similar_genes_co_express_graph=20, coexpress_threshold=0.4, uncertainty=False, uncertainty_reg=1, direction_lambda=1e-1, G_go=None, G_go_weight=None, G_coexpress=None, G_coexpress_weight=None, no_perturb=False, ): """ Initialize the model Parameters ---------- hidden_size: int hidden dimension, default 64 num_go_gnn_layers: int number of GNN 
layers for GO graph, default 1 num_gene_gnn_layers: int number of GNN layers for co-expression gene graph, default 1 decoder_hidden_size: int hidden dimension for gene-specific decoder, default 16 num_similar_genes_go_graph: int number of maximum similar K genes in the GO graph, default 20 num_similar_genes_co_express_graph: int number of maximum similar K genes in the co expression graph, default 20 coexpress_threshold: float pearson correlation threshold when constructing coexpression graph, default 0.4 uncertainty: bool whether or not to turn on uncertainty mode, default False uncertainty_reg: float regularization term to balance uncertainty loss and prediction loss, default 1 direction_lambda: float regularization term to balance direction loss and prediction loss, default 1 G_go: scipy.sparse.csr_matrix GO graph, default None G_go_weight: scipy.sparse.csr_matrix GO graph edge weights, default None G_coexpress: scipy.sparse.csr_matrix co-expression graph, default None G_coexpress_weight: scipy.sparse.csr_matrix co-expression graph edge weights, default None no_perturb: bool predict no perturbation condition, default False Returns ------- None """ self.config = {'hidden_size': hidden_size, 'num_go_gnn_layers': num_go_gnn_layers, 'num_gene_gnn_layers': num_gene_gnn_layers, 'decoder_hidden_size': decoder_hidden_size, 'num_similar_genes_go_graph': num_similar_genes_go_graph, 'num_similar_genes_co_express_graph': num_similar_genes_co_express_graph, 'coexpress_threshold': coexpress_threshold, 'uncertainty': uncertainty, 'uncertainty_reg': uncertainty_reg, 'direction_lambda': direction_lambda, 'G_go': G_go, 'G_go_weight': G_go_weight, 'G_coexpress': G_coexpress, 'G_coexpress_weight': G_coexpress_weight, 'device': self.device, 'num_genes': self.num_genes, 'num_perts': self.num_perts, 'no_perturb': no_perturb } if self.wandb: self.wandb.config.update(self.config) if self.config['G_coexpress'] is None: # calculating co expression similarity graph edge_list = get_similarity_network(network_type='co-express', adata=self.adata, threshold=coexpress_threshold, k=num_similar_genes_co_express_graph, data_path=self.data_path, data_name=self.dataset_name, split=self.split, seed=self.seed, train_gene_set_size=self.train_gene_set_size, set2conditions=self.set2conditions) sim_network = GeneSimNetwork(edge_list, self.gene_list, node_map=self.node_map) self.config['G_coexpress'] = sim_network.edge_index self.config['G_coexpress_weight'] = sim_network.edge_weight if self.config['G_go'] is None: # calculating gene ontology similarity graph edge_list = get_similarity_network(network_type='go', adata=self.adata, threshold=coexpress_threshold, k=num_similar_genes_go_graph, pert_list=self.pert_list, data_path=self.data_path, data_name=self.dataset_name, split=self.split, seed=self.seed, train_gene_set_size=self.train_gene_set_size, set2conditions=self.set2conditions, default_pert_graph=self.default_pert_graph) sim_network = GeneSimNetwork(edge_list, self.pert_list, node_map=self.node_map_pert) self.config['G_go'] = sim_network.edge_index self.config['G_go_weight'] = sim_network.edge_weight self.model = GEARS_Model(self.config).to(self.device) self.best_model = deepcopy(self.model) def load_pretrained(self, path): """ Load pretrained model Parameters ---------- path: str path to the pretrained model Returns ------- None """ with open(os.path.join(path, 'config.pkl'), 'rb') as f: config = pickle.load(f) del config['device'], config['num_genes'], config['num_perts'] self.model_initialize(**config) self.config = config 
state_dict = torch.load(os.path.join(path, 'model.pt'), map_location=torch.device('cpu')) if next(iter(state_dict))[:7] == 'module.': # the pretrained model is from data-parallel module new_state_dict = OrderedDict() for k, v in state_dict.items(): name = k[7:] # remove `module.` new_state_dict[name] = v state_dict = new_state_dict self.model.load_state_dict(state_dict) self.model = self.model.to(self.device) self.best_model = self.model def save_model(self, path): """ Save the model Parameters ---------- path: str path to save the model Returns ------- None """ if not os.path.exists(path): os.mkdir(path) if self.config is None: raise ValueError('No model is initialized...') with open(os.path.join(path, 'config.pkl'), 'wb') as f: pickle.dump(self.config, f) torch.save(self.best_model.state_dict(), os.path.join(path, 'model.pt')) def predict(self, pert_list): """ Predict the transcriptome given a list of genes/gene combinations being perturbed Parameters ---------- pert_list: list list of genes/gene combiantions to be perturbed Returns ------- results_pred: dict dictionary of predicted transcriptome results_logvar: dict dictionary of uncertainty score """ # given a list of single/combo genes, return the transcriptome # if uncertainty mode is on, also return uncertainty score. self.ctrl_adata = self.adata[self.adata.obs['condition'] == 'ctrl'] for pert in pert_list: for i in pert: if i not in self.pert_list: raise ValueError(i + " is not in the perturbation graph. " "Please select from GEARS.pert_list!") if self.config['uncertainty']: results_logvar = {} self.best_model = self.best_model.to(self.device) self.best_model.eval() results_pred = {} results_logvar_sum = {} for pert in pert_list: try: # If prediction is already saved, then skip inference results_pred['_'.join(pert)] = self.saved_pred['_'.join(pert)] if self.config['uncertainty']: results_logvar_sum['_'.join(pert)] = self.saved_logvar_sum['_'.join(pert)] continue except: pass cg = create_cell_graph_dataset_for_prediction(pert, self.ctrl_adata, self.pert_list, self.device) loader = DataLoader(cg, 300, shuffle=False) batch = next(iter(loader)) batch.to(self.device) with torch.no_grad(): if self.config['uncertainty']: p, unc = self.best_model(batch) results_logvar['_'.join(pert)] = np.mean(unc.detach().cpu().numpy(), axis=0) results_logvar_sum['_'.join(pert)] = np.exp(-np.mean(results_logvar['_'.join(pert)])) else: p = self.best_model(batch) results_pred['_'.join(pert)] = np.mean(p.detach().cpu().numpy(), axis=0) self.saved_pred.update(results_pred) if self.config['uncertainty']: self.saved_logvar_sum.update(results_logvar_sum) return results_pred, results_logvar_sum else: return results_pred def GI_predict(self, combo, GI_genes_file='./genes_with_hi_mean.npy'): """ Predict the GI scores following perturbation of a given gene combination Parameters ---------- combo: list list of genes to be perturbed GI_genes_file: str path to the file containing genes with high mean expression Returns ------- GI scores for the given combinatorial perturbation based on GEARS predictions """ # if uncertainty mode is on, also return uncertainty score. try: # If prediction is already saved, then skip inference pred = {} pred[combo[0]] = self.saved_pred[combo[0]] pred[combo[1]] = self.saved_pred[combo[1]] pred['_'.join(combo)] = self.saved_pred['_'.join(combo)] except: if self.config['uncertainty']: pred = self.predict([[combo[0]], [combo[1]], combo])[0] else: pred = self.predict([[combo[0]], [combo[1]], combo])
mean_control = get_mean_control(self.adata).values
9
2023-10-13 14:20:34+00:00
16k
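The 'module.'-prefix handling in the record's load_pretrained is a general recipe for loading checkpoints saved from nn.DataParallel; a self-contained sketch with a toy model and a simulated parallel checkpoint.

from collections import OrderedDict
import torch.nn as nn

model = nn.Linear(4, 2)

# Simulate a checkpoint written by nn.DataParallel: every key gains 'module.'.
state_dict = OrderedDict(("module." + k, v) for k, v in model.state_dict().items())

if next(iter(state_dict)).startswith("module."):
    # Strip the prefix so the keys match a plain (non-parallel) module again.
    state_dict = OrderedDict((k[len("module."):], v) for k, v in state_dict.items())

model.load_state_dict(state_dict)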
weavel-ai/promptmodel-python
promptmodel/chat_model.py
[ { "identifier": "DevClient", "path": "promptmodel/dev_app.py", "snippet": "class DevClient:\n \"\"\"DevClient main class\"\"\"\n\n def __init__(self):\n self.function_models: List[FunctionModelInterface] = []\n self.chat_models: List[ChatModelInterface] = []\n\n def register(self,...
from dataclasses import dataclass from typing import Any, Dict, List, Optional, Coroutine, Union from uuid import uuid4 from litellm import ModelResponse from promptmodel import DevClient from promptmodel.llms.llm_proxy import LLMProxy from promptmodel.utils import logger from promptmodel.utils.config_utils import ( read_config, upsert_config, check_connection_status_decorator, ) from promptmodel.utils.async_utils import run_async_in_sync from promptmodel.types.response import LLMStreamResponse, LLMResponse, ChatModelConfig from promptmodel.types.enums import InstanceType from promptmodel.types.request import ChatLogRequest from promptmodel.apis.base import AsyncAPIClient import sys
12,995
message_logs=message_logs, ) @check_connection_status_decorator def add_messages( self, new_messages: List[Dict[str, Any]], metadata_list: List[Optional[Dict]] = [], *args, **kwargs, ) -> None: """Add messages to the chat model. Args: new_messages (List[Dict[str, Any]]): list of messages. Each message is a dict with 'role', 'content', and 'function_call'. """ # Save messages to Cloud DB log_uuid_list = [str(uuid4()) for _ in range(len(new_messages))] run_async_in_sync( self.llm_proxy._async_chat_log_to_cloud( session_uuid=str(self.session_uuid), version_uuid=None, chat_log_request_list=[ ChatLogRequest(**{"message": message, "uuid": str(uuid4())}) for message in new_messages ], ) ) self.recent_log_uuid = log_uuid_list[-1] @check_connection_status_decorator def run( self, functions: Optional[List[Dict[str, Any]]] = None, tools: Optional[List[Dict[str, Any]]] = None, stream: Optional[bool] = False, *args, **kwargs, ) -> LLMResponse: """Run FunctionModel. It does not raise error. Args: functions (List[Dict[str, Any]], optional): list of functions to run. Defaults to None. Returns: LLMResponse: response from the promptmodel. you can find raw output in response.raw_output or response.api_response['choices'][0]['message']['content']. Error: It does not raise error. If error occurs, you can check error in response.error and error_log in response.error_log. """ if stream: def gen(): cache: Optional[LLMStreamResponse] = None for item in self.llm_proxy.chat_stream( self.session_uuid, functions, tools ): yield item cache: LLMStreamResponse = item if cache: self.recent_log_uuid = cache.pm_detail.log_uuid return gen() else: res = self.llm_proxy.chat_run(self.session_uuid, functions, tools) self.recent_log_uuid = res.pm_detail.log_uuid return res # return self.llm_proxy.chat_run(self.session_uuid, functions, self.api_key) @check_connection_status_decorator async def arun( self, functions: Optional[List[Dict[str, Any]]] = None, tools: Optional[List[Dict[str, Any]]] = None, stream: Optional[bool] = False, *args, **kwargs, ) -> LLMResponse: """Async run FunctionModel. It does not raise error. Args: functions (List[Dict[str, Any]], optional): list of functions to run. Defaults to None. Returns: LLMResponse: response from the promptmodel. you can find raw output in response.raw_output or response.api_response['choices'][0]['message']['content']. Error: It does not raise error. If error occurs, you can check error in response.error and error_log in response.error_log. """ if stream: async def async_gen(): cache: Optional[LLMStreamResponse] = None async for item in self.llm_proxy.chat_astream( self.session_uuid, functions, tools ): yield item cache: LLMStreamResponse = item if cache: self.recent_log_uuid = cache.pm_detail.log_uuid return async_gen() else: res: LLMResponse = await self.llm_proxy.chat_arun( self.session_uuid, functions ) self.recent_log_uuid = res.pm_detail.log_uuid return res # return await self.llm_proxy.chat_arun( # self.session_uuid, functions, self.api_key # ) @check_connection_status_decorator async def log_score_to_session( self, score: Optional[Dict[str, Any]] = {}, *args, **kwargs ): try:
from __future__ import annotations class RegisteringMeta(type): def __call__(cls, *args, **kwargs): instance: ChatModel = super().__call__(*args, **kwargs) # Find the global client instance in the current context client = cls.find_client_instance() if client is not None: client.register_chat_model(instance.name) return instance @staticmethod def find_client_instance(): # Get the current frame frame = sys._getframe(2) # Get global variables in the current frame global_vars = frame.f_globals # Find an instance of Client among global variables for var_name, var_val in global_vars.items(): if isinstance(var_val, DevClient): return var_val return None class ChatModel(metaclass=RegisteringMeta): """ Args: name (_type_): _description_ version (Optional[ Union[str, int] ], optional): Choose which FunctionModel version to use. Defaults to "deploy". It can be "deploy", "latest", or version number. api_key (Optional[str], optional): API key for the LLM. Defaults to None. If None, use api_key in .env file. """ def __init__( self, name, session_uuid: str = None, version: Optional[Union[str, int]] = "deploy", api_key: Optional[str] = None, ): self.name = name self.api_key = api_key self.llm_proxy = LLMProxy(name, version) self.version = version self.recent_log_uuid = None if session_uuid is None: self.session_uuid = str(uuid4()) instruction, version_details, chat_logs = run_async_in_sync( LLMProxy.fetch_chat_model(self.name, None, version) ) config = read_config() if ( "connection" in config and "initializing" in config["connection"] and config["connection"]["initializing"] == True ): return elif ( "connection" in config and "reloading" in config["connection"] and config["connection"]["reloading"] == True ): return else: run_async_in_sync( self.llm_proxy._async_make_session_cloud( self.session_uuid, version_details["uuid"], ) ) else: self.session_uuid = session_uuid @check_connection_status_decorator def get_config( self, *args, **kwargs, ) -> ChatModelConfig: """Get config for the ChatModel. It will fetch the published prompt and version config from the Cloud. (It will be saved in cache DB, so there is no extra latency for API call.) - If you made A/B testing in Web Dashboard, it will fetch the prompt randomly by the A/B testing ratio. If dev mode is initializing, it will return None Returns: ChatModelConfig: config for the ChatModel, which contains prompts and version_detail, message_logs """ prompt, version_detail, message_logs = run_async_in_sync( LLMProxy.fetch_chat_model(self.name, self.session_uuid, self.version) ) return ChatModelConfig( system_prompt=prompt, model=version_detail["model"], name=self.name, version_uuid=str(version_detail["uuid"]), version=version_detail["version"], message_logs=message_logs, ) @check_connection_status_decorator def add_messages( self, new_messages: List[Dict[str, Any]], metadata_list: List[Optional[Dict]] = [], *args, **kwargs, ) -> None: """Add messages to the chat model. Args: new_messages (List[Dict[str, Any]]): list of messages. Each message is a dict with 'role', 'content', and 'function_call'. 
""" # Save messages to Cloud DB log_uuid_list = [str(uuid4()) for _ in range(len(new_messages))] run_async_in_sync( self.llm_proxy._async_chat_log_to_cloud( session_uuid=str(self.session_uuid), version_uuid=None, chat_log_request_list=[ ChatLogRequest(**{"message": message, "uuid": str(uuid4())}) for message in new_messages ], ) ) self.recent_log_uuid = log_uuid_list[-1] @check_connection_status_decorator def run( self, functions: Optional[List[Dict[str, Any]]] = None, tools: Optional[List[Dict[str, Any]]] = None, stream: Optional[bool] = False, *args, **kwargs, ) -> LLMResponse: """Run FunctionModel. It does not raise error. Args: functions (List[Dict[str, Any]], optional): list of functions to run. Defaults to None. Returns: LLMResponse: response from the promptmodel. you can find raw output in response.raw_output or response.api_response['choices'][0]['message']['content']. Error: It does not raise error. If error occurs, you can check error in response.error and error_log in response.error_log. """ if stream: def gen(): cache: Optional[LLMStreamResponse] = None for item in self.llm_proxy.chat_stream( self.session_uuid, functions, tools ): yield item cache: LLMStreamResponse = item if cache: self.recent_log_uuid = cache.pm_detail.log_uuid return gen() else: res = self.llm_proxy.chat_run(self.session_uuid, functions, tools) self.recent_log_uuid = res.pm_detail.log_uuid return res # return self.llm_proxy.chat_run(self.session_uuid, functions, self.api_key) @check_connection_status_decorator async def arun( self, functions: Optional[List[Dict[str, Any]]] = None, tools: Optional[List[Dict[str, Any]]] = None, stream: Optional[bool] = False, *args, **kwargs, ) -> LLMResponse: """Async run FunctionModel. It does not raise error. Args: functions (List[Dict[str, Any]], optional): list of functions to run. Defaults to None. Returns: LLMResponse: response from the promptmodel. you can find raw output in response.raw_output or response.api_response['choices'][0]['message']['content']. Error: It does not raise error. If error occurs, you can check error in response.error and error_log in response.error_log. """ if stream: async def async_gen(): cache: Optional[LLMStreamResponse] = None async for item in self.llm_proxy.chat_astream( self.session_uuid, functions, tools ): yield item cache: LLMStreamResponse = item if cache: self.recent_log_uuid = cache.pm_detail.log_uuid return async_gen() else: res: LLMResponse = await self.llm_proxy.chat_arun( self.session_uuid, functions ) self.recent_log_uuid = res.pm_detail.log_uuid return res # return await self.llm_proxy.chat_arun( # self.session_uuid, functions, self.api_key # ) @check_connection_status_decorator async def log_score_to_session( self, score: Optional[Dict[str, Any]] = {}, *args, **kwargs ): try:
res = await AsyncAPIClient.execute(
12
2023-10-09 03:35:44+00:00
16k
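The ChatModel record above turns on a metaclass trick: at instantiation time it walks the caller's stack frame, looks for a DevClient among the globals, and auto-registers the new model with it. Below is a minimal, self-contained sketch of that registration pattern; it assumes nothing about the promptmodel API, and the Client and Model names are illustrative stand-ins.

import sys

class Client:
    """Illustrative stand-in for a global client that collects model names."""
    def __init__(self):
        self.registered = []

    def register(self, name):
        self.registered.append(name)

class RegisteringMeta(type):
    def __call__(cls, *args, **kwargs):
        instance = super().__call__(*args, **kwargs)
        # Frame 1 is the code that wrote Model(...); scan its globals
        # for a Client and register the fresh instance with it.
        caller_globals = sys._getframe(1).f_globals
        for value in caller_globals.values():
            if isinstance(value, Client):
                value.register(instance.name)
                break
        return instance

class Model(metaclass=RegisteringMeta):
    def __init__(self, name):
        self.name = name

client = Client()
Model("summarizer")
print(client.registered)  # ['summarizer']

The upside is that user code never calls register() explicitly; the cost is the fragile frame inspection, which breaks if instantiation happens behind an extra wrapper level (which is why the record uses sys._getframe(2) from inside a helper method).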
cambridgeltl/ClaPS
algs/genetics.py
[ { "identifier": "BaseTrainer", "path": "algs/base_trainer.py", "snippet": "class BaseTrainer(abc.ABC):\n \"\"\"\n The base trainer class.\n\n Attributes:\n obj_func: the callable function handle for model interfacing.\n logger: an optional logger object.\n bn_calibrator: a ...
import random import numpy as np from typing import Any from .base_trainer import BaseTrainer from utils.fsc_datasets import PromptedClassificationDataset from rewards.text_classification_reward import PromptedClassificationReward
11,416
class Genetics: def __init__(self, crossover_tokenizer, vocab_id): self.crossover_tokenizer = crossover_tokenizer self.vocab_id = vocab_id def mutate(self, x, prob=0.1): """ Mutates the input string by replacing tokens with a certain probability. Args: x (str): The input string. prob (float, optional): The probability of replacing each token. Defaults to 0.1. Returns: str: The mutated string. """ x_list = self.crossover_tokenizer.encode(x) def pick_another(x_, candidates): return ( x_ if len(candidates) == 1 else random.choice([v for v in candidates if v != x_]) ) for i, element in enumerate(x_list): if i == 0 or i == len(x_list) - 1: continue if random.random() < prob: x_list[i] = pick_another(element, self.vocab_id) out = self.crossover_tokenizer.decode(x_list, skip_special_tokens=True) return out def crossover(self, x1, x2): """ Performs crossover between two input strings. Args: x1 (str): The first input string. x2 (str): The second input string. Returns: str: The crossover result. """ def _crossover_helper(v1, v2): return v1 if random.random() < 0.5 else v2 def _inbalance_helper(v1, v2): n_tokens = min(len(v1), len(v2)) max_n = max(len(v1), len(v2)) out_token = [] for i in range(n_tokens): out_token.append(v1[i] if random.random() < 0.5 else v2[i]) for i in range(n_tokens, max_n): out_token.append(v1[i] if len(v1) > n_tokens else v2[i]) return out_token x1_tokens = self.crossover_tokenizer.encode(x1) x2_tokens = self.crossover_tokenizer.encode(x2) x = _crossover_helper(x1_tokens, x2_tokens) ret = self.crossover_tokenizer.decode(x, skip_special_tokens=True) return ret def random_string(self, length=5): """ Generates a random string of a specified length. Args: length (int, optional): The length of the random string. Defaults to 5. Returns: str: The random string. """ choices = self.vocab_id out = random.choices(choices, k=length) out = self.crossover_tokenizer.decode(out, skip_special_tokens=True) return out def random_extend_pop(self, pop: list, n: int) -> list: """ Extends the population with random strings. Args: pop (list): The population. n (int): The number of random strings to generate. Returns: list: The extended population. """ pop = [p + self.random_string(n) for p in pop] return pop class GeneticAlgorithmTrainer(BaseTrainer): def __init__( self, pop_size: int, mutate_size: int, crossover_size: int, epochs: int, mutate_frac: float, str_len: int, stages: int, n_classes: int, eval_batch_size: int, genetics: Genetics,
class Genetics: def __init__(self, crossover_tokenizer, vocab_id): self.crossover_tokenizer = crossover_tokenizer self.vocab_id = vocab_id def mutate(self, x, prob=0.1): """ Mutates the input string by replacing tokens with a certain probability. Args: x (str): The input string. prob (float, optional): The probability of replacing each token. Defaults to 0.1. Returns: str: The mutated string. """ x_list = self.crossover_tokenizer.encode(x) def pick_another(x_, candidates): return ( x_ if len(candidates) == 1 else random.choice([v for v in candidates if v != x_]) ) for i, element in enumerate(x_list): if i == 0 or i == len(x_list) - 1: continue if random.random() < prob: x_list[i] = pick_another(element, self.vocab_id) out = self.crossover_tokenizer.decode(x_list, skip_special_tokens=True) return out def crossover(self, x1, x2): """ Performs crossover between two input strings. Args: x1 (str): The first input string. x2 (str): The second input string. Returns: str: The crossover result. """ def _crossover_helper(v1, v2): return v1 if random.random() < 0.5 else v2 def _inbalance_helper(v1, v2): n_tokens = min(len(v1), len(v2)) max_n = max(len(v1), len(v2)) out_token = [] for i in range(n_tokens): out_token.append(v1[i] if random.random() < 0.5 else v2[i]) for i in range(n_tokens, max_n): out_token.append(v1[i] if len(v1) > n_tokens else v2[i]) return out_token x1_tokens = self.crossover_tokenizer.encode(x1) x2_tokens = self.crossover_tokenizer.encode(x2) x = _crossover_helper(x1_tokens, x2_tokens) ret = self.crossover_tokenizer.decode(x, skip_special_tokens=True) return ret def random_string(self, length=5): """ Generates a random string of a specified length. Args: length (int, optional): The length of the random string. Defaults to 5. Returns: str: The random string. """ choices = self.vocab_id out = random.choices(choices, k=length) out = self.crossover_tokenizer.decode(out, skip_special_tokens=True) return out def random_extend_pop(self, pop: list, n: int) -> list: """ Extends the population with random strings. Args: pop (list): The population. n (int): The number of random strings to generate. Returns: list: The extended population. """ pop = [p + self.random_string(n) for p in pop] return pop class GeneticAlgorithmTrainer(BaseTrainer): def __init__( self, pop_size: int, mutate_size: int, crossover_size: int, epochs: int, mutate_frac: float, str_len: int, stages: int, n_classes: int, eval_batch_size: int, genetics: Genetics,
obj_func: PromptedClassificationReward,
2
2023-10-08 12:39:44+00:00
16k
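The Genetics record runs mutation and crossover over tokenizer ids. The operators are easier to see stripped of the tokenizer: here is a minimal sketch on plain integer token lists, following the record's interior-only mutation and its position-wise _inbalance_helper crossover. The toy vocabulary is an assumption, not a real tokenizer's id space.

import random

def mutate(token_ids, vocab, prob=0.1):
    # Keep the boundary tokens fixed, as the record does for special tokens.
    out = list(token_ids)
    for i in range(1, len(out) - 1):
        if random.random() < prob:
            alternatives = [v for v in vocab if v != out[i]]
            if alternatives:
                out[i] = random.choice(alternatives)
    return out

def crossover(a, b):
    # Pick each aligned position from either parent, then copy the longer tail.
    n = min(len(a), len(b))
    child = [a[i] if random.random() < 0.5 else b[i] for i in range(n)]
    child += (a if len(a) > len(b) else b)[n:]
    return child

random.seed(0)
vocab = list(range(10))
parent1, parent2 = [0, 3, 5, 7, 9], [0, 2, 4, 9]
print(mutate(parent1, vocab, prob=0.5))
print(crossover(parent1, parent2))

Note that the record's _crossover_helper actually returns one whole parent at random; the position-wise mixing sketched here corresponds to its _inbalance_helper variant.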
clessig/atmorep
atmorep/core/atmorep_model.py
[ { "identifier": "identity", "path": "atmorep/utils/utils.py", "snippet": "def identity( func, *args) :\n return func( *args)" }, { "identifier": "NetMode", "path": "atmorep/utils/utils.py", "snippet": "class NetMode( Enum) :\n indeterminate = 0\n train = 1\n test = 2" }, { "i...
import torch import numpy as np import code import atmorep.utils.utils as utils from atmorep.utils.utils import identity from atmorep.utils.utils import NetMode from atmorep.utils.utils import get_model_filename from atmorep.transformer.transformer_base import prepare_token from atmorep.transformer.transformer_base import checkpoint_wrapper from atmorep.datasets.multifield_data_sampler import MultifieldDataSampler from atmorep.transformer.transformer_encoder import TransformerEncoder from atmorep.transformer.transformer_decoder import TransformerDecoder from atmorep.transformer.tail_ensemble import TailEnsemble
11,801
cf = self.cf self.devices = devices size_token_info = 6 self.fields_coupling_idx = [] self.fields_index = {} for ifield, field_info in enumerate(cf.fields) : self.fields_index[ field_info[0] ] = ifield # # embedding network for global/auxiliary token infos # TODO: only for backward compatibility, remove self.embed_token_info = torch.nn.Linear( cf.size_token_info, cf.size_token_info_net) torch.nn.init.constant_( self.embed_token_info.weight, 0.0) self.embeds_token_info = torch.nn.ModuleList() for ifield, field_info in enumerate( cf.fields) : self.embeds_token_info.append( torch.nn.Linear( cf.size_token_info, cf.size_token_info_net)) if len(field_info[1]) > 4 and load_pretrained : # TODO: inconsistent with embeds_token_info -> version that can handle both # we could imply use the file name: embed_token_info vs embeds_token_info name = 'AtmoRep' + '_embed_token_info' mloaded = torch.load( get_model_filename( name, field_info[1][4][0], field_info[1][4][1])) self.embeds_token_info[-1].load_state_dict( mloaded) print( 'Loaded embed_token_info from id = {}.'.format( field_info[1][4][0] ) ) else : # initalization torch.nn.init.constant_( self.embeds_token_info[-1].weight, 0.0) self.embeds_token_info[-1].bias.data.fill_(0.0) # embedding and encoder self.embeds = torch.nn.ModuleList() self.encoders = torch.nn.ModuleList() self.masks = torch.nn.ParameterList() for field_idx, field_info in enumerate(cf.fields) : # learnabl class token if cf.learnable_mask : mask = torch.nn.Parameter( 0.1 * torch.randn( np.prod( field_info[4]), requires_grad=True)) self.masks.append( mask.to(devices[0])) else : self.masks.append( None) # encoder self.encoders.append( TransformerEncoder( cf, field_idx, True).create()) # load pre-trained model if specified if len(field_info[1]) > 4 and load_pretrained : self.load_block( field_info, 'encoder', self.encoders[-1]) self.embeds.append( self.encoders[-1].embed) # indices of coupled fields for efficient access in forward self.fields_coupling_idx.append( [field_idx]) for field_coupled in field_info[1][2] : if 'axial' in cf.encoder_att_type : self.fields_coupling_idx[field_idx].append( self.fields_index[field_coupled] ) else : for _ in range(cf.coupling_num_heads_per_field) : self.fields_coupling_idx[field_idx].append( self.fields_index[field_coupled] ) # decoder self.decoders = torch.nn.ModuleList() self.field_pred_idxs = [] for field in cf.fields_prediction : for ifield, field_info in enumerate(cf.fields) : if field_info[0] == field[0] : self.field_pred_idxs.append( ifield) break self.decoders.append( TransformerDecoder( cf, field_info ) ) # load pre-trained model if specified if len(field_info[1]) > 4 and load_pretrained : self.load_block( field_info, 'decoder', self.decoders[-1]) # tail networks self.tails = torch.nn.ModuleList() for ifield, field in enumerate(cf.fields_prediction) : field_idx = self.field_pred_idxs[ifield] field_info = cf.fields[field_idx] self.tails.append( TailEnsemble( cf, field_info[1][1], np.prod(field_info[4]) ).create()) # load pre-trained model if specified if len(field_info[1]) > 4 and load_pretrained: self.load_block( field_info, 'tail', self.tails[-1]) # set devices for field_idx, field_info in enumerate(cf.fields) : # find determined device, use default if nothing specified device = self.devices[0] if len(field_info[1]) > 3 : assert field_info[1][3] < 4, 'Only single node model parallelism supported' assert field_info[1][3] < len(devices), 'Per field device id larger than max devices' device = self.devices[ field_info[1][3] ] # set device if 
self.masks[field_idx] != None : self.masks[field_idx].to(device) self.embeds[field_idx].to(device) self.encoders[field_idx].to(device) for field_idx, field in enumerate(cf.fields_prediction) : field_info = cf.fields[ self.field_pred_idxs[field_idx] ] device = self.devices[0] if len(field_info[1]) > 3 : device = self.devices[ field_info[1][3] ] self.decoders[field_idx].to(device) self.tails[field_idx].to(device) # embed_token_info on device[0] since it is shared by all fields, potentially sub-optimal self.embed_token_info.to(devices[0]) # TODO: only for backward compatibility, remove self.embeds_token_info.to(devices[0]) self.checkpoint = identity if cf.grad_checkpointing :
#################################################################################################### # # Copyright (C) 2022 # #################################################################################################### # # project : atmorep # # author : atmorep collaboration # # description : # # license : # #################################################################################################### # code.interact(local=locals()) # import horovod.torch as hvd #################################################################################################### class AtmoRepData( torch.nn.Module) : def __init__( self, net) : '''Wrapper class for AtmoRep that handles data loading''' super( AtmoRepData, self).__init__() self.data_loader_test = None self.data_loader_train = None self.data_loader_iter = None self.net = net # ensure that all data loaders have the same seed and hence load the same data self.rng_seed = net.cf.rng_seed if not self.rng_seed : self.rng_seed = int(torch.randint( 100000000, (1,))) ################################################### def load_data( self, mode : NetMode, batch_size = -1, num_loader_workers = -1) : '''Load data''' cf = self.net.cf if batch_size < 0 : batch_size = cf.batch_size_max if num_loader_workers < 0 : num_loader_workers = cf.num_loader_workers if mode == NetMode.train : self.data_loader_train = self._load_data( self.dataset_train, batch_size, num_loader_workers) elif mode == NetMode.test : batch_size = cf.batch_size_test self.data_loader_test = self._load_data( self.dataset_test, batch_size, num_loader_workers) else : assert False ################################################### def _load_data( self, dataset, batch_size, num_loader_workers) : '''Private implementation for load''' dataset.load_data( batch_size) loader_params = { 'batch_size': None, 'batch_sampler': None, 'shuffle': False, 'num_workers': num_loader_workers, 'pin_memory': True} data_loader = torch.utils.data.DataLoader( dataset, **loader_params, sampler = None) return data_loader ################################################### def set_data( self, mode : NetMode, times_pos, batch_size = -1, num_loader_workers = -1) : cf = self.net.cf if batch_size < 0 : batch_size = cf.batch_size_train if mode == NetMode.train else cf.batch_size_test dataset = self.dataset_train if mode == NetMode.train else self.dataset_test dataset.set_data( times_pos, batch_size) self._set_data( dataset, mode, batch_size, num_loader_workers) ################################################### def set_global( self, mode : NetMode, times, batch_size = -1, num_loader_workers = -1) : cf = self.net.cf if batch_size < 0 : batch_size = cf.batch_size_train if mode == NetMode.train else cf.batch_size_test dataset = self.dataset_train if mode == NetMode.train else self.dataset_test dataset.set_global( times, batch_size, cf.token_overlap) self._set_data( dataset, mode, batch_size, num_loader_workers) ################################################### def set_location( self, mode : NetMode, pos, years, months, num_t_samples_per_month, batch_size = -1, num_loader_workers = -1) : cf = self.net.cf if batch_size < 0 : batch_size = cf.batch_size_train if mode == NetMode.train else cf.batch_size_test dataset = self.dataset_train if mode == NetMode.train else self.dataset_test dataset.set_location( pos, years, months, num_t_samples_per_month, batch_size) self._set_data( dataset, mode, batch_size, num_loader_workers) ################################################### def _set_data( self, dataset, mode : NetMode, 
batch_size = -1, loader_workers = -1) : '''Private implementation for set_data, set_global''' cf = self.net.cf if loader_workers < 0 : loader_workers = cf.num_loader_workers loader_params = { 'batch_size': None, 'batch_sampler': None, 'shuffle': False, 'num_workers': loader_workers, 'pin_memory': True} if mode == NetMode.train : self.data_loader_train = torch.utils.data.DataLoader( dataset, **loader_params, sampler = None) elif mode == NetMode.test : self.data_loader_test = torch.utils.data.DataLoader( dataset, **loader_params, sampler = None) else : assert False ################################################### def normalizer( self, field, vl_idx) : if isinstance( field, str) : for fidx, field_info in enumerate(self.cf.fields) : if field == field_info[0] : break assert fidx < len(self.cf.fields), 'invalid field' normalizer = self.dataset_train.datasets[fidx].normalizer elif isinstance( field, int) : normalizer = self.dataset_train.datasets[field][vl_idx].normalizer else : assert False, 'invalid argument type (has to be index to cf.fields or field name)' return normalizer ################################################### def mode( self, mode : NetMode) : if mode == NetMode.train : self.data_loader_iter = iter(self.data_loader_train) self.net.train() elif mode == NetMode.test : self.data_loader_iter = iter(self.data_loader_test) self.net.eval() else : assert False self.cur_mode = mode ################################################### def len( self, mode : NetMode) : if mode == NetMode.train : return len(self.data_loader_train) elif mode == NetMode.test : return len(self.data_loader_test) else : assert False ################################################### def next( self) : return next(self.data_loader_iter) ################################################### def forward( self, xin) : pred = self.net.forward( xin) return pred ################################################### def get_attention( self, xin): #, field_idx) : attn = self.net.get_attention( xin) #, field_idx) return attn ################################################### def create( self, pre_batch, devices, create_net = True, pre_batch_targets = None, load_pretrained=True) : if create_net : self.net.create( devices, load_pretrained) self.pre_batch = pre_batch self.pre_batch_targets = pre_batch_targets cf = self.net.cf self.dataset_train = MultifieldDataSampler( cf.data_dir, cf.years_train, cf.fields, batch_size = cf.batch_size_start, num_t_samples = cf.num_t_samples, num_patches_per_t = cf.num_patches_per_t_train, num_load = cf.num_files_train, pre_batch = self.pre_batch, rng_seed = self.rng_seed, file_shape = cf.file_shape, smoothing = cf.data_smoothing, level_type = cf.level_type, file_format = cf.file_format, month = cf.month, time_sampling = cf.time_sampling, geo_range = cf.geo_range_sampling, fields_targets = cf.fields_targets, pre_batch_targets = self.pre_batch_targets ) self.dataset_test = MultifieldDataSampler( cf.data_dir, cf.years_test, cf.fields, batch_size = cf.batch_size_test, num_t_samples = cf.num_t_samples, num_patches_per_t = cf.num_patches_per_t_test, num_load = cf.num_files_test, pre_batch = self.pre_batch, rng_seed = self.rng_seed, file_shape = cf.file_shape, smoothing = cf.data_smoothing, level_type = cf.level_type, file_format = cf.file_format, month = cf.month, time_sampling = cf.time_sampling, geo_range = cf.geo_range_sampling, lat_sampling_weighted = cf.lat_sampling_weighted, fields_targets = cf.fields_targets, pre_batch_targets = self.pre_batch_targets ) return self 
#################################################################################################### class AtmoRep( torch.nn.Module) : def __init__(self, cf) : '''Constructor''' super( AtmoRep, self).__init__() self.cf = cf ################################################### def create( self, devices, load_pretrained=True) : '''Create network''' cf = self.cf self.devices = devices size_token_info = 6 self.fields_coupling_idx = [] self.fields_index = {} for ifield, field_info in enumerate(cf.fields) : self.fields_index[ field_info[0] ] = ifield # # embedding network for global/auxiliary token infos # TODO: only for backward compatibility, remove self.embed_token_info = torch.nn.Linear( cf.size_token_info, cf.size_token_info_net) torch.nn.init.constant_( self.embed_token_info.weight, 0.0) self.embeds_token_info = torch.nn.ModuleList() for ifield, field_info in enumerate( cf.fields) : self.embeds_token_info.append( torch.nn.Linear( cf.size_token_info, cf.size_token_info_net)) if len(field_info[1]) > 4 and load_pretrained : # TODO: inconsistent with embeds_token_info -> version that can handle both # we could imply use the file name: embed_token_info vs embeds_token_info name = 'AtmoRep' + '_embed_token_info' mloaded = torch.load( get_model_filename( name, field_info[1][4][0], field_info[1][4][1])) self.embeds_token_info[-1].load_state_dict( mloaded) print( 'Loaded embed_token_info from id = {}.'.format( field_info[1][4][0] ) ) else : # initalization torch.nn.init.constant_( self.embeds_token_info[-1].weight, 0.0) self.embeds_token_info[-1].bias.data.fill_(0.0) # embedding and encoder self.embeds = torch.nn.ModuleList() self.encoders = torch.nn.ModuleList() self.masks = torch.nn.ParameterList() for field_idx, field_info in enumerate(cf.fields) : # learnabl class token if cf.learnable_mask : mask = torch.nn.Parameter( 0.1 * torch.randn( np.prod( field_info[4]), requires_grad=True)) self.masks.append( mask.to(devices[0])) else : self.masks.append( None) # encoder self.encoders.append( TransformerEncoder( cf, field_idx, True).create()) # load pre-trained model if specified if len(field_info[1]) > 4 and load_pretrained : self.load_block( field_info, 'encoder', self.encoders[-1]) self.embeds.append( self.encoders[-1].embed) # indices of coupled fields for efficient access in forward self.fields_coupling_idx.append( [field_idx]) for field_coupled in field_info[1][2] : if 'axial' in cf.encoder_att_type : self.fields_coupling_idx[field_idx].append( self.fields_index[field_coupled] ) else : for _ in range(cf.coupling_num_heads_per_field) : self.fields_coupling_idx[field_idx].append( self.fields_index[field_coupled] ) # decoder self.decoders = torch.nn.ModuleList() self.field_pred_idxs = [] for field in cf.fields_prediction : for ifield, field_info in enumerate(cf.fields) : if field_info[0] == field[0] : self.field_pred_idxs.append( ifield) break self.decoders.append( TransformerDecoder( cf, field_info ) ) # load pre-trained model if specified if len(field_info[1]) > 4 and load_pretrained : self.load_block( field_info, 'decoder', self.decoders[-1]) # tail networks self.tails = torch.nn.ModuleList() for ifield, field in enumerate(cf.fields_prediction) : field_idx = self.field_pred_idxs[ifield] field_info = cf.fields[field_idx] self.tails.append( TailEnsemble( cf, field_info[1][1], np.prod(field_info[4]) ).create()) # load pre-trained model if specified if len(field_info[1]) > 4 and load_pretrained: self.load_block( field_info, 'tail', self.tails[-1]) # set devices for field_idx, field_info in 
enumerate(cf.fields) : # find determined device, use default if nothing specified device = self.devices[0] if len(field_info[1]) > 3 : assert field_info[1][3] < 4, 'Only single node model parallelism supported' assert field_info[1][3] < len(devices), 'Per field device id larger than max devices' device = self.devices[ field_info[1][3] ] # set device if self.masks[field_idx] != None : self.masks[field_idx].to(device) self.embeds[field_idx].to(device) self.encoders[field_idx].to(device) for field_idx, field in enumerate(cf.fields_prediction) : field_info = cf.fields[ self.field_pred_idxs[field_idx] ] device = self.devices[0] if len(field_info[1]) > 3 : device = self.devices[ field_info[1][3] ] self.decoders[field_idx].to(device) self.tails[field_idx].to(device) # embed_token_info on device[0] since it is shared by all fields, potentially sub-optimal self.embed_token_info.to(devices[0]) # TODO: only for backward compatibility, remove self.embeds_token_info.to(devices[0]) self.checkpoint = identity if cf.grad_checkpointing :
self.checkpoint = checkpoint_wrapper
4
2023-10-09 19:42:46+00:00
16k
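The atmorep record closes on a small but useful pattern: the model stores either an identity wrapper or a gradient-checkpointing wrapper under a single attribute, so every call site stays uniform regardless of the cf.grad_checkpointing flag. A minimal sketch of the same toggle follows, assuming a recent PyTorch (the use_reentrant keyword); the tiny MLP block is illustrative, and the attribute is named run_block here where the record uses self.checkpoint.

import torch
from torch.utils.checkpoint import checkpoint

def identity(func, *args):
    return func(*args)

def checkpoint_wrapper(func, *args):
    # Trade memory for compute: recompute activations during backward.
    return checkpoint(func, *args, use_reentrant=False)

class Net(torch.nn.Module):
    def __init__(self, grad_checkpointing=False):
        super().__init__()
        self.block = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.GELU())
        # One attribute, two behaviors: call sites never branch on the flag.
        self.run_block = checkpoint_wrapper if grad_checkpointing else identity

    def forward(self, x):
        return self.run_block(self.block, x)

x = torch.randn(4, 8, requires_grad=True)
for flag in (False, True):
    Net(grad_checkpointing=flag)(x).sum().backward()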
NKI-AI/ahcore
ahcore/callbacks/wsi_metric_callback.py
[ { "identifier": "WriteH5Callback", "path": "ahcore/callbacks/h5_callback.py", "snippet": "class WriteH5Callback(Callback):\n def __init__(\n self,\n max_queue_size: int,\n max_concurrent_writers: int,\n dump_dir: Path,\n normalization_type: str = str(NormalizationTy...
import itertools import json import multiprocessing import time import pytorch_lightning as pl import torch from collections import namedtuple from multiprocessing.pool import Pool from pathlib import Path from typing import Any, Generator, Optional, cast from pytorch_lightning import Callback from ahcore.callbacks import WriteH5Callback from ahcore.lit_module import AhCoreLightningModule from ahcore.metrics import WSIMetricFactory from ahcore.readers import H5FileImageReader, StitchingMode from ahcore.utils.callbacks import _get_h5_output_filename, _ValidationDataset from ahcore.utils.data import DataDescription from ahcore.utils.io import get_logger from ahcore.utils.manifest import DataManager, ImageMetadata, fetch_image_metadata, get_mask_and_annotations_from_record
12,999
assert self._dump_dir assert self._data_description assert self._validate_metadata assert self._data_manager metrics = [] with multiprocessing.Pool(processes=self._max_processes) as pool: results_to_filename: dict[list[dict[str, Any]], str] = {} completed_tasks = 0 # Fill up the initial task pool for image_metadata in itertools.islice(self._validate_metadata, self._max_processes): logger.info("Metadata: %s", image_metadata) # Assemble the task data # filename", "h5_filename", "metadata", "mask", "annotations" task_data = prepare_task_data( image_metadata.filename, self._dump_dir, pl_module, self._data_description, self._data_manager, ) # Schedule task schedule_task( task_data, pool, results_to_filename, self._class_names, self._data_description, self._wsi_metrics, self._save_per_image, ) while results_to_filename: time.sleep(0.1) # Reduce excessive polling # Check for completed tasks for result in list(results_to_filename.keys()): if result.ready(): filename = results_to_filename.pop(result) try: metric = result.get() except Exception as exc: self._logger.error("%r generated an exception: %s" % (filename, exc)) else: metrics.append(metric) self._logger.debug("Metric for %r is %s" % (filename, metric)) completed_tasks += 1 # Schedule a new task if there are more filenames left in the generator next_metadata = next(self._validate_metadata, None) while next_metadata: task_data = prepare_task_data( next_metadata.filename, # <-- Changed from image_metadata.filename self._dump_dir, pl_module, self._data_description, self._data_manager, ) # Schedule task schedule_task( task_data, pool, results_to_filename, self._class_names, self._data_description, self._wsi_metrics, self._save_per_image, ) next_metadata = next(self._validate_metadata, None) return metrics def on_validation_epoch_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None: if not self._dump_dir: raise ValueError("Dump directory is not set.") if not self._wsi_metrics: raise ValueError("WSI metrics are not set.") assert self._model_name # This should be set in the setup() # Ensure that all h5 files have been written self._logger.debug("Computing metrics for %s predictions", len(self._filenames)) computed_metrics = self.compute_metrics(trainer, pl_module) metrics = self._wsi_metrics.get_average_score(computed_metrics) results_json_fn = ( self._dump_dir / "outputs" / self._model_name / f"step_{pl_module.global_step}" / "results.json" ) with open(results_json_fn, "w", encoding="utf-8") as json_file: json.dump(self._dump_list, json_file, indent=2) self._wsi_metrics.reset() # Reset stuff self._dump_list = [] self._filenames = {} self._logger.debug("Metrics: %s", metrics) # TODO: Maybe put this elsewhere? metrics = {f"validate/{k}": v for k, v in metrics.items()} pl_module.log_dict(metrics, prog_bar=True) TaskData = namedtuple("TaskData", ["filename", "h5_filename", "metadata", "mask", "annotations"]) def prepare_task_data( filename: Path, dump_dir: Path, pl_module: pl.LightningModule, data_description: DataDescription, data_manager: DataManager, ) -> TaskData: h5_filename = _get_h5_output_filename( dump_dir=dump_dir, input_path=data_description.data_dir / filename, model_name=str(pl_module.name), step=pl_module.global_step, ) image = data_manager.get_image_by_filename(str(filename))
from __future__ import annotations logger = get_logger(__name__) class ComputeWsiMetricsCallback(Callback): def __init__(self, max_processes: int = 10, save_per_image: bool = True) -> None: """ Callback to compute metrics on whole-slide images. This callback is used to compute metrics on whole-slide images in separate processes. Parameters ---------- max_processes : int The maximum number of concurrent processes. """ self._data_description: Optional[DataDescription] = None self._reader = H5FileImageReader self._max_processes: int = max_processes self._dump_dir: Optional[Path] = None self._save_per_image = save_per_image self._filenames: dict[Path, Path] = {} self._wsi_metrics: WSIMetricFactory | None = None self._class_names: dict[int, str] = {} self._data_manager = None self._validate_filenames_gen = None self._model_name: str | None = None self._validate_metadata_gen: Generator[ImageMetadata, None, None] | None = None self._dump_list: list[dict[str, str]] = [] self._logger = get_logger(type(self).__name__) def setup( self, trainer: pl.Trainer, pl_module: pl.LightningModule, stage: Optional[str] = None, ) -> None: if not isinstance(pl_module, AhCoreLightningModule): # TODO: Make a AhCoreCallback with these features raise ValueError("AhCoreLightningModule required for WriteTiffCallback.") self._model_name = pl_module.name _callback: Optional[WriteH5Callback] = None for idx, callback in enumerate(trainer.callbacks): # type: ignore if isinstance(callback, WriteH5Callback): _callback = cast(WriteH5Callback, trainer.callbacks[idx]) # type: ignore break if _callback is None: raise ValueError( "WriteH5Callback is not in the trainer's callbacks. " "This is required before WSI metrics can be computed using this Callback" ) self._dump_dir = _callback.dump_dir if pl_module.wsi_metrics is None: raise ValueError("WSI metrics are not set.") self._wsi_metrics = pl_module.wsi_metrics self._data_description = trainer.datamodule.data_description # type: ignore # For mypy assert self._data_description index_map = self._data_description.index_map assert index_map if not self._data_description: raise ValueError("Data description is not set.") self._class_names = dict([(v, k) for k, v in index_map.items()]) self._class_names[0] = "background" # Here we can query the database for the validation images self._data_manager: DataManager = trainer.datamodule.data_manager # type: ignore def _create_validate_image_metadata_gen( self, ) -> Generator[ImageMetadata, None, None]: assert self._data_description assert self._data_manager gen = self._data_manager.get_image_metadata_by_split( manifest_name=self._data_description.manifest_name, split_version=self._data_description.split_version, split_category="validate", ) for image_metadata in gen: yield image_metadata @property def _validate_metadata(self) -> Generator[ImageMetadata, None, None] | None: return self._validate_metadata_gen def on_validation_epoch_start(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None: self._validate_metadata_gen = self._create_validate_image_metadata_gen() def on_validation_batch_end( self, trainer: pl.Trainer, pl_module: pl.LightningModule, outputs: Any, batch: Any, batch_idx: int, dataloader_idx: int = 0, ) -> None: if not self._dump_dir: raise ValueError("Dump directory is not set.") filenames = batch["path"] # Filenames are constant across the batch. if len(set(filenames)) != 1: raise ValueError( "All paths in a batch must be the same. " "Either use batch_size=1 or ahcore.data.samplers.WsiBatchSampler." 
) def compute_metrics( self, trainer: pl.Trainer, pl_module: pl.LightningModule ) -> list[list[dict[str, dict[str, float]]]]: assert self._dump_dir assert self._data_description assert self._validate_metadata assert self._data_manager metrics = [] with multiprocessing.Pool(processes=self._max_processes) as pool: results_to_filename: dict[list[dict[str, Any]], str] = {} completed_tasks = 0 # Fill up the initial task pool for image_metadata in itertools.islice(self._validate_metadata, self._max_processes): logger.info("Metadata: %s", image_metadata) # Assemble the task data # filename", "h5_filename", "metadata", "mask", "annotations" task_data = prepare_task_data( image_metadata.filename, self._dump_dir, pl_module, self._data_description, self._data_manager, ) # Schedule task schedule_task( task_data, pool, results_to_filename, self._class_names, self._data_description, self._wsi_metrics, self._save_per_image, ) while results_to_filename: time.sleep(0.1) # Reduce excessive polling # Check for completed tasks for result in list(results_to_filename.keys()): if result.ready(): filename = results_to_filename.pop(result) try: metric = result.get() except Exception as exc: self._logger.error("%r generated an exception: %s" % (filename, exc)) else: metrics.append(metric) self._logger.debug("Metric for %r is %s" % (filename, metric)) completed_tasks += 1 # Schedule a new task if there are more filenames left in the generator next_metadata = next(self._validate_metadata, None) while next_metadata: task_data = prepare_task_data( next_metadata.filename, # <-- Changed from image_metadata.filename self._dump_dir, pl_module, self._data_description, self._data_manager, ) # Schedule task schedule_task( task_data, pool, results_to_filename, self._class_names, self._data_description, self._wsi_metrics, self._save_per_image, ) next_metadata = next(self._validate_metadata, None) return metrics def on_validation_epoch_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None: if not self._dump_dir: raise ValueError("Dump directory is not set.") if not self._wsi_metrics: raise ValueError("WSI metrics are not set.") assert self._model_name # This should be set in the setup() # Ensure that all h5 files have been written self._logger.debug("Computing metrics for %s predictions", len(self._filenames)) computed_metrics = self.compute_metrics(trainer, pl_module) metrics = self._wsi_metrics.get_average_score(computed_metrics) results_json_fn = ( self._dump_dir / "outputs" / self._model_name / f"step_{pl_module.global_step}" / "results.json" ) with open(results_json_fn, "w", encoding="utf-8") as json_file: json.dump(self._dump_list, json_file, indent=2) self._wsi_metrics.reset() # Reset stuff self._dump_list = [] self._filenames = {} self._logger.debug("Metrics: %s", metrics) # TODO: Maybe put this elsewhere? metrics = {f"validate/{k}": v for k, v in metrics.items()} pl_module.log_dict(metrics, prog_bar=True) TaskData = namedtuple("TaskData", ["filename", "h5_filename", "metadata", "mask", "annotations"]) def prepare_task_data( filename: Path, dump_dir: Path, pl_module: pl.LightningModule, data_description: DataDescription, data_manager: DataManager, ) -> TaskData: h5_filename = _get_h5_output_filename( dump_dir=dump_dir, input_path=data_description.data_dir / filename, model_name=str(pl_module.name), step=pl_module.global_step, ) image = data_manager.get_image_by_filename(str(filename))
metadata = fetch_image_metadata(image)
11
2023-10-14 18:04:12+00:00
16k
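The compute_metrics loop in the ahcore record is a generic scheduling pattern: keep at most max_processes apply_async tasks in flight, poll ready(), collect finished results, and top the pool back up from a generator. Below is a standalone sketch of just that loop; the work function is a placeholder for the per-slide metric computation, and the in-flight dict is keyed by the AsyncResult handle the way the record keys results_to_filename.

import multiprocessing
import time

def work(item):
    time.sleep(0.01)  # placeholder for an expensive per-item computation
    return item * item

def run_bounded(items, max_processes=4):
    it = iter(items)
    results = []
    with multiprocessing.Pool(processes=max_processes) as pool:
        in_flight = {}  # AsyncResult -> submitted item
        for _ in range(max_processes):  # fill the initial task pool
            item = next(it, None)
            if item is None:
                break
            in_flight[pool.apply_async(work, (item,))] = item
        while in_flight:
            time.sleep(0.05)  # reduce excessive polling, as the record does
            for res in list(in_flight):
                if res.ready():
                    in_flight.pop(res)
                    results.append(res.get())
                    nxt = next(it, None)  # top up from the generator
                    if nxt is not None:
                        in_flight[pool.apply_async(work, (nxt,))] = nxt
    return results

if __name__ == "__main__":
    print(sorted(run_bounded(range(10))))

Bounding the in-flight set this way avoids materializing every task up front, which matters when each task carries large per-image metadata.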
fury-05/BookRecomendApp
.pythonlibs/lib/python3.10/site-packages/sklearn/linear_model/_omp.py
[ { "identifier": "MultiOutputMixin", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/base.py", "snippet": "class MultiOutputMixin:\n \"\"\"Mixin to mark estimators that support multioutput.\"\"\"\n\n def _more_tags(self):\n return {\"multioutput\": True}" }, { "identifier":...
import warnings import numpy as np from math import sqrt from numbers import Integral, Real from scipy import linalg from scipy.linalg.lapack import get_lapack_funcs from ..base import MultiOutputMixin, RegressorMixin, _fit_context from ..model_selection import check_cv from ..utils import as_float_array, check_array from ..utils._param_validation import Hidden, Interval, StrOptions, validate_params from ..utils.parallel import Parallel, delayed from ._base import LinearModel, _deprecate_normalize, _pre_fit
13,460
overrides `n_nonzero_coefs`. norms_squared : array-like of shape (n_targets,), default=None Squared L2 norms of the lines of y. Required if tol is not None. copy_Gram : bool, default=True Whether the gram matrix must be copied by the algorithm. A false value is only helpful if it is already Fortran-ordered, otherwise a copy is made anyway. copy_Xy : bool, default=True Whether the covariance vector Xy must be copied by the algorithm. If False, it may be overwritten. return_path : bool, default=False Whether to return every value of the nonzero coefficients along the forward path. Useful for cross-validation. return_n_iter : bool, default=False Whether or not to return the number of iterations. Returns ------- coef : ndarray of shape (n_features,) or (n_features, n_targets) Coefficients of the OMP solution. If `return_path=True`, this contains the whole coefficient path. In this case its shape is (n_features, n_features) or (n_features, n_targets, n_features) and iterating over the last axis yields coefficients in increasing order of active features. n_iters : array-like or int Number of active features across every target. Returned only if `return_n_iter` is set to True. See Also -------- OrthogonalMatchingPursuit : Orthogonal Matching Pursuit model (OMP). orthogonal_mp : Solves n_targets Orthogonal Matching Pursuit problems. lars_path : Compute Least Angle Regression or Lasso path using LARS algorithm. sklearn.decomposition.sparse_encode : Generic sparse coding. Each column of the result is the solution to a Lasso problem. Notes ----- Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang, Matching pursuits with time-frequency dictionaries, IEEE Transactions on Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415. (https://www.di.ens.fr/~mallat/papiers/MallatPursuit93.pdf) This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad, M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal Matching Pursuit Technical Report - CS Technion, April 2008. https://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf """ Gram = check_array(Gram, order="F", copy=copy_Gram) Xy = np.asarray(Xy) if Xy.ndim > 1 and Xy.shape[1] > 1: # or subsequent target will be affected copy_Gram = True if Xy.ndim == 1: Xy = Xy[:, np.newaxis] if tol is not None: norms_squared = [norms_squared] if copy_Xy or not Xy.flags.writeable: # Make the copy once instead of many times in _gram_omp itself. Xy = Xy.copy() if n_nonzero_coefs is None and tol is None: n_nonzero_coefs = int(0.1 * len(Gram)) if tol is not None and norms_squared is None: raise ValueError( "Gram OMP needs the precomputed norms in order " "to evaluate the error sum of squares." 
) if tol is not None and tol < 0: raise ValueError("Epsilon cannot be negative") if tol is None and n_nonzero_coefs <= 0: raise ValueError("The number of atoms must be positive") if tol is None and n_nonzero_coefs > len(Gram): raise ValueError( "The number of atoms cannot be more than the number of features" ) if return_path: coef = np.zeros((len(Gram), Xy.shape[1], len(Gram)), dtype=Gram.dtype) else: coef = np.zeros((len(Gram), Xy.shape[1]), dtype=Gram.dtype) n_iters = [] for k in range(Xy.shape[1]): out = _gram_omp( Gram, Xy[:, k], n_nonzero_coefs, norms_squared[k] if tol is not None else None, tol, copy_Gram=copy_Gram, copy_Xy=False, return_path=return_path, ) if return_path: _, idx, coefs, n_iter = out coef = coef[:, :, : len(idx)] for n_active, x in enumerate(coefs.T): coef[idx[: n_active + 1], k, n_active] = x[: n_active + 1] else: x, idx, n_iter = out coef[idx, k] = x n_iters.append(n_iter) if Xy.shape[1] == 1: n_iters = n_iters[0] if return_n_iter: return np.squeeze(coef), n_iters else: return np.squeeze(coef)
"""Orthogonal matching pursuit algorithms """ # Author: Vlad Niculae # # License: BSD 3 clause premature = ( "Orthogonal matching pursuit ended prematurely due to linear" " dependence in the dictionary. The requested precision might" " not have been met." ) def _cholesky_omp(X, y, n_nonzero_coefs, tol=None, copy_X=True, return_path=False): """Orthogonal Matching Pursuit step using the Cholesky decomposition. Parameters ---------- X : ndarray of shape (n_samples, n_features) Input dictionary. Columns are assumed to have unit norm. y : ndarray of shape (n_samples,) Input targets. n_nonzero_coefs : int Targeted number of non-zero elements. tol : float, default=None Targeted squared error, if not None overrides n_nonzero_coefs. copy_X : bool, default=True Whether the design matrix X must be copied by the algorithm. A false value is only helpful if X is already Fortran-ordered, otherwise a copy is made anyway. return_path : bool, default=False Whether to return every value of the nonzero coefficients along the forward path. Useful for cross-validation. Returns ------- gamma : ndarray of shape (n_nonzero_coefs,) Non-zero elements of the solution. idx : ndarray of shape (n_nonzero_coefs,) Indices of the positions of the elements in gamma within the solution vector. coef : ndarray of shape (n_features, n_nonzero_coefs) The first k values of column k correspond to the coefficient value for the active features at that step. The lower left triangle contains garbage. Only returned if ``return_path=True``. n_active : int Number of active features at convergence. """ if copy_X: X = X.copy("F") else: # even if we are allowed to overwrite, still copy it if bad order X = np.asfortranarray(X) min_float = np.finfo(X.dtype).eps nrm2, swap = linalg.get_blas_funcs(("nrm2", "swap"), (X,)) (potrs,) = get_lapack_funcs(("potrs",), (X,)) alpha = np.dot(X.T, y) residual = y gamma = np.empty(0) n_active = 0 indices = np.arange(X.shape[1]) # keeping track of swapping max_features = X.shape[1] if tol is not None else n_nonzero_coefs L = np.empty((max_features, max_features), dtype=X.dtype) if return_path: coefs = np.empty_like(L) while True: lam = np.argmax(np.abs(np.dot(X.T, residual))) if lam < n_active or alpha[lam] ** 2 < min_float: # atom already selected or inner product too small warnings.warn(premature, RuntimeWarning, stacklevel=2) break if n_active > 0: # Updates the Cholesky decomposition of X' X L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam]) linalg.solve_triangular( L[:n_active, :n_active], L[n_active, :n_active], trans=0, lower=1, overwrite_b=True, check_finite=False, ) v = nrm2(L[n_active, :n_active]) ** 2 Lkk = linalg.norm(X[:, lam]) ** 2 - v if Lkk <= min_float: # selected atoms are dependent warnings.warn(premature, RuntimeWarning, stacklevel=2) break L[n_active, n_active] = sqrt(Lkk) else: L[0, 0] = linalg.norm(X[:, lam]) X.T[n_active], X.T[lam] = swap(X.T[n_active], X.T[lam]) alpha[n_active], alpha[lam] = alpha[lam], alpha[n_active] indices[n_active], indices[lam] = indices[lam], indices[n_active] n_active += 1 # solves LL'x = X'y as a composition of two triangular systems gamma, _ = potrs( L[:n_active, :n_active], alpha[:n_active], lower=True, overwrite_b=False ) if return_path: coefs[:n_active, n_active - 1] = gamma residual = y - np.dot(X[:, :n_active], gamma) if tol is not None and nrm2(residual) ** 2 <= tol: break elif n_active == max_features: break if return_path: return gamma, indices[:n_active], coefs[:, :n_active], n_active else: return gamma, indices[:n_active], n_active def 
_gram_omp( Gram, Xy, n_nonzero_coefs, tol_0=None, tol=None, copy_Gram=True, copy_Xy=True, return_path=False, ): """Orthogonal Matching Pursuit step on a precomputed Gram matrix. This function uses the Cholesky decomposition method. Parameters ---------- Gram : ndarray of shape (n_features, n_features) Gram matrix of the input data matrix. Xy : ndarray of shape (n_features,) Input targets. n_nonzero_coefs : int Targeted number of non-zero elements. tol_0 : float, default=None Squared norm of y, required if tol is not None. tol : float, default=None Targeted squared error, if not None overrides n_nonzero_coefs. copy_Gram : bool, default=True Whether the gram matrix must be copied by the algorithm. A false value is only helpful if it is already Fortran-ordered, otherwise a copy is made anyway. copy_Xy : bool, default=True Whether the covariance vector Xy must be copied by the algorithm. If False, it may be overwritten. return_path : bool, default=False Whether to return every value of the nonzero coefficients along the forward path. Useful for cross-validation. Returns ------- gamma : ndarray of shape (n_nonzero_coefs,) Non-zero elements of the solution. idx : ndarray of shape (n_nonzero_coefs,) Indices of the positions of the elements in gamma within the solution vector. coefs : ndarray of shape (n_features, n_nonzero_coefs) The first k values of column k correspond to the coefficient value for the active features at that step. The lower left triangle contains garbage. Only returned if ``return_path=True``. n_active : int Number of active features at convergence. """ Gram = Gram.copy("F") if copy_Gram else np.asfortranarray(Gram) if copy_Xy or not Xy.flags.writeable: Xy = Xy.copy() min_float = np.finfo(Gram.dtype).eps nrm2, swap = linalg.get_blas_funcs(("nrm2", "swap"), (Gram,)) (potrs,) = get_lapack_funcs(("potrs",), (Gram,)) indices = np.arange(len(Gram)) # keeping track of swapping alpha = Xy tol_curr = tol_0 delta = 0 gamma = np.empty(0) n_active = 0 max_features = len(Gram) if tol is not None else n_nonzero_coefs L = np.empty((max_features, max_features), dtype=Gram.dtype) L[0, 0] = 1.0 if return_path: coefs = np.empty_like(L) while True: lam = np.argmax(np.abs(alpha)) if lam < n_active or alpha[lam] ** 2 < min_float: # selected same atom twice, or inner product too small warnings.warn(premature, RuntimeWarning, stacklevel=3) break if n_active > 0: L[n_active, :n_active] = Gram[lam, :n_active] linalg.solve_triangular( L[:n_active, :n_active], L[n_active, :n_active], trans=0, lower=1, overwrite_b=True, check_finite=False, ) v = nrm2(L[n_active, :n_active]) ** 2 Lkk = Gram[lam, lam] - v if Lkk <= min_float: # selected atoms are dependent warnings.warn(premature, RuntimeWarning, stacklevel=3) break L[n_active, n_active] = sqrt(Lkk) else: L[0, 0] = sqrt(Gram[lam, lam]) Gram[n_active], Gram[lam] = swap(Gram[n_active], Gram[lam]) Gram.T[n_active], Gram.T[lam] = swap(Gram.T[n_active], Gram.T[lam]) indices[n_active], indices[lam] = indices[lam], indices[n_active] Xy[n_active], Xy[lam] = Xy[lam], Xy[n_active] n_active += 1 # solves LL'x = X'y as a composition of two triangular systems gamma, _ = potrs( L[:n_active, :n_active], Xy[:n_active], lower=True, overwrite_b=False ) if return_path: coefs[:n_active, n_active - 1] = gamma beta = np.dot(Gram[:, :n_active], gamma) alpha = Xy - beta if tol is not None: tol_curr += delta delta = np.inner(gamma, beta[:n_active]) tol_curr -= delta if abs(tol_curr) <= tol: break elif n_active == max_features: break if return_path: return gamma, indices[:n_active], 
coefs[:, :n_active], n_active else: return gamma, indices[:n_active], n_active @validate_params( { "X": ["array-like"], "y": [np.ndarray], "n_nonzero_coefs": [Interval(Integral, 1, None, closed="left"), None], "tol": [Interval(Real, 0, None, closed="left"), None], "precompute": ["boolean", StrOptions({"auto"})], "copy_X": ["boolean"], "return_path": ["boolean"], "return_n_iter": ["boolean"], }, prefer_skip_nested_validation=True, ) def orthogonal_mp( X, y, *, n_nonzero_coefs=None, tol=None, precompute=False, copy_X=True, return_path=False, return_n_iter=False, ): r"""Orthogonal Matching Pursuit (OMP). Solves n_targets Orthogonal Matching Pursuit problems. An instance of the problem has the form: When parametrized by the number of non-zero coefficients using `n_nonzero_coefs`: argmin ||y - X\gamma||^2 subject to ||\gamma||_0 <= n_{nonzero coefs} When parametrized by error using the parameter `tol`: argmin ||\gamma||_0 subject to ||y - X\gamma||^2 <= tol Read more in the :ref:`User Guide <omp>`. Parameters ---------- X : array-like of shape (n_samples, n_features) Input data. Columns are assumed to have unit norm. y : ndarray of shape (n_samples,) or (n_samples, n_targets) Input targets. n_nonzero_coefs : int, default=None Desired number of non-zero entries in the solution. If None (by default) this value is set to 10% of n_features. tol : float, default=None Maximum squared norm of the residual. If not None, overrides n_nonzero_coefs. precompute : 'auto' or bool, default=False Whether to perform precomputations. Improves performance when n_targets or n_samples is very large. copy_X : bool, default=True Whether the design matrix X must be copied by the algorithm. A false value is only helpful if X is already Fortran-ordered, otherwise a copy is made anyway. return_path : bool, default=False Whether to return every value of the nonzero coefficients along the forward path. Useful for cross-validation. return_n_iter : bool, default=False Whether or not to return the number of iterations. Returns ------- coef : ndarray of shape (n_features,) or (n_features, n_targets) Coefficients of the OMP solution. If `return_path=True`, this contains the whole coefficient path. In this case its shape is (n_features, n_features) or (n_features, n_targets, n_features) and iterating over the last axis generates coefficients in increasing order of active features. n_iters : array-like or int Number of active features across every target. Returned only if `return_n_iter` is set to True. See Also -------- OrthogonalMatchingPursuit : Orthogonal Matching Pursuit model. orthogonal_mp_gram : Solve OMP problems using Gram matrix and the product X.T * y. lars_path : Compute Least Angle Regression or Lasso path using LARS algorithm. sklearn.decomposition.sparse_encode : Sparse coding. Notes ----- Orthogonal matching pursuit was introduced in S. Mallat, Z. Zhang, Matching pursuits with time-frequency dictionaries, IEEE Transactions on Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415. (https://www.di.ens.fr/~mallat/papiers/MallatPursuit93.pdf) This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad, M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal Matching Pursuit Technical Report - CS Technion, April 2008. 
https://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf """ X = check_array(X, order="F", copy=copy_X) copy_X = False if y.ndim == 1: y = y.reshape(-1, 1) y = check_array(y) if y.shape[1] > 1: # subsequent targets will be affected copy_X = True if n_nonzero_coefs is None and tol is None: # default for n_nonzero_coefs is 0.1 * n_features # but at least one. n_nonzero_coefs = max(int(0.1 * X.shape[1]), 1) if tol is None and n_nonzero_coefs > X.shape[1]: raise ValueError( "The number of atoms cannot be more than the number of features" ) if precompute == "auto": precompute = X.shape[0] > X.shape[1] if precompute: G = np.dot(X.T, X) G = np.asfortranarray(G) Xy = np.dot(X.T, y) if tol is not None: norms_squared = np.sum((y**2), axis=0) else: norms_squared = None return orthogonal_mp_gram( G, Xy, n_nonzero_coefs=n_nonzero_coefs, tol=tol, norms_squared=norms_squared, copy_Gram=copy_X, copy_Xy=False, return_path=return_path, ) if return_path: coef = np.zeros((X.shape[1], y.shape[1], X.shape[1])) else: coef = np.zeros((X.shape[1], y.shape[1])) n_iters = [] for k in range(y.shape[1]): out = _cholesky_omp( X, y[:, k], n_nonzero_coefs, tol, copy_X=copy_X, return_path=return_path ) if return_path: _, idx, coefs, n_iter = out coef = coef[:, :, : len(idx)] for n_active, x in enumerate(coefs.T): coef[idx[: n_active + 1], k, n_active] = x[: n_active + 1] else: x, idx, n_iter = out coef[idx, k] = x n_iters.append(n_iter) if y.shape[1] == 1: n_iters = n_iters[0] if return_n_iter: return np.squeeze(coef), n_iters else: return np.squeeze(coef) def orthogonal_mp_gram( Gram, Xy, *, n_nonzero_coefs=None, tol=None, norms_squared=None, copy_Gram=True, copy_Xy=True, return_path=False, return_n_iter=False, ): """Gram Orthogonal Matching Pursuit (OMP). Solves n_targets Orthogonal Matching Pursuit problems using only the Gram matrix X.T * X and the product X.T * y. Read more in the :ref:`User Guide <omp>`. Parameters ---------- Gram : ndarray of shape (n_features, n_features) Gram matrix of the input data: X.T * X. Xy : ndarray of shape (n_features,) or (n_features, n_targets) Input targets multiplied by X: X.T * y. n_nonzero_coefs : int, default=None Desired number of non-zero entries in the solution. If None (by default) this value is set to 10% of n_features. tol : float, default=None Maximum squared norm of the residual. If not `None`, overrides `n_nonzero_coefs`. norms_squared : array-like of shape (n_targets,), default=None Squared L2 norms of the lines of y. Required if tol is not None. copy_Gram : bool, default=True Whether the gram matrix must be copied by the algorithm. A false value is only helpful if it is already Fortran-ordered, otherwise a copy is made anyway. copy_Xy : bool, default=True Whether the covariance vector Xy must be copied by the algorithm. If False, it may be overwritten. return_path : bool, default=False Whether to return every value of the nonzero coefficients along the forward path. Useful for cross-validation. return_n_iter : bool, default=False Whether or not to return the number of iterations. Returns ------- coef : ndarray of shape (n_features,) or (n_features, n_targets) Coefficients of the OMP solution. If `return_path=True`, this contains the whole coefficient path. In this case its shape is (n_features, n_features) or (n_features, n_targets, n_features) and iterating over the last axis yields coefficients in increasing order of active features. n_iters : array-like or int Number of active features across every target. Returned only if `return_n_iter` is set to True. 
See Also -------- OrthogonalMatchingPursuit : Orthogonal Matching Pursuit model (OMP). orthogonal_mp : Solves n_targets Orthogonal Matching Pursuit problems. lars_path : Compute Least Angle Regression or Lasso path using LARS algorithm. sklearn.decomposition.sparse_encode : Generic sparse coding. Each column of the result is the solution to a Lasso problem. Notes ----- Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang, Matching pursuits with time-frequency dictionaries, IEEE Transactions on Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415. (https://www.di.ens.fr/~mallat/papiers/MallatPursuit93.pdf) This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad, M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal Matching Pursuit Technical Report - CS Technion, April 2008. https://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf """ Gram = check_array(Gram, order="F", copy=copy_Gram) Xy = np.asarray(Xy) if Xy.ndim > 1 and Xy.shape[1] > 1: # or subsequent target will be affected copy_Gram = True if Xy.ndim == 1: Xy = Xy[:, np.newaxis] if tol is not None: norms_squared = [norms_squared] if copy_Xy or not Xy.flags.writeable: # Make the copy once instead of many times in _gram_omp itself. Xy = Xy.copy() if n_nonzero_coefs is None and tol is None: n_nonzero_coefs = int(0.1 * len(Gram)) if tol is not None and norms_squared is None: raise ValueError( "Gram OMP needs the precomputed norms in order " "to evaluate the error sum of squares." ) if tol is not None and tol < 0: raise ValueError("Epsilon cannot be negative") if tol is None and n_nonzero_coefs <= 0: raise ValueError("The number of atoms must be positive") if tol is None and n_nonzero_coefs > len(Gram): raise ValueError( "The number of atoms cannot be more than the number of features" ) if return_path: coef = np.zeros((len(Gram), Xy.shape[1], len(Gram)), dtype=Gram.dtype) else: coef = np.zeros((len(Gram), Xy.shape[1]), dtype=Gram.dtype) n_iters = [] for k in range(Xy.shape[1]): out = _gram_omp( Gram, Xy[:, k], n_nonzero_coefs, norms_squared[k] if tol is not None else None, tol, copy_Gram=copy_Gram, copy_Xy=False, return_path=return_path, ) if return_path: _, idx, coefs, n_iter = out coef = coef[:, :, : len(idx)] for n_active, x in enumerate(coefs.T): coef[idx[: n_active + 1], k, n_active] = x[: n_active + 1] else: x, idx, n_iter = out coef[idx, k] = x n_iters.append(n_iter) if Xy.shape[1] == 1: n_iters = n_iters[0] if return_n_iter: return np.squeeze(coef), n_iters else: return np.squeeze(coef)
class OrthogonalMatchingPursuit(MultiOutputMixin, RegressorMixin, LinearModel):
0
2023-10-07 13:19:48+00:00
16k
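A minimal usage sketch of the `orthogonal_mp` / `orthogonal_mp_gram` pair shown in the scikit-learn sample above, assuming scikit-learn is installed. The random dictionary, seed, and sparsity level are illustrative only; the Gram-path call mirrors what `orthogonal_mp` does internally when `precompute` resolves to True.

import numpy as np
from sklearn.linear_model import orthogonal_mp, orthogonal_mp_gram

rng = np.random.default_rng(0)
X = rng.standard_normal((100, 50))            # dictionary: n_samples x n_features
X /= np.linalg.norm(X, axis=0)                # unit-norm atoms, conventional for OMP
w_true = np.zeros(50)
w_true[[3, 17, 42]] = rng.standard_normal(3)  # 3-sparse ground-truth code
y = X @ w_true

w = orthogonal_mp(X, y, n_nonzero_coefs=3)    # direct solve

G = X.T @ X                                   # precomputed Gram path
Xy = X.T @ y
w_gram = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=3)

assert np.allclose(w, w_gram)                 # both paths run the same algorithm
print(np.flatnonzero(w))                      # expected to recover [3 17 42] in this noiseless setup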
hellloxiaotian/KDNet
test_ccpd.py
[ { "identifier": "attempt_load", "path": "models/experimental.py", "snippet": "def attempt_load(weights, map_location=None):\n # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a\n model = Ensemble()\n # print('weights', weights) # /runs/train/yolov7_distill...
import argparse import json import os import numpy as np import torch import yaml from pathlib import Path from threading import Thread from tqdm import tqdm from models.experimental import attempt_load from utils.datasets import create_dataloader from utils.general import coco80_to_coco91_class, check_dataset, check_file, check_img_size, check_requirements, \ box_iou, non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, increment_path, colorstr from utils.metrics import ap_per_class, ConfusionMatrix from utils.plots import plot_images, output_to_target, plot_study_txt from utils.torch_utils import select_device, time_synchronized, TracedModel from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval
11,939
for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)): img = img.to(device, non_blocking=True) img = img.half() if half else img.float() # uint8 to fp16/32 img /= 255.0 # 0 - 255 to 0.0 - 1.0 targets = targets.to(device) nb, _, height, width = img.shape # batch size, channels, height, width with torch.no_grad(): # Run model t = time_synchronized() out, train_out = model(img, augment=augment) # inference and training outputs t0 += time_synchronized() - t # Compute loss if compute_loss: loss += compute_loss([x.float() for x in train_out], targets)[1][:3] # box, obj, cls # Run NMS targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling t = time_synchronized() out = non_max_suppression(out, conf_thres=conf_thres, iou_thres=iou_thres, labels=lb, multi_label=True) t1 += time_synchronized() - t # Statistics per image for si, pred in enumerate(out): labels = targets[targets[:, 0] == si, 1:] nl = len(labels) tcls = labels[:, 0].tolist() if nl else [] # target class path = Path(paths[si]) seen += 1 if len(pred) == 0: if nl: stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls)) continue # Predictions predn = pred.clone() scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1]) # native-space pred # Append to text file if save_txt: gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]] # normalization gain whwh for *xyxy, conf, cls in predn.tolist(): xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') # W&B logging - Media Panel Plots if len(wandb_images) < log_imgs and wandb_logger.current_epoch > 0: # Check for test operation if wandb_logger.current_epoch % wandb_logger.bbox_interval == 0: box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, "class_id": int(cls), "box_caption": "%s %.3f" % (names[cls], conf), "scores": {"class_score": conf}, "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()] boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space wandb_images.append(wandb_logger.wandb.Image(img[si], boxes=boxes, caption=path.name)) wandb_logger.log_training_progress(predn, path, names) if wandb_logger and wandb_logger.wandb_run else None # Append to pycocotools JSON dictionary if save_json: # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ... 
image_id = int(path.stem) if path.stem.isnumeric() else path.stem box = xyxy2xywh(predn[:, :4]) # xywh box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner for p, b in zip(pred.tolist(), box.tolist()): jdict.append({'image_id': image_id, 'category_id': coco91class[int(p[5])] if is_coco else int(p[5]), 'bbox': [round(x, 3) for x in b], 'score': round(p[4], 5)}) # Assign all predictions as incorrect correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device) if nl: detected = [] # target indices tcls_tensor = labels[:, 0] # target boxes tbox = xywh2xyxy(labels[:, 1:5]) scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1]) # native-space labels if plots: confusion_matrix.process_batch(predn, torch.cat((labels[:, 0:1], tbox), 1)) # Per target class for cls in torch.unique(tcls_tensor): ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1) # prediction indices pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1) # target indices # Search for detections if pi.shape[0]: # Prediction to target ious ious, i = box_iou(predn[pi, :4], tbox[ti]).max(1) # best ious, indices # Append detections detected_set = set() for j in (ious > iouv[0]).nonzero(as_tuple=False): d = ti[i[j]] # detected target if d.item() not in detected_set: detected_set.add(d.item()) detected.append(d) correct[pi[j]] = ious[j] > iouv # iou_thres is 1xn if len(detected) == nl: # all targets already located in image break # Append statistics (correct, conf, pcls, tcls) stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls)) # Plot images if plots and batch_i < 3: f = save_dir / f'test_batch{batch_i}_labels.jpg' # labels Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start() f = save_dir / f'test_batch{batch_i}_pred.jpg' # predictions Thread(target=plot_images, args=(img, output_to_target(out), paths, f, names), daemon=True).start() # Compute statistics stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy if len(stats) and stats[0].any():
def test(data, weights=None, batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, # for NMS save_json=False, single_cls=False, augment=False, verbose=False, model=None, dataloader=None, save_dir=Path(''), # for saving images save_txt=False, # for auto-labelling save_hybrid=False, # for hybrid auto-labelling save_conf=False, # save auto-label confidences plots=True, wandb_logger=None, compute_loss=None, half_precision=True, trace=False, is_coco=False, v5_metric=False): # Initialize/load model and set device training = model is not None if training: # called by train.py device = next(model.parameters()).device # get model device else: # called directly set_logging() device = select_device(opt.device, batch_size=batch_size) # Directories save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model model = attempt_load(weights, map_location=device) # load FP32 model gs = max(int(model.stride.max()), 32) # grid size (max stride) imgsz = check_img_size(imgsz, s=gs) # check img_size if trace: model = TracedModel(model, device, imgsz) # Half half = device.type != 'cpu' and half_precision # half precision only supported on CUDA if half: model.half() # Configure model.eval() if isinstance(data, str): is_coco = data.endswith('coco.yaml') with open(data) as f: data = yaml.load(f, Loader=yaml.SafeLoader) check_dataset(data) # check nc = 1 if single_cls else int(data['nc']) # number of classes iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95 niou = iouv.numel() # Logging log_imgs = 0 if wandb_logger and wandb_logger.wandb: log_imgs = min(wandb_logger.log_imgs, 100) # Dataloader if not training: if device.type != 'cpu': model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once task = opt.task if opt.task in ('train', 'val', 'test') else 'val' # path to train/val/test images dataloader = create_dataloader(data[task], imgsz, batch_size, gs, opt, pad=0.5, rect=True, prefix=colorstr(f'{task}: '))[0] if v5_metric: print("Testing with YOLOv5 AP metric...") seen = 0 confusion_matrix = ConfusionMatrix(nc=nc) names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)} coco91class = coco80_to_coco91_class() s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0. 
loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class, wandb_images = [], [], [], [], [] for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)): img = img.to(device, non_blocking=True) img = img.half() if half else img.float() # uint8 to fp16/32 img /= 255.0 # 0 - 255 to 0.0 - 1.0 targets = targets.to(device) nb, _, height, width = img.shape # batch size, channels, height, width with torch.no_grad(): # Run model t = time_synchronized() out, train_out = model(img, augment=augment) # inference and training outputs t0 += time_synchronized() - t # Compute loss if compute_loss: loss += compute_loss([x.float() for x in train_out], targets)[1][:3] # box, obj, cls # Run NMS targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling t = time_synchronized() out = non_max_suppression(out, conf_thres=conf_thres, iou_thres=iou_thres, labels=lb, multi_label=True) t1 += time_synchronized() - t # Statistics per image for si, pred in enumerate(out): labels = targets[targets[:, 0] == si, 1:] nl = len(labels) tcls = labels[:, 0].tolist() if nl else [] # target class path = Path(paths[si]) seen += 1 if len(pred) == 0: if nl: stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls)) continue # Predictions predn = pred.clone() scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1]) # native-space pred # Append to text file if save_txt: gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]] # normalization gain whwh for *xyxy, conf, cls in predn.tolist(): xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') # W&B logging - Media Panel Plots if len(wandb_images) < log_imgs and wandb_logger.current_epoch > 0: # Check for test operation if wandb_logger.current_epoch % wandb_logger.bbox_interval == 0: box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, "class_id": int(cls), "box_caption": "%s %.3f" % (names[cls], conf), "scores": {"class_score": conf}, "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()] boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space wandb_images.append(wandb_logger.wandb.Image(img[si], boxes=boxes, caption=path.name)) wandb_logger.log_training_progress(predn, path, names) if wandb_logger and wandb_logger.wandb_run else None # Append to pycocotools JSON dictionary if save_json: # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ... 
image_id = int(path.stem) if path.stem.isnumeric() else path.stem box = xyxy2xywh(predn[:, :4]) # xywh box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner for p, b in zip(pred.tolist(), box.tolist()): jdict.append({'image_id': image_id, 'category_id': coco91class[int(p[5])] if is_coco else int(p[5]), 'bbox': [round(x, 3) for x in b], 'score': round(p[4], 5)}) # Assign all predictions as incorrect correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device) if nl: detected = [] # target indices tcls_tensor = labels[:, 0] # target boxes tbox = xywh2xyxy(labels[:, 1:5]) scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1]) # native-space labels if plots: confusion_matrix.process_batch(predn, torch.cat((labels[:, 0:1], tbox), 1)) # Per target class for cls in torch.unique(tcls_tensor): ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1) # prediction indices pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1) # target indices # Search for detections if pi.shape[0]: # Prediction to target ious ious, i = box_iou(predn[pi, :4], tbox[ti]).max(1) # best ious, indices # Append detections detected_set = set() for j in (ious > iouv[0]).nonzero(as_tuple=False): d = ti[i[j]] # detected target if d.item() not in detected_set: detected_set.add(d.item()) detected.append(d) correct[pi[j]] = ious[j] > iouv # iou_thres is 1xn if len(detected) == nl: # all targets already located in image break # Append statistics (correct, conf, pcls, tcls) stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls)) # Plot images if plots and batch_i < 3: f = save_dir / f'test_batch{batch_i}_labels.jpg' # labels Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start() f = save_dir / f'test_batch{batch_i}_pred.jpg' # predictions Thread(target=plot_images, args=(img, output_to_target(out), paths, f, names), daemon=True).start() # Compute statistics stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy if len(stats) and stats[0].any():
p, r, ap, f1, ap_class = ap_per_class(*stats, plot=plots, v5_metric=v5_metric, save_dir=save_dir, names=names)
15
2023-10-08 13:05:58+00:00
16k
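The per-image statistics loop in the sample above marks each prediction correct or not at ten IoU thresholds simultaneously (the `correct` boolean matrix that later feeds `ap_per_class`). A self-contained toy version of that matching idea, with hand-made boxes and a local `box_iou` rather than the repo's utils:

import torch

def box_iou(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    # a: (N, 4), b: (M, 4) in xyxy format; returns (N, M) pairwise IoU.
    area_a = (a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1])
    area_b = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])
    lt = torch.max(a[:, None, :2], b[None, :, :2])   # intersection top-left
    rb = torch.min(a[:, None, 2:], b[None, :, 2:])   # intersection bottom-right
    wh = (rb - lt).clamp(min=0)
    inter = wh[..., 0] * wh[..., 1]
    return inter / (area_a[:, None] + area_b[None, :] - inter)

iouv = torch.linspace(0.5, 0.95, 10)                 # mAP@.5:.95 thresholds
pred = torch.tensor([[0., 0., 10., 10.], [20., 20., 30., 30.]])
tbox = torch.tensor([[1., 1., 10., 10.]])

ious, _ = box_iou(pred, tbox).max(1)                 # best target IoU per prediction
correct = ious[:, None] > iouv[None, :]              # (n_pred, 10) bool matrix
print(correct[0])                                    # IoU 0.81: True up to the 0.80 bin, False above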
Significant-Gravitas/autostandup
bot.py
[ { "identifier": "StreaksDB", "path": "streaks/streaks_db.py", "snippet": "class StreaksDB(BaseDB):\n \"\"\"\n StreaksDB class handles all operations related to the 'streaks' table.\n Inherits from the BaseDB class.\n \"\"\"\n\n def __init__(self, host, user, password, database, port):\n ...
import os import pytz import asyncio import openai import requests from typing import List from dotenv import load_dotenv from datetime import datetime, timedelta from multiprocessing import Process from streaks.streaks_db import StreaksDB from team_members.team_member_db import TeamMemberDB from updates.updates_db import UpdatesDB from weekly_posts.weekly_posts_db import WeeklyPostsDB from streaks.streaks_manager import StreaksManager from team_members.team_member_manager import TeamMemberManager from updates.updates_manager import UpdatesManager from weekly_posts.weekly_post_manager import WeeklyPostManager from scheduler import Scheduler from team_members.team_member import TeamMember from discord.ext import commands, tasks from discord import Intents, DMChannel from flask import Flask from asyncio import Task, ensure_future, CancelledError
13,447
BOT_TOKEN = os.getenv('DISCORD_BOT_TOKEN') GUILD_TOKEN = int(os.getenv('DISCORD_GUILD_TOKEN')) CHANNEL_TOKEN = int(os.getenv('DISCORD_CHANNEL_TOKEN')) ADMIN_DISCORD_ID = int(os.getenv('ADMIN_DISCORD_ID')) # Retrieve database credentials from environment variables MYSQL_HOST = os.getenv('MYSQL_HOST') MYSQL_USER = os.getenv('MYSQL_USER') MYSQL_PASSWORD = os.getenv('MYSQL_PASSWORD') MYSQL_DB = os.getenv('MYSQL_DB') MYSQL_PORT = os.getenv('MYSQL_PORT') ORG_NAME = os.getenv('GITHUB_ORG_NAME') ORG_TOKEN = os.getenv('GITHUB_ORG_TOKEN') OPENAI_API_KEY = os.getenv('OPENAI_API_KEY') # Initialize bot with default intents intents = Intents.default() intents.members = True intents.message_content = True bot = commands.Bot(command_prefix='!', intents=intents) openai.api_key = OPENAI_API_KEY # TODO: Remove these globals streaks_manager = None weekly_post_manager = None team_member_manager = None updates_manager = None scheduler = None ongoing_status_requests = {} THUMBS_UP_EMOJI = "👍" PENCIL_EMOJI = "✏️" REPORT_SUBMISSION_EMOJI = '📝' async def weekly_state_reset(weekly_post_manager: WeeklyPostManager, streaks_manager: StreaksManager, team_members: List[TeamMember]): # Reset streaks for the previous week for member in team_members: if not member.on_vacation and member.weekly_checkins < 5: streaks_manager.reset_streak(member.discord_id) member.reset_streak() member.reset_weekly_checkins() # Initialize new weekly post await weekly_post_manager.initialize_post(team_members) def get_all_commit_messages_for_user(org_name: str, token: str, member: TeamMember) -> list: """Retrieve all commit messages for a user across all repos in an organization from the last 24 hours.""" headers = { "Authorization": f"token {token}", "Accept": "application/vnd.github.v3+json" } last_update_timestamp, user_time_zone = updates_manager.get_last_update_timestamp(member.discord_id) if last_update_timestamp: # Convert the timestamp to UTC local_tz = pytz.timezone(user_time_zone) localized_timestamp = local_tz.localize(last_update_timestamp) utc_timestamp = localized_timestamp.astimezone(pytz.utc) # Format the timestamp for the GitHub API and append 'Z' since_date = utc_timestamp.isoformat() if not since_date.endswith('Z'): since_date = utc_timestamp.isoformat().replace('+00:00', '') + 'Z' else: # If no updates found, default to last 24 hours since_date = (datetime.utcnow() - timedelta(days=1)).isoformat() + 'Z' all_commit_messages = [] # Paginate through all repositories in the organization repos_url = f"https://api.github.com/orgs/{org_name}/repos?type=all&per_page=100" while repos_url: response = requests.get(repos_url, headers=headers) if response.status_code != 200: # Log error and break loop print(f"Failed to fetch repos: {response.status_code} {response.text}") break repos = response.json() # Iterate over each repository for repo in repos: repo_name = repo["name"] commits_url = f"https://api.github.com/repos/{org_name}/{repo_name}/commits?author={member.github_username}&since={since_date}&per_page=100" # Paginate through commits for the repository while commits_url: response = requests.get(commits_url, headers=headers) if response.status_code != 200: # Log error and continue to the next repository print(f"Failed to fetch commits for {repo_name}: {response.status_code} {response.text}") break commits = response.json() repo_commit_messages = [commit["commit"]["message"] for commit in commits] all_commit_messages.extend(repo_commit_messages) # Check for the 'next' link for commits pagination commits_url = 
get_pagination_link(response.headers, 'next') # Check for the 'next' link for repositories pagination repos_url = get_pagination_link(response.headers, 'next') return all_commit_messages def get_pagination_link(headers, rel): """Extract pagination link for the 'rel' type from the Link header.""" link = headers.get('Link', None) if link: links = link.split(', ') for link in links: if 'rel="{}"'.format(rel) in link: return link.split('; ')[0].strip('<>') return None async def send_status_request(member: TeamMember, weekly_post_manager: WeeklyPostManager, streaks_manager: StreaksManager,
# Import required modules app = Flask(__name__) # Load environment variables from the .env file load_dotenv() # Retrieve bot, guild, and channel tokens from environment variables BOT_TOKEN = os.getenv('DISCORD_BOT_TOKEN') GUILD_TOKEN = int(os.getenv('DISCORD_GUILD_TOKEN')) CHANNEL_TOKEN = int(os.getenv('DISCORD_CHANNEL_TOKEN')) ADMIN_DISCORD_ID = int(os.getenv('ADMIN_DISCORD_ID')) # Retrieve database credentials from environment variables MYSQL_HOST = os.getenv('MYSQL_HOST') MYSQL_USER = os.getenv('MYSQL_USER') MYSQL_PASSWORD = os.getenv('MYSQL_PASSWORD') MYSQL_DB = os.getenv('MYSQL_DB') MYSQL_PORT = os.getenv('MYSQL_PORT') ORG_NAME = os.getenv('GITHUB_ORG_NAME') ORG_TOKEN = os.getenv('GITHUB_ORG_TOKEN') OPENAI_API_KEY = os.getenv('OPENAI_API_KEY') # Initialize bot with default intents intents = Intents.default() intents.members = True intents.message_content = True bot = commands.Bot(command_prefix='!', intents=intents) openai.api_key = OPENAI_API_KEY # TODO: Remove these globals streaks_manager = None weekly_post_manager = None team_member_manager = None updates_manager = None scheduler = None ongoing_status_requests = {} THUMBS_UP_EMOJI = "👍" PENCIL_EMOJI = "✏️" REPORT_SUBMISSION_EMOJI = '📝' async def weekly_state_reset(weekly_post_manager: WeeklyPostManager, streaks_manager: StreaksManager, team_members: List[TeamMember]): # Reset streaks for the previous week for member in team_members: if not member.on_vacation and member.weekly_checkins < 5: streaks_manager.reset_streak(member.discord_id) member.reset_streak() member.reset_weekly_checkins() # Initialize new weekly post await weekly_post_manager.initialize_post(team_members) def get_all_commit_messages_for_user(org_name: str, token: str, member: TeamMember) -> list: """Retrieve all commit messages for a user across all repos in an organization from the last 24 hours.""" headers = { "Authorization": f"token {token}", "Accept": "application/vnd.github.v3+json" } last_update_timestamp, user_time_zone = updates_manager.get_last_update_timestamp(member.discord_id) if last_update_timestamp: # Convert the timestamp to UTC local_tz = pytz.timezone(user_time_zone) localized_timestamp = local_tz.localize(last_update_timestamp) utc_timestamp = localized_timestamp.astimezone(pytz.utc) # Format the timestamp for the GitHub API and append 'Z' since_date = utc_timestamp.isoformat() if not since_date.endswith('Z'): since_date = utc_timestamp.isoformat().replace('+00:00', '') + 'Z' else: # If no updates found, default to last 24 hours since_date = (datetime.utcnow() - timedelta(days=1)).isoformat() + 'Z' all_commit_messages = [] # Paginate through all repositories in the organization repos_url = f"https://api.github.com/orgs/{org_name}/repos?type=all&per_page=100" while repos_url: response = requests.get(repos_url, headers=headers) if response.status_code != 200: # Log error and break loop print(f"Failed to fetch repos: {response.status_code} {response.text}") break repos = response.json() # Iterate over each repository for repo in repos: repo_name = repo["name"] commits_url = f"https://api.github.com/repos/{org_name}/{repo_name}/commits?author={member.github_username}&since={since_date}&per_page=100" # Paginate through commits for the repository while commits_url: response = requests.get(commits_url, headers=headers) if response.status_code != 200: # Log error and continue to the next repository print(f"Failed to fetch commits for {repo_name}: {response.status_code} {response.text}") break commits = response.json() repo_commit_messages = 
[commit["commit"]["message"] for commit in commits] all_commit_messages.extend(repo_commit_messages) # Check for the 'next' link for commits pagination commits_url = get_pagination_link(response.headers, 'next') # Check for the 'next' link for repositories pagination repos_url = get_pagination_link(response.headers, 'next') return all_commit_messages def get_pagination_link(headers, rel): """Extract pagination link for the 'rel' type from the Link header.""" link = headers.get('Link', None) if link: links = link.split(', ') for link in links: if 'rel="{}"'.format(rel) in link: return link.split('; ')[0].strip('<>') return None async def send_status_request(member: TeamMember, weekly_post_manager: WeeklyPostManager, streaks_manager: StreaksManager,
updates_manager: UpdatesManager):
6
2023-10-12 02:01:46+00:00
16k
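`get_pagination_link` from the sample above is easy to exercise offline against a hard-coded Link header; the acme URLs below are made up and stand in for a live GitHub response.

def get_pagination_link(headers: dict, rel: str):
    """Extract pagination link for the 'rel' type from the Link header."""
    link = headers.get('Link', None)
    if link:
        for part in link.split(', '):
            if 'rel="{}"'.format(rel) in part:
                return part.split('; ')[0].strip('<>')
    return None

headers = {'Link': '<https://api.github.com/orgs/acme/repos?page=2>; rel="next", '
                   '<https://api.github.com/orgs/acme/repos?page=5>; rel="last"'}
print(get_pagination_link(headers, 'next'))   # -> https://api.github.com/orgs/acme/repos?page=2
print(get_pagination_link({}, 'next'))        # -> None when no Link header is present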
azuline/rose
rose/watcher.py
[ { "identifier": "update_cache_evict_nonexistent_collages", "path": "rose/cache.py", "snippet": "def update_cache_evict_nonexistent_collages(c: Config) -> None:\n logger.debug(\"Evicting cached collages that are not on disk\")\n collage_names: list[str] = []\n for f in os.scandir(c.music_source_...
import asyncio import contextlib import logging import sys import time from dataclasses import dataclass from pathlib import Path from queue import Empty, Queue from typing import Literal from watchdog.events import ( FileSystemEvent, FileSystemEventHandler, FileSystemMovedEvent, ) from watchdog.observers import Observer from rose.cache import ( update_cache_evict_nonexistent_collages, update_cache_evict_nonexistent_playlists, update_cache_evict_nonexistent_releases, update_cache_for_collages, update_cache_for_playlists, update_cache_for_releases, ) from rose.config import Config
10,825
""" The watcher module is architected to decouple the event listener from the event processor. This is because we must introduce a wait into the event processor, but we do not want to block the event listener. In order to performantly introduce such a wait, we've made the event processor async. However, the event listener uses a non-async library, so the event listener is sync. Why do we need to introduce a wait time into the event processor? Changes to releases occur across an entire directory, but change events come in one file at a time. If we respond to a file event too early, we may do silly things like write a new `.rose.{uuid}.toml` file, only to have a pre-existing one suddenly appear after. We only want to operate on a release once all files have finished changing. Let's dive down into the details. The watcher is a process that spawns a background thread and then runs an event loop. Hierarchically: Process Thread -> watchdog/inotify listener that enqueues events Event Loop -> processes+debounces events asynchronously from the queue """ logger = logging.getLogger(__name__) # Shorten wait times if we are in a test. This way a test runs faster. This is wasteful in # production though. WAIT_DIVIDER = 1 if "pytest" not in sys.modules else 10 EventType = Literal["created", "deleted", "modified", "moved"] EVENT_TYPES: list[EventType] = ["created", "deleted", "modified", "moved"] @dataclass(frozen=True) class WatchdogEvent: type: EventType collage: str | None = None playlist: str | None = None release: Path | None = None class EventHandler(FileSystemEventHandler): # pragma: no cover def __init__(self, config: Config, queue: Queue[WatchdogEvent]): super().__init__() self.config = config self.queue = queue def on_any_event(self, event: FileSystemEvent) -> None: super().on_any_event(event) # type: ignore path = event.dest_path if isinstance(event, FileSystemMovedEvent) else event.src_path logger.debug(f"Notified of {event.event_type} event for {path}") etype: EventType = event.event_type # type: ignore if etype not in EVENT_TYPES: return # Collage event. relative_path = path.removeprefix(str(self.config.music_source_dir) + "/") if relative_path.startswith("!collages/"): if not relative_path.endswith(".toml"): return collage = relative_path.removeprefix("!collages/").removesuffix(".toml") logger.debug(f"Queueing {etype} event on collage {collage}") self.queue.put(WatchdogEvent(collage=collage, type=etype)) return # Playlist event. if relative_path.startswith("!playlists/"): if not relative_path.endswith(".toml"): return playlist = relative_path.removeprefix("!playlists/").removesuffix(".toml") logger.debug(f"Queueing {etype} event on playlist {playlist}") self.queue.put(WatchdogEvent(playlist=playlist, type=etype)) return # Release event. with contextlib.suppress(IndexError): final_path_part = Path(relative_path).parts[0] if final_path_part == "/": return release_dir = self.config.music_source_dir / final_path_part logger.debug(f"Queueing {etype} event on release {release_dir}") self.queue.put(WatchdogEvent(release=release_dir, type=etype)) async def handle_event( c: Config, e: WatchdogEvent, wait: float | None = None, ) -> None: # pragma: no cover if wait: await asyncio.sleep(wait / WAIT_DIVIDER) if e.type == "created" or e.type == "modified": if e.collage:
""" The watcher module is architected to decouple the event listener from the event processor. This is because we must introduce a wait into the event processor, but we do not want to block the event listener. In order to performantly introduce such a wait, we've made the event processor async. However, the event listener uses a non-async library, so the event listener is sync. Why do we need to introduce a wait time into the event processor? Changes to releases occur across an entire directory, but change events come in one file at a time. If we respond to a file event too early, we may do silly things like write a new `.rose.{uuid}.toml` file, only to have a pre-existing one suddenly appear after. We only want to operate on a release once all files have finished changing. Let's dive down into the details. The watcher is a process that spawns a background thread and then runs an event loop. Hierarchically: Process Thread -> watchdog/inotify listener that enqueues events Event Loop -> processes+debounces events asynchronously from the queue """ logger = logging.getLogger(__name__) # Shorten wait times if we are in a test. This way a test runs faster. This is wasteful in # production though. WAIT_DIVIDER = 1 if "pytest" not in sys.modules else 10 EventType = Literal["created", "deleted", "modified", "moved"] EVENT_TYPES: list[EventType] = ["created", "deleted", "modified", "moved"] @dataclass(frozen=True) class WatchdogEvent: type: EventType collage: str | None = None playlist: str | None = None release: Path | None = None class EventHandler(FileSystemEventHandler): # pragma: no cover def __init__(self, config: Config, queue: Queue[WatchdogEvent]): super().__init__() self.config = config self.queue = queue def on_any_event(self, event: FileSystemEvent) -> None: super().on_any_event(event) # type: ignore path = event.dest_path if isinstance(event, FileSystemMovedEvent) else event.src_path logger.debug(f"Notified of {event.event_type} event for {path}") etype: EventType = event.event_type # type: ignore if etype not in EVENT_TYPES: return # Collage event. relative_path = path.removeprefix(str(self.config.music_source_dir) + "/") if relative_path.startswith("!collages/"): if not relative_path.endswith(".toml"): return collage = relative_path.removeprefix("!collages/").removesuffix(".toml") logger.debug(f"Queueing {etype} event on collage {collage}") self.queue.put(WatchdogEvent(collage=collage, type=etype)) return # Playlist event. if relative_path.startswith("!playlists/"): if not relative_path.endswith(".toml"): return playlist = relative_path.removeprefix("!playlists/").removesuffix(".toml") logger.debug(f"Queueing {etype} event on playlist {playlist}") self.queue.put(WatchdogEvent(playlist=playlist, type=etype)) return # Release event. with contextlib.suppress(IndexError): final_path_part = Path(relative_path).parts[0] if final_path_part == "/": return release_dir = self.config.music_source_dir / final_path_part logger.debug(f"Queueing {etype} event on release {release_dir}") self.queue.put(WatchdogEvent(release=release_dir, type=etype)) async def handle_event( c: Config, e: WatchdogEvent, wait: float | None = None, ) -> None: # pragma: no cover if wait: await asyncio.sleep(wait / WAIT_DIVIDER) if e.type == "created" or e.type == "modified": if e.collage:
update_cache_for_collages(c, [e.collage])
3
2023-10-09 14:42:23+00:00
16k
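The listener/processor split the watcher docstring above describes reduces to: a sync thread enqueues events, and an async consumer sleeps before acting so a burst of per-file events collapses into one release-level update. A stripped-down sketch of that consumer, with toy string events and made-up wait values rather than the module's WatchdogEvent machinery:

import asyncio
from queue import Empty, Queue

async def event_processor(queue: "Queue[str]", wait: float = 0.5) -> None:
    while True:
        try:
            path = queue.get(block=False)
        except Empty:
            await asyncio.sleep(0.05)     # idle poll keeps the loop responsive
            continue
        await asyncio.sleep(wait)         # debounce: let sibling file events settle
        print(f"updating cache for {path}")

async def main() -> None:
    q: "Queue[str]" = Queue()
    q.put("Artist - Album/01.flac")
    consumer = asyncio.ensure_future(event_processor(q))
    await asyncio.sleep(1.0)              # long enough for one debounced update
    consumer.cancel()

asyncio.run(main())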
grainseed/monitask
sam/segment_anything/build_sam.py
[ { "identifier": "Sam", "path": "sam/segment_anything/modeling/sam.py", "snippet": "class Sam(nn.Module):\r\n mask_threshold: float = 0.0\r\n image_format: str = \"RGB\"\r\n\r\n def __init__(\r\n self,\r\n image_encoder: ImageEncoderViT,\r\n prompt_encoder: PromptEncoder,\r\...
import torch from functools import partial from .modeling import ImageEncoderViT, MaskDecoder,MaskDecoderHQ, PromptEncoder, Sam, TwoWayTransformer, TinyViT
11,626
mobile_sam = Sam( image_encoder=TinyViT(img_size=1024, in_chans=3, num_classes=1000, embed_dims=[64, 128, 160, 320], depths=[2, 2, 6, 2], num_heads=[2, 4, 5, 10], window_sizes=[7, 7, 14, 7], mlp_ratio=4., drop_rate=0., drop_path_rate=0.0, use_checkpoint=False, mbconv_expand_ratio=4.0, local_conv_size=3, layer_lr_decay=0.8 ), prompt_encoder=PromptEncoder( embed_dim=prompt_embed_dim, image_embedding_size=(image_embedding_size, image_embedding_size), input_image_size=(image_size, image_size), mask_in_chans=16, ), mask_decoder=MaskDecoderHQ( num_multimask_outputs=3, transformer=TwoWayTransformer( depth=2, embedding_dim=prompt_embed_dim, mlp_dim=2048, num_heads=8, ), transformer_dim=prompt_embed_dim, iou_head_depth=3, iou_head_hidden_dim=256, vit_dim=160, ), pixel_mean=[123.675, 116.28, 103.53], pixel_std=[58.395, 57.12, 57.375], ) mobile_sam.eval() if checkpoint is not None: with open(checkpoint, "rb") as f: device = "cuda" if torch.cuda.is_available() else "cpu" state_dict = torch.load(f, map_location=device) info = mobile_sam.load_state_dict(state_dict, strict=False) #print(info) for n, p in mobile_sam.named_parameters(): if 'hf_token' not in n and 'hf_mlp' not in n and 'compress_vit_feat' not in n and 'embedding_encoder' not in n and 'embedding_maskfeature' not in n: p.requires_grad = False return mobile_sam def build_mobile_sam(checkpoint=None,device="cpu"): prompt_embed_dim = 256 image_size = 1024 vit_patch_size = 16 image_embedding_size = image_size // vit_patch_size mobile_sam = Sam( image_encoder=TinyViT(img_size=1024, in_chans=3, num_classes=1000, embed_dims=[64, 128, 160, 320], depths=[2, 2, 6, 2], num_heads=[2, 4, 5, 10], window_sizes=[7, 7, 14, 7], mlp_ratio=4., drop_rate=0., drop_path_rate=0.0, use_checkpoint=False, mbconv_expand_ratio=4.0, local_conv_size=3, layer_lr_decay=0.8 ), prompt_encoder=PromptEncoder( embed_dim=prompt_embed_dim, image_embedding_size=(image_embedding_size, image_embedding_size), input_image_size=(image_size, image_size), mask_in_chans=16, ), mask_decoder=MaskDecoder( num_multimask_outputs=3, transformer=TwoWayTransformer( depth=2, embedding_dim=prompt_embed_dim, mlp_dim=2048, num_heads=8, ), transformer_dim=prompt_embed_dim, iou_head_depth=3, iou_head_hidden_dim=256, ), pixel_mean=[123.675, 116.28, 103.53], pixel_std=[58.395, 57.12, 57.375], ) mobile_sam.eval() if checkpoint is not None: with open(checkpoint, "rb") as f: state_dict = torch.load(f,map_location=torch.device(device)) mobile_sam.load_state_dict(state_dict,strict=True) return mobile_sam sam_model_registry = { "default": build_sam_vit_h, "vit_h": build_sam_vit_h, "vit_l": build_sam_vit_l, "vit_b": build_sam_vit_b, "vit_tiny": build_sam_vit_t, "mobile_sam": build_mobile_sam, } def _build_sam( encoder_embed_dim, encoder_depth, encoder_num_heads, encoder_global_attn_indexes, checkpoint=None, device="gpu", ): prompt_embed_dim = 256 image_size = 1024 vit_patch_size = 16 image_embedding_size = image_size // vit_patch_size sam = Sam(
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. def build_sam_vit_h(checkpoint=None,device="cpu"): return _build_sam( encoder_embed_dim=1280, encoder_depth=32, encoder_num_heads=16, encoder_global_attn_indexes=[7, 15, 23, 31], checkpoint=checkpoint, ) build_sam = build_sam_vit_h def build_sam_vit_l(checkpoint=None,device="cpu"): return _build_sam( encoder_embed_dim=1024, encoder_depth=24, encoder_num_heads=16, encoder_global_attn_indexes=[5, 11, 17, 23], checkpoint=checkpoint, ) def build_sam_vit_b(checkpoint=None,device="cpu"): return _build_sam( encoder_embed_dim=768, encoder_depth=12, encoder_num_heads=12, encoder_global_attn_indexes=[2, 5, 8, 11], checkpoint=checkpoint, ) def build_sam_vit_t(checkpoint=None,device="cpu"): prompt_embed_dim = 256 image_size = 1024 vit_patch_size = 16 image_embedding_size = image_size // vit_patch_size mobile_sam = Sam( image_encoder=TinyViT(img_size=1024, in_chans=3, num_classes=1000, embed_dims=[64, 128, 160, 320], depths=[2, 2, 6, 2], num_heads=[2, 4, 5, 10], window_sizes=[7, 7, 14, 7], mlp_ratio=4., drop_rate=0., drop_path_rate=0.0, use_checkpoint=False, mbconv_expand_ratio=4.0, local_conv_size=3, layer_lr_decay=0.8 ), prompt_encoder=PromptEncoder( embed_dim=prompt_embed_dim, image_embedding_size=(image_embedding_size, image_embedding_size), input_image_size=(image_size, image_size), mask_in_chans=16, ), mask_decoder=MaskDecoderHQ( num_multimask_outputs=3, transformer=TwoWayTransformer( depth=2, embedding_dim=prompt_embed_dim, mlp_dim=2048, num_heads=8, ), transformer_dim=prompt_embed_dim, iou_head_depth=3, iou_head_hidden_dim=256, vit_dim=160, ), pixel_mean=[123.675, 116.28, 103.53], pixel_std=[58.395, 57.12, 57.375], ) mobile_sam.eval() if checkpoint is not None: with open(checkpoint, "rb") as f: device = "cuda" if torch.cuda.is_available() else "cpu" state_dict = torch.load(f, map_location=device) info = mobile_sam.load_state_dict(state_dict, strict=False) #print(info) for n, p in mobile_sam.named_parameters(): if 'hf_token' not in n and 'hf_mlp' not in n and 'compress_vit_feat' not in n and 'embedding_encoder' not in n and 'embedding_maskfeature' not in n: p.requires_grad = False return mobile_sam def build_mobile_sam(checkpoint=None,device="cpu"): prompt_embed_dim = 256 image_size = 1024 vit_patch_size = 16 image_embedding_size = image_size // vit_patch_size mobile_sam = Sam( image_encoder=TinyViT(img_size=1024, in_chans=3, num_classes=1000, embed_dims=[64, 128, 160, 320], depths=[2, 2, 6, 2], num_heads=[2, 4, 5, 10], window_sizes=[7, 7, 14, 7], mlp_ratio=4., drop_rate=0., drop_path_rate=0.0, use_checkpoint=False, mbconv_expand_ratio=4.0, local_conv_size=3, layer_lr_decay=0.8 ), prompt_encoder=PromptEncoder( embed_dim=prompt_embed_dim, image_embedding_size=(image_embedding_size, image_embedding_size), input_image_size=(image_size, image_size), mask_in_chans=16, ), mask_decoder=MaskDecoder( num_multimask_outputs=3, transformer=TwoWayTransformer( depth=2, embedding_dim=prompt_embed_dim, mlp_dim=2048, num_heads=8, ), transformer_dim=prompt_embed_dim, iou_head_depth=3, iou_head_hidden_dim=256, ), pixel_mean=[123.675, 116.28, 103.53], pixel_std=[58.395, 57.12, 57.375], ) mobile_sam.eval() if checkpoint is not None: with open(checkpoint, "rb") as f: state_dict = torch.load(f,map_location=torch.device(device)) mobile_sam.load_state_dict(state_dict,strict=True) return mobile_sam sam_model_registry = { 
"default": build_sam_vit_h, "vit_h": build_sam_vit_h, "vit_l": build_sam_vit_l, "vit_b": build_sam_vit_b, "vit_tiny": build_sam_vit_t, "mobile_sam": build_mobile_sam, } def _build_sam( encoder_embed_dim, encoder_depth, encoder_num_heads, encoder_global_attn_indexes, checkpoint=None, device="gpu", ): prompt_embed_dim = 256 image_size = 1024 vit_patch_size = 16 image_embedding_size = image_size // vit_patch_size sam = Sam(
image_encoder=ImageEncoderViT(
1
2023-10-14 13:45:54+00:00
16k
zhaoyizhou1123/mbrcsl
examples/roboverse/run_mbrcsl_mlpdyn_roboverse.py
[ { "identifier": "EnsembleDynamicsModel", "path": "offlinerlkit/modules/dynamics_module.py", "snippet": "class EnsembleDynamicsModel(nn.Module):\n def __init__(\n self,\n obs_dim: int,\n action_dim: int,\n hidden_dims: Union[List[int], Tuple[int]],\n num_ensemble: in...
import numpy as np import torch import roboverse import argparse import os import random import pickle import datetime from copy import deepcopy from typing import Dict, Tuple from collections import defaultdict from offlinerlkit.modules import EnsembleDynamicsModel from offlinerlkit.dynamics import EnsembleDynamics from offlinerlkit.utils.termination_fns import get_termination_fn from offlinerlkit.utils.scaler import StandardScaler from offlinerlkit.utils.roboverse_utils import PickPlaceObsWrapper, DoubleDrawerObsWrapper, get_pickplace_dataset, get_doubledrawer_dataset from offlinerlkit.utils.logger import Logger, make_log_dirs from offlinerlkit.policy_trainer import RcslPolicyTrainer, DiffusionPolicyTrainer from offlinerlkit.utils.none_or_str import none_or_str from offlinerlkit.policy import SimpleDiffusionPolicy, AutoregressivePolicy
14,115
''' Recommended hyperparameters: pickplace, horizon=40, behavior_epoch=30 doubledraweropen, horizon=50, behavior_epoch=40 doubledrawercloseopen, horizon=80, behavior_epoch=40 ''' def get_args(): parser = argparse.ArgumentParser() # general parser.add_argument("--algo-name", type=str, default="mbrcsl_mlpdyn") parser.add_argument("--task", type=str, default="pickplace", help="task name") parser.add_argument("--seed", type=int, default=0) parser.add_argument("--num_workers", type=int, default=1, help="Dataloader workers, align with cpu number") parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu") parser.add_argument("--last_eval", action="store_false") # env config parser.add_argument('--data_dir', type=str, required=True) parser.add_argument('--horizon', type=int, default=40, help="max path length for pickplace") # transformer_autoregressive dynamics parser.add_argument("--dynamics_lr", type=float, default=1e-3) parser.add_argument("--dynamics_hidden_dims", type=int, nargs='*', default=[200, 200, 200, 200]) parser.add_argument("--dynamics_weight_decay", type=float, nargs='*', default=[2.5e-5, 5e-5, 7.5e-5, 7.5e-5, 1e-4]) parser.add_argument("--n_ensemble", type=int, default=7) parser.add_argument("--n_elites", type=int, default=5) parser.add_argument("--load_dynamics_path", type=none_or_str, default=None) # Behavior policy (diffusion) parser.add_argument("--behavior_epoch", type=int, default=30) parser.add_argument("--num_diffusion_iters", type=int, default=5, help="Number of diffusion steps") parser.add_argument('--behavior_batch', type=int, default=256) parser.add_argument('--load_diffusion_path', type=none_or_str, default=None) parser.add_argument('--task_weight', type=float, default=1.4, help="Weight on task data when training diffusion policy") parser.add_argument('--sample_ratio', type=float, default=0.8, help="Use (sample_ratio * num_total_data) data to train diffusion policy") # Rollout parser.add_argument('--rollout_ckpt_path', type=none_or_str, default=None, help="file dir, used to load/store rollout trajs" ) parser.add_argument('--rollout_epoch', type=int, default=200, help="Max number of epochs to rollout the policy") parser.add_argument('--num_need_traj', type=int, default=5000, help="Needed valid trajs in rollout") parser.add_argument("--rollout-batch", type=int, default=200, help="Number of trajs to be sampled at one time") # RCSL policy (mlp) parser.add_argument("--rcsl_hidden_dims", type=int, nargs='*', default=[200, 200, 200, 200]) parser.add_argument("--rcsl_lr", type=float, default=1e-3) parser.add_argument("--rcsl_batch", type=int, default=256) parser.add_argument("--rcsl_epoch", type=int, default=100) parser.add_argument("--eval_episodes", type=int, default=100) parser.add_argument("--holdout_ratio", type=float, default=0.2) return parser.parse_args() def rollout_simple( init_obss: np.ndarray, dynamics,
''' Recommended hyperparameters: pickplace, horizon=40, behavior_epoch=30 doubledraweropen, horizon=50, behavior_epoch=40 doubledrawercloseopen, horizon=80, behavior_epoch=40 ''' def get_args(): parser = argparse.ArgumentParser() # general parser.add_argument("--algo-name", type=str, default="mbrcsl_mlpdyn") parser.add_argument("--task", type=str, default="pickplace", help="task name") parser.add_argument("--seed", type=int, default=0) parser.add_argument("--num_workers", type=int, default=1, help="Dataloader workers, align with cpu number") parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu") parser.add_argument("--last_eval", action="store_false") # env config parser.add_argument('--data_dir', type=str, required=True) parser.add_argument('--horizon', type=int, default=40, help="max path length for pickplace") # transformer_autoregressive dynamics parser.add_argument("--dynamics_lr", type=float, default=1e-3) parser.add_argument("--dynamics_hidden_dims", type=int, nargs='*', default=[200, 200, 200, 200]) parser.add_argument("--dynamics_weight_decay", type=float, nargs='*', default=[2.5e-5, 5e-5, 7.5e-5, 7.5e-5, 1e-4]) parser.add_argument("--n_ensemble", type=int, default=7) parser.add_argument("--n_elites", type=int, default=5) parser.add_argument("--load_dynamics_path", type=none_or_str, default=None) # Behavior policy (diffusion) parser.add_argument("--behavior_epoch", type=int, default=30) parser.add_argument("--num_diffusion_iters", type=int, default=5, help="Number of diffusion steps") parser.add_argument('--behavior_batch', type=int, default=256) parser.add_argument('--load_diffusion_path', type=none_or_str, default=None) parser.add_argument('--task_weight', type=float, default=1.4, help="Weight on task data when training diffusion policy") parser.add_argument('--sample_ratio', type=float, default=0.8, help="Use (sample_ratio * num_total_data) data to train diffusion policy") # Rollout parser.add_argument('--rollout_ckpt_path', type=none_or_str, default=None, help="file dir, used to load/store rollout trajs" ) parser.add_argument('--rollout_epoch', type=int, default=200, help="Max number of epochs to rollout the policy") parser.add_argument('--num_need_traj', type=int, default=5000, help="Needed valid trajs in rollout") parser.add_argument("--rollout-batch", type=int, default=200, help="Number of trajs to be sampled at one time") # RCSL policy (mlp) parser.add_argument("--rcsl_hidden_dims", type=int, nargs='*', default=[200, 200, 200, 200]) parser.add_argument("--rcsl_lr", type=float, default=1e-3) parser.add_argument("--rcsl_batch", type=int, default=256) parser.add_argument("--rcsl_epoch", type=int, default=100) parser.add_argument("--eval_episodes", type=int, default=100) parser.add_argument("--holdout_ratio", type=float, default=0.2) return parser.parse_args() def rollout_simple( init_obss: np.ndarray, dynamics,
rollout_policy: SimpleDiffusionPolicy,
13
2023-10-11 08:36:06+00:00
16k
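One detail in the argparse block above worth seeing concretely: `nargs='*'` with `type=int` turns a space-separated command-line list into a Python list of ints, and the default kicks in when the flag is absent. A throwaway parser, with the flag name borrowed from the record:

import argparse

p = argparse.ArgumentParser()
p.add_argument("--rcsl_hidden_dims", type=int, nargs="*", default=[200, 200, 200, 200])

print(p.parse_args(["--rcsl_hidden_dims", "256", "256"]).rcsl_hidden_dims)  # [256, 256]
print(p.parse_args([]).rcsl_hidden_dims)                                    # [200, 200, 200, 200]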
wilhelmagren/finq
finq/portfolio.py
[ { "identifier": "Asset", "path": "finq/asset.py", "snippet": "class Asset(object):\n \"\"\" \"\"\"\n\n def __init__(\n self,\n data: pd.Series,\n name: str,\n *,\n market: Optional[str] = None,\n index_name: Optional[str] = None,\n price_type: str =...
import logging import pandas as pd import numpy as np import scipy.optimize as scipyopt import matplotlib.pyplot as plt from functools import wraps from tqdm import tqdm from finq.asset import Asset from finq.datasets import Dataset from finq.exceptions import ( FinqError, InvalidCombinationOfArgumentsError, InvalidPortfolioWeightsError, ObjectiveFunctionError, PortfolioNotYetOptimizedError, ) from finq.formulas import ( period_returns, sharpe_ratio, weighted_returns, weighted_variance, ) from typing import ( Any, Callable, List, Dict, Tuple, Union, Optional, )
10,853
return period_returns(self._data, period=period) def daily_returns_mean(self) -> float: """ """ return np.mean(period_returns(self._data, period=1), axis=1) def yearly_returns_mean(self) -> float: """ """ return np.mean(period_returns(self._data, period=self._n_trading_days), axis=1) def period_returns_mean(self, period: int) -> float: """ """ return np.mean(period_returns(self._data, period=period), axis=1) def daily_covariance(self) -> np.ndarray: """ """ return np.cov(period_returns(self._data, period=1), rowvar=True) def yearly_covariance(self) -> np.ndarray: """ """ return np.cov( period_returns(self._data, period=self._n_trading_days), rowvar=True ) def period_covariance(self, period: int) -> np.ndarray: """ """ return np.cov(period_returns(self._data, period=period), rowvar=True) def set_objective_function( self, function: Callable, *args: Tuple[Any, ...], ): """ """ self._objective_function = function self._objective_function_args = args def set_objective_constraints( self, *constraints, ): """ """ self._objective_constraints = [{"type": t, "fun": c} for (t, c) in constraints] def set_objective_bounds( self, bounds: Union[Tuple[int, ...], List[Tuple[int, ...]]], ): """ """ if isinstance(bounds, tuple): bounds = [bounds for _ in range(self._data.shape[0])] self._objective_bounds = bounds def sample_random_portfolios( self, n_samples: int, *, distribution: Union[str, Callable] = "lognormal", **kwargs: Dict[str, Any], ): """ """ if isinstance(distribution, str): distribution = self._weight_initializations.get(distribution, None) if distribution is None: raise ValueError( "You provided a non valid weight initialization distribution." ) portfolios = [] for i in (bar := tqdm(range(n_samples))): if i % 10: bar.set_description( f"Sampling random portfolio {i + 1} from " f"{distribution.__name__} distribution" ) portfolio = distribution(**kwargs) portfolios.append(portfolio / portfolio.sum()) self._random_portfolios = np.transpose(np.concatenate(portfolios, axis=1)) @check_valid_weights def variance(self) -> float: """ """ return weighted_variance( self._weights.T, self.daily_covariance(), ) @check_valid_weights def volatility(self) -> float: """ """ return np.sqrt( weighted_variance( self._weights.T, self.daily_covariance(), ), ) @check_valid_weights def expected_returns(self) -> float: """ """ return weighted_returns(self._weights.T, self.daily_returns_mean()) @check_valid_weights
""" MIT License Copyright (c) 2023 Wilhelm Ågren Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. File created: 2023-10-20 Last updated: 2023-11-10 """ log = logging.getLogger(__name__) class Portfolio(object): """ """ # For a full list of `scipy` optimization methods and references, see the link below. # https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html _supported_optimization_methods = ( "Nelder-Mead", "Powell", "CG", "BFGS", "Newton-CG", "L-BFGS-B", "TNC", "COBYLA", "SLSQP", "trust-constr", "dogleg", "trust-ncg", "trust-exact", "trust-krylov", ) _weight_initializations = { "lognormal": np.random.lognormal, "normal": np.random.normal, "uniform": np.random.uniform, } def __init__( self, data: Union[Dataset, List[Asset], np.ndarray, pd.DataFrame], *, weights: Optional[np.ndarray] = None, names: Optional[Union[Dict[str, str], List[str]]] = None, symbols: Optional[Union[Dict[str, str], List[str]]] = None, confidence_level: float = 0.95, risk_free_rate: float = 5e-3, n_trading_days: int = 252, objective_function: Optional[Callable] = None, objective_function_args: Tuple[Any, ...] = (), objective_bounds: Optional[List[Tuple[int, ...]]] = None, objective_constraints: Optional[Tuple[Dict, ...]] = None, ): """ """ if isinstance(data, Dataset): assets = data.as_assets() data = list(assets.values()) symbols = list(assets.keys()) if not isinstance(data, list): if names is None and symbols is None and not isinstance(data, pd.DataFrame): raise InvalidCombinationOfArgumentsError( "You need to provide the names and ticker symbols of each asset that you " "want to include in your portfolio if the data you provided is neither a " "`list` of `Asset` objects or a `pd.DataFrame`. You can also try " "providing only one of the arguments `names` and `symbols`, but then as " "a dictionary of the form `key=name` `value=symbol`." 
) if isinstance(data, list): symbols = [a.name for a in data] data = np.array([a.data for a in data]) if isinstance(data, pd.DataFrame): symbols = data.columns data = data.to_numpy().T if isinstance(names, dict): symbols = list(names.values()) names = list(names.keys()) if isinstance(symbols, dict): names = list(symbols.keys()) symbols = list(symbols.values()) self._data = data self._weights = weights self._names = names self._symbols = symbols self._confidence_level = confidence_level self._risk_free_rate = risk_free_rate self._n_trading_days = n_trading_days self._random_portfolios = None self._objective_function = objective_function self._objective_function_args = objective_function_args self._objective_bounds = objective_bounds self._objective_constraints = objective_constraints def weights_are_normalized(self) -> bool: """ """ return np.allclose(self._weights.sum(), 1.0, rtol=1e-6) def initialize_random_weights( self, distribution: Union[str, Callable], *args: Tuple[Any, ...], **kwargs: Dict[str, Any], ): """ """ if isinstance(distribution, str): distribution = self._weight_initializations.get(distribution, None) if distribution is None: raise ValueError( "You provided a non valid weight initialization distribution." ) weights = distribution(*args, **kwargs) self._weights = weights / weights.sum() def check_valid_weights(func) -> Callable: """ """ @wraps(func) def _check_valid_weights(self, *args, **kwargs) -> Optional[FinqError]: """ """ if self._weights is None: raise PortfolioNotYetOptimizedError( "Portfolio weights are `None`. Perhaps you have not yet optimized it? " ) if not self.weights_are_normalized(): raise InvalidPortfolioWeightsError( "Your portfolio weights are not normalized. Make sure to normalize them " "(they sum to one) before calculating any analytical quantities. 
" ) return func(self, *args, **kwargs) return _check_valid_weights def daily_returns(self) -> np.ndarray: """ """ return period_returns(self._data, period=1) def yearly_returns(self) -> np.ndarray: """ """ return period_returns(self._data, period=self._n_trading_days) def period_returns(self, period: int) -> np.ndarray: """ """ return period_returns(self._data, period=period) def daily_returns_mean(self) -> float: """ """ return np.mean(period_returns(self._data, period=1), axis=1) def yearly_returns_mean(self) -> float: """ """ return np.mean(period_returns(self._data, period=self._n_trading_days), axis=1) def period_returns_mean(self, period: int) -> float: """ """ return np.mean(period_returns(self._data, period=period), axis=1) def daily_covariance(self) -> np.ndarray: """ """ return np.cov(period_returns(self._data, period=1), rowvar=True) def yearly_covariance(self) -> np.ndarray: """ """ return np.cov( period_returns(self._data, period=self._n_trading_days), rowvar=True ) def period_covariance(self, period: int) -> np.ndarray: """ """ return np.cov(period_returns(self._data, period=period), rowvar=True) def set_objective_function( self, function: Callable, *args: Tuple[Any, ...], ): """ """ self._objective_function = function self._objective_function_args = args def set_objective_constraints( self, *constraints, ): """ """ self._objective_constraints = [{"type": t, "fun": c} for (t, c) in constraints] def set_objective_bounds( self, bounds: Union[Tuple[int, ...], List[Tuple[int, ...]]], ): """ """ if isinstance(bounds, tuple): bounds = [bounds for _ in range(self._data.shape[0])] self._objective_bounds = bounds def sample_random_portfolios( self, n_samples: int, *, distribution: Union[str, Callable] = "lognormal", **kwargs: Dict[str, Any], ): """ """ if isinstance(distribution, str): distribution = self._weight_initializations.get(distribution, None) if distribution is None: raise ValueError( "You provided a non valid weight initialization distribution." ) portfolios = [] for i in (bar := tqdm(range(n_samples))): if i % 10: bar.set_description( f"Sampling random portfolio {i + 1} from " f"{distribution.__name__} distribution" ) portfolio = distribution(**kwargs) portfolios.append(portfolio / portfolio.sum()) self._random_portfolios = np.transpose(np.concatenate(portfolios, axis=1)) @check_valid_weights def variance(self) -> float: """ """ return weighted_variance( self._weights.T, self.daily_covariance(), ) @check_valid_weights def volatility(self) -> float: """ """ return np.sqrt( weighted_variance( self._weights.T, self.daily_covariance(), ), ) @check_valid_weights def expected_returns(self) -> float: """ """ return weighted_returns(self._weights.T, self.daily_returns_mean()) @check_valid_weights
def sharpe_ratio(self) -> float:
8
2023-10-09 19:02:54+00:00
16k
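The record above ends with `sharpe_ratio` as its `next_line` target. As a hedged sketch of what such a method conventionally computes from the quantities the class already exposes (`expected_returns`, `volatility`, `_risk_free_rate`) — the repository's actual body may differ:

```python
def sharpe_ratio(expected_return: float, volatility: float,
                 risk_free_rate: float = 0.0) -> float:
    """Conventional Sharpe ratio: excess return per unit of volatility."""
    return (expected_return - risk_free_rate) / volatility

# E.g., 0.1% mean daily return at 1.2% daily volatility, zero risk-free rate:
print(sharpe_ratio(1e-3, 1.2e-2))  # ~0.083
```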
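`sample_random_portfolios` in the same record draws weight vectors from a named distribution and normalizes each draw to sum to one. A self-contained NumPy sketch of that normalization idea (variable names here are illustrative, not from the repo):

```python
import numpy as np

rng = np.random.default_rng(0)
n_assets, n_samples = 5, 1_000

# Draw strictly positive lognormal weights, then normalize each row to sum to 1.
raw = rng.lognormal(mean=0.0, sigma=1.0, size=(n_samples, n_assets))
portfolios = raw / raw.sum(axis=1, keepdims=True)

assert np.allclose(portfolios.sum(axis=1), 1.0)
```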
lmb-freiburg/ldce
scripts/ldce.py
[ { "identifier": "disabled_train", "path": "sampling_helpers.py", "snippet": "def disabled_train(self, mode=True):\n \"\"\"Overwrite model.train with this function to make sure train/eval mode\n does not change anymore.\"\"\"\n return self" }, { "identifier": "get_model", "path": "sa...
import argparse import os import psutil import yaml import copy import random import matplotlib.pyplot as plt import numpy as np import pathlib import torch import hydra import wandb import torchvision import json import sys import regex as re import open_clip from contextlib import nullcontext from torch import autocast from omegaconf import OmegaConf, open_dict from hydra.utils import instantiate from omegaconf import DictConfig, OmegaConf from torchvision import transforms, datasets from torchvision.utils import save_image from sampling_helpers import disabled_train, get_model, _unmap_img, generate_samples from sampling_helpers import load_model_hf from ldm import * from ldm.models.diffusion.cc_ddim import CCMDDIMSampler from data.imagenet_classnames import name_map, openai_imagenet_classes from utils.DecisionDensenetModel import DecisionDensenetModel from utils.preprocessor import Normalizer, CropAndNormalizer, ResizeAndNormalizer, GenericPreprocessing, Crop from utils.vision_language_wrapper import VisionLanguageWrapper from utils.madry_net import MadryNet from utils.dino_linear import LinearClassifier, DINOLinear
11,609
torch.backends.cuda.matmul.allow_tf32 = True # torch.backends.cudnn.benchmark = True try: import open_clip except: print("Install OpenClip via: pip install open_clip_torch") def set_seed(seed: int = 0): torch.manual_seed(seed) np.random.seed(seed) random.seed(seed) torch.cuda.manual_seed_all(seed) def blockPrint(): sys.stdout = open(os.devnull, 'w') def get_classifier(cfg, device): if "ImageNet" in cfg.data._target_: classifier_name = cfg.classifier_model.name if classifier_name == "robust_resnet50": classifier_model = MadryNet(cfg.classifier_model.ckpt, device) if "classifier_wrapper" in cfg.classifier_model and cfg.classifier_model.classifier_wrapper:
torch.backends.cuda.matmul.allow_tf32 = True # torch.backends.cudnn.benchmark = True try: import open_clip except: print("Install OpenClip via: pip install open_clip_torch") def set_seed(seed: int = 0): torch.manual_seed(seed) np.random.seed(seed) random.seed(seed) torch.cuda.manual_seed_all(seed) def blockPrint(): sys.stdout = open(os.devnull, 'w') def get_classifier(cfg, device): if "ImageNet" in cfg.data._target_: classifier_name = cfg.classifier_model.name if classifier_name == "robust_resnet50": classifier_model = MadryNet(cfg.classifier_model.ckpt, device) if "classifier_wrapper" in cfg.classifier_model and cfg.classifier_model.classifier_wrapper:
classifier_model = Crop(classifier_model)
12
2023-10-10 09:40:10+00:00
16k
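This record's `next_line` wraps the classifier as `Crop(classifier_model)`; `Crop`, `Normalizer`, and `CropAndNormalizer` come from `utils.preprocessor` in its imports. A minimal sketch of that wrapper pattern, assuming standard ImageNet statistics (the class below is an illustrative stand-in, not the repository's implementation):

```python
import torch
import torch.nn as nn
import torchvision.transforms.functional as TF

class CropAndNormalize(nn.Module):
    """Center-crop and normalize inputs before the wrapped classifier sees them."""

    def __init__(self, classifier: nn.Module, size: int = 224,
                 mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
        super().__init__()
        self.classifier = classifier
        self.size = size
        self.register_buffer("mean", torch.tensor(mean).view(1, 3, 1, 1))
        self.register_buffer("std", torch.tensor(std).view(1, 3, 1, 1))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = TF.center_crop(x, [self.size, self.size])
        return self.classifier((x - self.mean) / self.std)
```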
cpuimage/minSDXLTF
stable_diffusion_xl/stable_diffusion_xl.py
[ { "identifier": "SimpleTokenizer", "path": "stable_diffusion_xl/clip_tokenizer.py", "snippet": "class SimpleTokenizer:\n def __init__(self, bpe_path=None):\n bpe_path = bpe_path or tf.keras.utils.get_file(\n \"bpe_simple_vocab_16e6.txt.gz\",\n \"https://github.com/openai/...
import numpy as np import tensorflow as tf from PIL import Image from scipy.ndimage import correlate1d from .clip_tokenizer import SimpleTokenizer from .diffusion_model import DiffusionXLModel from .image_decoder import ImageDecoder from .image_encoder import ImageEncoder from .long_prompt_weighting import get_weighted_text_embeddings from .scheduler import Scheduler from .text_encoder_laion import TextEncoderLaion, TextEncoderLaionProj from .text_encoder_openai import TextEncoderOpenAi
12,844
def tokenizer(self): """tokenizer returns the tokenizer used for text inputs. Can be overridden for tasks like textual inversion where the tokenizer needs to be modified. """ if self._tokenizer is None: self._tokenizer = SimpleTokenizer() return self._tokenizer def _get_initial_diffusion_noise(self, batch_size, seed): if seed is not None: try: seed = int(seed) except: seed = None return tf.random.stateless_normal( (batch_size, self.img_height // 8, self.img_width // 8, 4), seed=[seed, seed], ) else: return tf.random.normal( (batch_size, self.img_height // 8, self.img_width // 8, 4) ) def _get_initial_diffusion_latent(self, batch_size, init_latent=None, init_time=None, seed=None, noise=None): if noise is None: noise = self._get_initial_diffusion_noise(batch_size, seed=seed) if init_latent is None: latent = noise else: latent = self.scheduler.signal_rates[init_time] * np.repeat(init_latent, batch_size, axis=0) + \ self.scheduler.noise_rates[init_time] * noise return latent @staticmethod def _get_pos_ids(): return np.asarray([list(range(MAX_PROMPT_LENGTH))], dtype=np.int32) class StableDiffusionXL(StableDiffusionXLBase): """Keras implementation of Stable Diffusion. Note that the StableDiffusionXL API, as well as the APIs of the sub-components of StableDiffusionXL (e.g. ImageEncoder, DiffusionModel) should be considered unstable at this point. We do not guarantee backwards compatibility for future changes to these APIs. Stable Diffusion is a powerful image generation model that can be used, among other things, to generate pictures according to a short text description (called a "prompt"). Arguments: img_height: int, height of the images to generate, in pixel. Note that only multiples of 128 are supported; the value provided will be rounded to the nearest valid value. Defaults to 1024. img_width: int, width of the images to generate, in pixel. Note that only multiples of 128 are supported; the value provided will be rounded to the nearest valid value. Defaults to 1024. jit_compile: bool, whether to compile the underlying models to XLA. This can lead to a significant speedup on some systems. Defaults to False. Example: ```python from stable_diffusion_xl.stable_diffusion_xl import StableDiffusionXL from PIL import Image model = StableDiffusionXL(img_height=1024, img_width=1024, jit_compile=True) img = model.text_to_image( prompt="A beautiful horse running through a field", batch_size=1, # How many images to generate at once num_steps=25, # Number of iterations (controls image quality) seed=123, # Set this to always get the same image from the same prompt ) Image.fromarray(img[0]).save("horse.png") print("saved at horse.png") ``` References: - [About Stable Diffusion](https://stability.ai/blog/stable-diffusion-announcement) - [Original implementation](https://github.com/CompVis/stable-diffusion) """ # noqa: E501 def __init__( self, img_height=1024, img_width=1024, jit_compile=True, unet_ckpt=None, text_encoder_ckpt=None, text_encoder2_ckpt=None, vae_ckpt=None, ): super().__init__(img_height, img_width, jit_compile) self.unet_ckpt = unet_ckpt self.text_encoder_ckpt = text_encoder_ckpt self.text_encoder2_ckpt = text_encoder2_ckpt self.vae_ckpt = vae_ckpt @property def text_encoder_openai(self): """text_encoder returns the text encoder with pretrained weights. Can be overridden for tasks like textual inversion where the text encoder needs to be modified. 
""" if self._text_encoder_openai is None: self._text_encoder_openai = TextEncoderOpenAi(MAX_PROMPT_LENGTH, ckpt_path=self.text_encoder_ckpt) if self.jit_compile: self._text_encoder_openai.compile(jit_compile=True) return self._text_encoder_openai @property def text_encoder_laion(self): """text_encoder returns the text encoder with pretrained weights. Can be overridden for tasks like textual inversion where the text encoder needs to be modified. """ if self._text_encoder_laion is None:
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Keras implementation of StableDiffusionXL.""" MAX_PROMPT_LENGTH = 77 class StableDiffusionXLBase: """Base class for stable diffusion xl model.""" def __init__(self, img_height=1024, img_width=1024, jit_compile=False, active_lcm=False): self.img_height = img_height self.img_width = img_width # lazy initialize the component models and the tokenizer self._image_encoder = None self._text_encoder_laion = None self._text_encoder_laion_proj = None self._text_encoder_openai = None self._diffusion_model = None self._image_decoder = None self._tokenizer = None self.jit_compile = jit_compile self.active_lcm = active_lcm self.scheduler = Scheduler(active_lcm=active_lcm) def text_to_image( self, prompt, negative_prompt=None, batch_size=1, num_steps=50, unconditional_guidance_scale=7.5, seed=None, original_size=None, crops_coords_top_left=(0, 0), target_size=None, guidance_rescale=0.7, callback=None): encoded_text, add_text_embeds = self.encode_text(prompt) return self.generate_image( encoded_text, add_text_embeds, negative_prompt=negative_prompt, batch_size=batch_size, num_steps=num_steps, unconditional_guidance_scale=unconditional_guidance_scale, seed=seed, original_size=original_size, crops_coords_top_left=crops_coords_top_left, target_size=target_size, guidance_rescale=guidance_rescale, callback=callback) def image_to_image( self, prompt, negative_prompt=None, batch_size=1, num_steps=50, unconditional_guidance_scale=7.5, seed=None, reference_image=None, reference_image_strength=0.8, original_size=None, crops_coords_top_left=(0, 0), target_size=None, guidance_rescale=0.7, callback=None): encoded_text, add_text_embeds = self.encode_text(prompt) return self.generate_image( encoded_text, add_text_embeds, negative_prompt=negative_prompt, batch_size=batch_size, num_steps=num_steps, unconditional_guidance_scale=unconditional_guidance_scale, seed=seed, reference_image=reference_image, reference_image_strength=reference_image_strength, original_size=original_size, crops_coords_top_left=crops_coords_top_left, target_size=target_size, guidance_rescale=guidance_rescale, callback=callback) def inpaint( self, prompt, negative_prompt=None, batch_size=1, num_steps=50, unconditional_guidance_scale=7.5, seed=None, reference_image=None, reference_image_strength=0.8, inpaint_mask=None, mask_blur_strength=None, original_size=None, crops_coords_top_left=(0, 0), target_size=None, guidance_rescale=0.7, callback=None): encoded_text, add_text_embeds = self.encode_text(prompt) return self.generate_image( encoded_text, add_text_embeds, negative_prompt=negative_prompt, batch_size=batch_size, num_steps=num_steps, unconditional_guidance_scale=unconditional_guidance_scale, seed=seed, reference_image=reference_image, reference_image_strength=reference_image_strength, inpaint_mask=inpaint_mask, mask_blur_strength=mask_blur_strength, original_size=original_size, crops_coords_top_left=crops_coords_top_left, target_size=target_size, 
guidance_rescale=guidance_rescale, callback=callback) def encode_text(self, prompt): """Encodes a prompt into a latent text encoding. The encoding produced by this method should be used as the `encoded_text` parameter of `StableDiffusion.generate_image`. Encoding text separately from generating an image can be used to arbitrarily modify the text encoding prior to image generation, e.g. for walking between two prompts. Args: prompt: a string to encode, must be 77 tokens or shorter. Example: ```python from keras_cv.models import StableDiffusion model = StableDiffusionXL(img_height=1024, img_width=1024, jit_compile=True) encoded_text = model.encode_text("Tacos at dawn") img = model.generate_image(encoded_text) ``` """ # Tokenize prompt (i.e. starting context) context_openai, _ = get_weighted_text_embeddings(self.tokenizer, self.text_encoder_openai, prompt, model_max_length=MAX_PROMPT_LENGTH, pad_token_id=49407) context_laion, add_text_embeds = get_weighted_text_embeddings(self.tokenizer, self.text_encoder_laion, prompt, model_max_length=MAX_PROMPT_LENGTH, pad_token_id=0, text_encoder_pool=self.text_encoder_laion_proj) return np.concatenate([context_openai, context_laion], axis=-1), add_text_embeds def gaussian_blur(self, image, radius=3, h_axis=1, v_axis=2): def build_filter1d(kernel_size): if kernel_size == 1: filter1d = [1] else: triangle = [[1, 1]] for i in range(1, kernel_size - 1): cur_row = [1] prev_row = triangle[i - 1] for j in range(len(prev_row) - 1): cur_row.append(prev_row[j] + prev_row[j + 1]) cur_row.append(1) triangle.append(cur_row) filter1d = triangle[-1] filter1d = np.reshape(filter1d, (kernel_size,)) return filter1d / np.sum(filter1d) weights = build_filter1d(radius) # Apply filter horizontally blurred_image = correlate1d(image, weights, axis=h_axis, output=None, mode="reflect", cval=0.0, origin=0) # Apply filter vertically blurred_image = correlate1d(blurred_image, weights, axis=v_axis, output=None, mode="reflect", cval=0.0, origin=0) return blurred_image @staticmethod def resize(image_array, new_h=None, new_w=None): h, w, c = image_array.shape if new_h == h and new_w == w: return image_array h_bounds = 0, h - 1 w_bounds = 0, w - 1 y = np.expand_dims(np.linspace(h_bounds[0], h_bounds[1], new_h), axis=-1) x = np.expand_dims(np.linspace(w_bounds[0], w_bounds[1], new_w), axis=0) # Calculate the floor and ceiling values of x and y x_floor = np.floor(x).astype(int) x_ceil = np.ceil(x).astype(int) y_floor = np.floor(y).astype(int) y_ceil = np.ceil(y).astype(int) # Clip the values to stay within the image bounds x_floor = np.clip(x_floor, w_bounds[0], w_bounds[1]) x_ceil = np.clip(x_ceil, w_bounds[0], w_bounds[1]) y_floor = np.clip(y_floor, h_bounds[0], h_bounds[1]) y_ceil = np.clip(y_ceil, h_bounds[0], h_bounds[1]) # Calculate the fractional part of x and y dx = x - x_floor dy = y - y_floor # Get the values of the four neighboring pixels dx = np.expand_dims(dx, axis=-1) dy = np.expand_dims(dy, axis=-1) q11 = image_array[y_floor, x_floor, :] q21 = image_array[y_floor, x_ceil, :] q12 = image_array[y_ceil, x_floor, :] q22 = image_array[y_ceil, x_ceil, :] # Perform bilinear interpolation top_interp = q11 * (1.0 - dx) + q21 * dx bottom_interp = q12 * (1.0 - dx) + q22 * dx interpolated = top_interp * (1.0 - dy) + bottom_interp * dy return interpolated def preprocessed_image(self, x): if type(x) is str: x = np.array(Image.open(x).convert("RGB")) else: x = np.asarray(x) image_array = self.resize(x, self.img_height, self.img_width) image_array = np.array(image_array, dtype=np.float32) / 
255.0 input_image_array = image_array[None, ..., :3] input_image_tensor = input_image_array * 2.0 - 1.0 return input_image_array, input_image_tensor def preprocessed_mask(self, x, blur_radius=5): if type(x) is str: x = np.array(Image.open(x).convert("L")) else: x = np.asarray(x) if len(x.shape) == 2: x = np.expand_dims(x, axis=-1) mask_array = self.resize(x, self.img_height, self.img_width) if mask_array.shape[-1] != 1: mask_array = np.mean(mask_array, axis=-1, keepdims=True) input_mask_array = np.array(mask_array, dtype=np.float32) / 255.0 if blur_radius is not None: input_mask_array = self.gaussian_blur(input_mask_array, radius=blur_radius, h_axis=0, v_axis=1) latent_mask_tensor = self.resize(input_mask_array, self.img_width // 8, self.img_height // 8) return np.expand_dims(input_mask_array, axis=0), np.expand_dims(latent_mask_tensor, axis=0) def rescale_noise_cfg(self, noise_cfg, noise_pred_text, guidance_rescale=0.0, epsilon=1e-05): """ Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/abs/2305.08891). See Section 3.4 """ std_text = np.std(noise_pred_text, axis=tuple(range(1, len(noise_pred_text.shape))), keepdims=True) std_cfg = np.std(noise_cfg, axis=tuple(range(1, len(noise_cfg.shape))), keepdims=True) + epsilon # rescale the results from guidance (fixes overexposure) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images noise_cfg = guidance_rescale * noise_pred_rescaled + (1.0 - guidance_rescale) * noise_cfg return noise_cfg def generate_image( self, encoded_text, add_text_embeds, negative_prompt=None, batch_size=1, num_steps=50, unconditional_guidance_scale=7.5, diffusion_noise=None, seed=None, inpaint_mask=None, mask_blur_strength=None, reference_image=None, reference_image_strength=0.8, callback=None, original_size=None, crops_coords_top_left=(0, 0), guidance_rescale=0.0, target_size=None): """Generates an image based on encoded text. The encoding passed to this method should be derived from `StableDiffusion.encode_text`. Args: encoded_text: Tensor of shape (`batch_size`, 77, 768), or a Tensor of shape (77, 768). When the batch axis is omitted, the same encoded text will be used to produce every generated image. batch_size: int, number of images to generate, defaults to 1. negative_prompt: a string containing information to negatively guide the image generation (e.g. by removing or altering certain aspects of the generated image), defaults to None. num_steps: int, number of diffusion steps (controls image quality), defaults to 50. unconditional_guidance_scale: float, controlling how closely the image should adhere to the prompt. Larger values result in more closely adhering to the prompt, but will make the image noisier. Defaults to 7.5. diffusion_noise: Tensor of shape (`batch_size`, img_height // 8, img_width // 8, 4), or a Tensor of shape (img_height // 8, img_width // 8, 4). Optional custom noise to seed the diffusion process. When the batch axis is omitted, the same noise will be used to seed diffusion for every generated image. seed: integer which is used to seed the random generation of diffusion noise, only to be specified if `diffusion_noise` is None. 
Example: ```python from stable_diffusion_xl.stable_diffusion_xl import StableDiffusionXL batch_size = 8 model = StableDiffusionXL(img_height=1024, img_width=1024, jit_compile=True) e_tacos = model.encode_text("Tacos at dawn") e_watermelons = model.encode_text("Watermelons at dusk") e_interpolated = tf.linspace(e_tacos, e_watermelons, batch_size) images = model.generate_image(e_interpolated, batch_size=batch_size) ``` """ if diffusion_noise is not None and seed is not None: raise ValueError( "`diffusion_noise` and `seed` should not both be passed to " "`generate_image`. `seed` is only used to generate diffusion " "noise when it's not already user-specified." ) context = self._expand_tensor(encoded_text, batch_size) if negative_prompt is None: negative_prompt = "" unconditional_context, unconditional_add_text_embeds = self.encode_text(negative_prompt) unconditional_context = self._expand_tensor(unconditional_context, batch_size) if diffusion_noise is not None: diffusion_noise = np.squeeze(diffusion_noise) if len(diffusion_noise.shape) == 3: diffusion_noise = np.repeat(np.expand_dims(diffusion_noise, axis=0), batch_size, axis=0) # Iterative reverse diffusion stage self.scheduler.set_timesteps(num_steps) timesteps = self.scheduler.timesteps[::-1] init_time = None init_latent = None input_image_array = None input_mask_array = None latent_mask_tensor = None if inpaint_mask is not None: input_mask_array, latent_mask_tensor = self.preprocessed_mask(inpaint_mask, mask_blur_strength) if input_mask_array is None or latent_mask_tensor is None: print("wrong inpaint mask:{}".format(inpaint_mask)) if reference_image is not None and (0. < reference_image_strength < 1.): input_image_array, input_image_tensor = self.preprocessed_image(reference_image) if input_image_tensor is not None: num_steps = int(num_steps * reference_image_strength + 0.5) init_time = timesteps[num_steps] init_latent = self.image_encoder.predict_on_batch(input_image_tensor) timesteps = timesteps[:num_steps] else: print("wrong reference image:{}".format(reference_image)) latent = self._get_initial_diffusion_latent(batch_size=batch_size, init_latent=init_latent, init_time=init_time, seed=seed, noise=diffusion_noise) progbar = tf.keras.utils.Progbar(len(timesteps)) iteration = 0 if original_size is None: original_size = [self.img_height, self.img_width] if target_size is None: target_size = [self.img_height, self.img_width] add_time_ids = tf.expand_dims( tf.convert_to_tensor(list(list(original_size) + list(crops_coords_top_left) + list(target_size)), latent.dtype), axis=0) for index, timestep in list(enumerate(timesteps))[::-1]: latent_prev = latent # Set aside the previous latent vector time_emb = np.repeat(np.reshape(timestep, [1, -1]), batch_size, axis=0) if unconditional_guidance_scale > 0.0: unconditional_latent = self.diffusion_model.predict_on_batch( [latent, time_emb, unconditional_context, add_time_ids, tf.zeros_like(add_text_embeds)]) latent_text = self.diffusion_model.predict_on_batch( [latent, time_emb, context, add_time_ids, add_text_embeds]) latent = unconditional_latent + unconditional_guidance_scale * ( latent_text - unconditional_latent) if guidance_rescale > 0.0: # Based on 3.4. 
in https://arxiv.org/abs/2305.08891 latent = self.rescale_noise_cfg(latent, latent_text, guidance_rescale=guidance_rescale) else: latent = self.diffusion_model.predict_on_batch( [latent, time_emb, context, add_time_ids, add_text_embeds]) latent = self.scheduler.step(latent, timestep, latent_prev) if latent_mask_tensor is not None and init_latent is not None: latent_orgin = self._get_initial_diffusion_latent(batch_size=batch_size, init_latent=init_latent, init_time=timestep, seed=seed, noise=diffusion_noise) latent = latent_orgin * (1. - latent_mask_tensor) + latent * latent_mask_tensor iteration += 1 if callback is not None: callback(iteration) progbar.update(iteration) # Decoding stage decoded = self.image_decoder.predict_on_batch(latent) decoded = np.array(((decoded + 1.) * 0.5), dtype=np.float32) if input_mask_array is not None and input_image_array is not None: decoded = input_image_array * (1. - input_mask_array) + decoded * input_mask_array return np.clip(decoded * 255., 0, 255).astype("uint8") def _expand_tensor(self, text_embedding, batch_size): """Extends a tensor by repeating it to fit the shape of the given batch size.""" text_embedding = np.squeeze(text_embedding) if len(text_embedding.shape) == 2: text_embedding = np.repeat( np.expand_dims(text_embedding, axis=0), batch_size, axis=0 ) return text_embedding @property def image_encoder(self): pass @property def text_encoder_openai(self): pass @property def text_encoder_laion(self): pass @property def text_encoder_laion_proj(self): pass @property def diffusion_model(self): pass @property def image_decoder(self): pass @property def tokenizer(self): """tokenizer returns the tokenizer used for text inputs. Can be overriden for tasks like textual inversion where the tokenizer needs to be modified. """ if self._tokenizer is None: self._tokenizer = SimpleTokenizer() return self._tokenizer def _get_initial_diffusion_noise(self, batch_size, seed): if seed is not None: try: seed = int(seed) except: seed = None return tf.random.stateless_normal( (batch_size, self.img_height // 8, self.img_width // 8, 4), seed=[seed, seed], ) else: return tf.random.normal( (batch_size, self.img_height // 8, self.img_width // 8, 4) ) def _get_initial_diffusion_latent(self, batch_size, init_latent=None, init_time=None, seed=None, noise=None): if noise is None: noise = self._get_initial_diffusion_noise(batch_size, seed=seed) if init_latent is None: latent = noise else: latent = self.scheduler.signal_rates[init_time] * np.repeat(init_latent, batch_size, axis=0) + \ self.scheduler.noise_rates[init_time] * noise return latent @staticmethod def _get_pos_ids(): return np.asarray([list(range(MAX_PROMPT_LENGTH))], dtype=np.int32) class StableDiffusionXL(StableDiffusionXLBase): """Keras implementation of Stable Diffusion. Note that the StableDiffusionXL API, as well as the APIs of the sub-components of StableDiffusionXL (e.g. ImageEncoder, DiffusionModel) should be considered unstable at this point. We do not guarantee backwards compatability for future changes to these APIs. Stable Diffusion is a powerful image generation model that can be used, among other things, to generate pictures according to a short text description (called a "prompt"). Arguments: img_height: int, height of the images to generate, in pixel. Note that only multiples of 128 are supported; the value provided will be rounded to the nearest valid value. Defaults to 1024. img_width: int, width of the images to generate, in pixel. 
Note that only multiples of 128 are supported; the value provided will be rounded to the nearest valid value. Defaults to 1024. jit_compile: bool, whether to compile the underlying models to XLA. This can lead to a significant speedup on some systems. Defaults to False. Example: ```python from stable_diffusion_xl.stable_diffusion_xl import StableDiffusionXL from PIL import Image model = StableDiffusionXL(img_height=1024, img_width=1024, jit_compile=True) img = model.text_to_image( prompt="A beautiful horse running through a field", batch_size=1, # How many images to generate at once num_steps=25, # Number of iterations (controls image quality) seed=123, # Set this to always get the same image from the same prompt ) Image.fromarray(img[0]).save("horse.png") print("saved at horse.png") ``` References: - [About Stable Diffusion](https://stability.ai/blog/stable-diffusion-announcement) - [Original implementation](https://github.com/CompVis/stable-diffusion) """ # noqa: E501 def __init__( self, img_height=1024, img_width=1024, jit_compile=True, unet_ckpt=None, text_encoder_ckpt=None, text_encoder2_ckpt=None, vae_ckpt=None, ): super().__init__(img_height, img_width, jit_compile) self.unet_ckpt = unet_ckpt self.text_encoder_ckpt = text_encoder_ckpt self.text_encoder2_ckpt = text_encoder2_ckpt self.vae_ckpt = vae_ckpt @property def text_encoder_openai(self): """text_encoder returns the text encoder with pretrained weights. Can be overridden for tasks like textual inversion where the text encoder needs to be modified. """ if self._text_encoder_openai is None: self._text_encoder_openai = TextEncoderOpenAi(MAX_PROMPT_LENGTH, ckpt_path=self.text_encoder_ckpt) if self.jit_compile: self._text_encoder_openai.compile(jit_compile=True) return self._text_encoder_openai @property def text_encoder_laion(self): """text_encoder returns the text encoder with pretrained weights. Can be overridden for tasks like textual inversion where the text encoder needs to be modified. """ if self._text_encoder_laion is None:
self._text_encoder_laion = TextEncoderLaion(MAX_PROMPT_LENGTH, ckpt_path=self.text_encoder2_ckpt)
6
2023-10-14 18:40:16+00:00
16k
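Two techniques appear verbatim in the `generate_image` loop of this record: classifier-free guidance (`uncond + scale * (cond - uncond)`) and the `rescale_noise_cfg` overexposure fix from Section 3.4 of arXiv:2305.08891. A compact NumPy restatement of both steps:

```python
import numpy as np

def cfg_with_rescale(uncond, cond, scale=7.5, guidance_rescale=0.7, eps=1e-5):
    # Classifier-free guidance: extrapolate away from the unconditional branch.
    guided = uncond + scale * (cond - uncond)
    if guidance_rescale > 0.0:
        # Match the guided prediction's per-sample std to the conditional one.
        axes = tuple(range(1, guided.ndim))
        std_text = cond.std(axis=axes, keepdims=True)
        std_cfg = guided.std(axis=axes, keepdims=True) + eps
        rescaled = guided * (std_text / std_cfg)
        guided = guidance_rescale * rescaled + (1.0 - guidance_rescale) * guided
    return guided
```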
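`_get_initial_diffusion_latent` in the same record seeds image-to-image runs by mixing the encoded reference latent with noise at the chosen start timestep. A sketch of that mixing, with the scheduler's `signal_rates`/`noise_rates` stubbed by an illustrative variance-preserving cosine schedule (an assumption; the repo's `Scheduler` supplies the real values):

```python
import numpy as np

def initial_img2img_latent(init_latent, t_frac, rng):
    """Mix a reference latent with noise at fractional start time t_frac in [0, 1]."""
    signal_rate = np.cos(0.5 * np.pi * t_frac)  # stub; signal_rate**2 + noise_rate**2 == 1
    noise_rate = np.sin(0.5 * np.pi * t_frac)
    noise = rng.standard_normal(init_latent.shape)
    return signal_rate * init_latent + noise_rate * noise

rng = np.random.default_rng(123)
z = initial_img2img_latent(rng.standard_normal((1, 128, 128, 4)), t_frac=0.8, rng=rng)
```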
spla-tam/SplaTAM
scripts/iphone_demo.py
[ { "identifier": "relative_transformation", "path": "datasets/gradslam_datasets/geometryutils.py", "snippet": "def relative_transformation(\n trans_01: torch.Tensor, trans_02: torch.Tensor, orthogonal_rotations: bool = False\n) -> torch.Tensor:\n r\"\"\"Function that computes the relative homogenou...
import argparse import os import shutil import sys import time import json import cv2 import matplotlib.pyplot as plt import numpy as np import torch import torch.nn.functional as F import cyclonedds.idl as idl import cyclonedds.idl.annotations as annotate import cyclonedds.idl.types as types from pathlib import Path from importlib.machinery import SourceFileLoader from tqdm import tqdm from datasets.gradslam_datasets.geometryutils import relative_transformation from utils.common_utils import seed_everything, save_params_ckpt, save_params from utils.eval_helpers import report_progress from utils.keyframe_selection import keyframe_selection_overlap from utils.recon_helpers import setup_camera from utils.slam_external import build_rotation, prune_gaussians, densify from scripts.splatam import get_loss, initialize_optimizer, initialize_params, initialize_camera_pose, get_pointcloud, add_new_gaussians from diff_gaussian_rasterization import GaussianRasterizer as Renderer from dataclasses import dataclass from cyclonedds.domain import DomainParticipant, Domain from cyclonedds.core import Qos, Policy from cyclonedds.sub import DataReader from cyclonedds.topic import Topic from cyclonedds.util import duration
13,627
# Initialize Params for first time step if time_idx == 0: # Get Initial Point Cloud mask = (densify_depth > 0) # Mask out invalid depth values mask = mask.reshape(-1) init_pt_cld, mean3_sq_dist = get_pointcloud(densify_color, densify_depth, densify_intrinsics, first_frame_w2c, mask=mask, compute_mean_sq_dist=True, mean_sq_dist_method=config['mean_sq_dist_method']) params, variables = initialize_params(init_pt_cld, num_frames, mean3_sq_dist) variables['scene_radius'] = torch.max(densify_depth)/config['scene_radius_depth_ratio'] # Initialize Mapping & Tracking for current frame iter_time_idx = time_idx curr_gt_w2c = gt_w2c_all_frames curr_data = {'cam': cam, 'im': color, 'depth':depth, 'id': iter_time_idx, 'intrinsics': intrinsics, 'w2c': first_frame_w2c, 'iter_gt_w2c_list': curr_gt_w2c} tracking_curr_data = curr_data # Optimization Iterations num_iters_mapping = config['mapping']['num_iters'] # Initialize the camera pose for the current frame if time_idx > 0: params = initialize_camera_pose(params, time_idx, forward_prop=config['tracking']['forward_prop']) # Tracking tracking_start_time = time.time() if time_idx > 0 and not config['tracking']['use_gt_poses']: # Reset Optimizer & Learning Rates for tracking optimizer = initialize_optimizer(params, config['tracking']['lrs'], tracking=True) # Keep Track of Best Candidate Rotation & Translation candidate_cam_unnorm_rot = params['cam_unnorm_rots'][..., time_idx].detach().clone() candidate_cam_tran = params['cam_trans'][..., time_idx].detach().clone() current_min_loss = float(1e20) # Tracking Optimization iter = 0 do_continue_slam = False num_iters_tracking = config['tracking']['num_iters'] progress_bar = tqdm(range(num_iters_tracking), desc=f"Tracking Time Step: {time_idx}") while True: iter_start_time = time.time() # Loss for current frame loss, variables, losses = get_loss(params, tracking_curr_data, variables, iter_time_idx, config['tracking']['loss_weights'], config['tracking']['use_sil_for_loss'], config['tracking']['sil_thres'], config['tracking']['use_l1'], config['tracking']['ignore_outlier_depth_loss'], tracking=True, visualize_tracking_loss=config['tracking']['visualize_tracking_loss'], tracking_iteration=iter) # Backprop loss.backward() # Optimizer Update optimizer.step() optimizer.zero_grad(set_to_none=True) with torch.no_grad(): # Save the best candidate rotation & translation if loss < current_min_loss: current_min_loss = loss candidate_cam_unnorm_rot = params['cam_unnorm_rots'][..., time_idx].detach().clone() candidate_cam_tran = params['cam_trans'][..., time_idx].detach().clone() # Report Progress if config['report_iter_progress']: report_progress(params, tracking_curr_data, iter+1, progress_bar, iter_time_idx, sil_thres=config['tracking']['sil_thres'], tracking=True) else: progress_bar.update(1) # Update the runtime numbers iter_end_time = time.time() tracking_iter_time_sum += iter_end_time - iter_start_time tracking_iter_time_count += 1 # Check if we should stop tracking iter += 1 if iter == num_iters_tracking: if losses['depth'] < config['tracking']['depth_loss_thres'] and config['tracking']['use_depth_loss_thres']: break elif config['tracking']['use_depth_loss_thres'] and not do_continue_slam: do_continue_slam = True progress_bar = tqdm(range(num_iters_tracking), desc=f"Tracking Time Step: {time_idx}") num_iters_tracking = 2*num_iters_tracking else: break progress_bar.close() # Copy over the best candidate rotation & translation with torch.no_grad(): params['cam_unnorm_rots'][..., time_idx] = candidate_cam_unnorm_rot 
params['cam_trans'][..., time_idx] = candidate_cam_tran elif time_idx > 0 and config['tracking']['use_gt_poses']: with torch.no_grad(): # Get the ground truth pose relative to frame 0 rel_w2c = curr_gt_w2c[-1] rel_w2c_rot = rel_w2c[:3, :3].unsqueeze(0).detach() rel_w2c_rot_quat = matrix_to_quaternion(rel_w2c_rot) rel_w2c_tran = rel_w2c[:3, 3].detach() # Update the camera parameters params['cam_unnorm_rots'][..., time_idx] = rel_w2c_rot_quat params['cam_trans'][..., time_idx] = rel_w2c_tran # Update the runtime numbers tracking_end_time = time.time() tracking_frame_time_sum += tracking_end_time - tracking_start_time tracking_frame_time_count += 1 if time_idx == 0 or (time_idx+1) % config['report_global_progress_every'] == 0: try: # Report Final Tracking Progress progress_bar = tqdm(range(1), desc=f"Tracking Result Time Step: {time_idx}") with torch.no_grad(): report_progress(params, tracking_curr_data, 1, progress_bar, iter_time_idx, sil_thres=config['tracking']['sil_thres'], tracking=True) progress_bar.close() except: ckpt_output_dir = save_path.joinpath("checkpoints") os.makedirs(ckpt_output_dir, exist_ok=True) save_params_ckpt(params, ckpt_output_dir, time_idx) print('Failed to evaluate trajectory.') # Densification & KeyFrame-based Mapping if time_idx == 0 or (time_idx+1) % config['map_every'] == 0: # Densification if config['mapping']['add_new_gaussians'] and time_idx > 0: densify_curr_data = {'cam': densify_cam, 'im': densify_color, 'depth': densify_depth, 'id': time_idx, 'intrinsics': densify_intrinsics, 'w2c': first_frame_w2c, 'iter_gt_w2c_list': curr_gt_w2c} # Add new Gaussians to the scene based on the Silhouette
""" Script to stream RGB-D data from the NeRFCapture iOS App & build a Gaussian Splat on the fly using SplaTAM. The CycloneDDS parts of this script are adapted from the Instant-NGP Repo: https://github.com/NVlabs/instant-ngp/blob/master/scripts/nerfcapture2nerf.py """ #!/usr/bin/env python3 _BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.insert(0, _BASE_DIR) def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("--config", default="./configs/iphone/online_demo.py", type=str, help="Path to config file.") return parser.parse_args() # DDS # ================================================================================================== @dataclass @annotate.final @annotate.autoid("sequential") class SplatCaptureFrame(idl.IdlStruct, typename="SplatCaptureData.SplatCaptureFrame"): id: types.uint32 annotate.key("id") timestamp: types.float64 fl_x: types.float32 fl_y: types.float32 cx: types.float32 cy: types.float32 transform_matrix: types.array[types.float32, 16] width: types.uint32 height: types.uint32 image: types.sequence[types.uint8] has_depth: bool depth_width: types.uint32 depth_height: types.uint32 depth_scale: types.float32 depth_image: types.sequence[types.uint8] dds_config = """<?xml version="1.0" encoding="UTF-8" ?> \ <CycloneDDS xmlns="https://cdds.io/config" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://cdds.io/config https://raw.githubusercontent.com/eclipse-cyclonedds/cyclonedds/master/etc/cyclonedds.xsd"> \ <Domain id="any"> \ <Internal> \ <MinimumSocketReceiveBufferSize>10MB</MinimumSocketReceiveBufferSize> \ </Internal> \ <Tracing> \ <Verbosity>config</Verbosity> \ <OutputFile>stdout</OutputFile> \ </Tracing> \ </Domain> \ </CycloneDDS> \ """ # ================================================================================================== def dataset_capture_loop(reader: DataReader, save_path: Path, overwrite: bool, n_frames: int, depth_scale: float, config: dict): rgb_path = save_path.joinpath("rgb") if rgb_path.exists(): if overwrite: # Prompt user to confirm deletion if (input(f"warning! folder '{save_path}' will be deleted/replaced. continue? (Y/n)").lower().strip()+"y")[:1] != "y": sys.exit(1) shutil.rmtree(save_path) else: print(f"rgb_path {rgb_path} already exists. 
Please use overwrite=True in config if you want to overwrite.") sys.exit(1) print("Waiting for frames...") # Make directory images_dir = save_path.joinpath("rgb") manifest = { "fl_x": 0.0, "fl_y": 0.0, "cx": 0.0, "cy": 0.0, "w": 0.0, "h": 0.0, "frames": [] } total_frames = 0 # Total frames received time_idx = total_frames num_frames = n_frames # Total frames desired # Initialize list to keep track of Keyframes keyframe_list = [] keyframe_time_indices = [] # Init Variables to keep track of ARkit poses and runtimes gt_w2c_all_frames = [] tracking_iter_time_sum = 0 tracking_iter_time_count = 0 mapping_iter_time_sum = 0 mapping_iter_time_count = 0 tracking_frame_time_sum = 0 tracking_frame_time_count = 0 mapping_frame_time_sum = 0 mapping_frame_time_count = 0 P = torch.tensor( [ [1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1] ] ).float() # Start DDS Loop while True: sample = reader.read_next() # Get frame from NeRFCapture if sample: print(f"{total_frames + 1}/{n_frames} frames received") if total_frames == 0: save_path.mkdir(parents=True, exist_ok=True) images_dir.mkdir(exist_ok=True) manifest["w"] = sample.width manifest["h"] = sample.height manifest["cx"] = sample.cx manifest["cy"] = sample.cy manifest["fl_x"] = sample.fl_x manifest["fl_y"] = sample.fl_y manifest["integer_depth_scale"] = float(depth_scale)/65535.0 if sample.has_depth: depth_dir = save_path.joinpath("depth") depth_dir.mkdir(exist_ok=True) # RGB image = np.asarray(sample.image, dtype=np.uint8).reshape((sample.height, sample.width, 3)) cv2.imwrite(str(images_dir.joinpath(f"{total_frames}.png")), cv2.cvtColor(image, cv2.COLOR_RGB2BGR)) # Depth if avaiable save_depth = None if sample.has_depth: # Save Depth Image save_depth = np.asarray(sample.depth_image, dtype=np.uint8).view( dtype=np.float32).reshape((sample.depth_height, sample.depth_width)) save_depth = (save_depth*65535/float(depth_scale)).astype(np.uint16) save_depth = cv2.resize(save_depth, dsize=( sample.width, sample.height), interpolation=cv2.INTER_NEAREST) cv2.imwrite(str(depth_dir.joinpath(f"{total_frames}.png")), save_depth) # Load Depth Image for SplaTAM curr_depth = np.asarray(sample.depth_image, dtype=np.uint8).view( dtype=np.float32).reshape((sample.depth_height, sample.depth_width)) else: print("No Depth Image Received. Please make sure that the NeRFCapture App \ mentions Depth Supported on the top right corner. 
Skipping Frame...") continue # ARKit Poses for saving dataset X_WV = np.asarray(sample.transform_matrix, dtype=np.float32).reshape((4, 4)).T frame = { "transform_matrix": X_WV.tolist(), "file_path": f"rgb/{total_frames}.png", "fl_x": sample.fl_x, "fl_y": sample.fl_y, "cx": sample.cx, "cy": sample.cy, "w": sample.width, "h": sample.height } if save_depth is not None: frame["depth_path"] = f"depth/{total_frames}.png" manifest["frames"].append(frame) # Convert ARKit Pose to GradSLAM format gt_pose = torch.from_numpy(X_WV).float() gt_pose = P @ gt_pose @ P.T if time_idx == 0: first_abs_gt_pose = gt_pose gt_pose = relative_transformation(first_abs_gt_pose.unsqueeze(0), gt_pose.unsqueeze(0), orthogonal_rotations=False) gt_w2c = torch.linalg.inv(gt_pose[0]) gt_w2c_all_frames.append(gt_w2c) # Initialize Tracking & Mapping Resolution Data color = cv2.resize(image, dsize=( config['data']['desired_image_width'], config['data']['desired_image_height']), interpolation=cv2.INTER_LINEAR) depth = cv2.resize(curr_depth, dsize=( config['data']['desired_image_width'], config['data']['desired_image_height']), interpolation=cv2.INTER_NEAREST) depth = np.expand_dims(depth, -1) color = torch.from_numpy(color).cuda().float() color = color.permute(2, 0, 1) / 255 depth = torch.from_numpy(depth).cuda().float() depth = depth.permute(2, 0, 1) if time_idx == 0: intrinsics = torch.tensor([[sample.fl_x, 0, sample.cx], [0, sample.fl_y, sample.cy], [0, 0, 1]]).cuda().float() intrinsics = intrinsics / config['data']['downscale_factor'] intrinsics[2, 2] = 1.0 first_frame_w2c = torch.eye(4).cuda().float() cam = setup_camera(color.shape[2], color.shape[1], intrinsics.cpu().numpy(), first_frame_w2c.cpu().numpy()) # Initialize Densification Resolution Data densify_color = cv2.resize(image, dsize=( config['data']['densification_image_width'], config['data']['densification_image_height']), interpolation=cv2.INTER_LINEAR) densify_depth = cv2.resize(curr_depth, dsize=( config['data']['densification_image_width'], config['data']['densification_image_height']), interpolation=cv2.INTER_NEAREST) densify_depth = np.expand_dims(densify_depth, -1) densify_color = torch.from_numpy(densify_color).cuda().float() densify_color = densify_color.permute(2, 0, 1) / 255 densify_depth = torch.from_numpy(densify_depth).cuda().float() densify_depth = densify_depth.permute(2, 0, 1) if time_idx == 0: densify_intrinsics = torch.tensor([[sample.fl_x, 0, sample.cx], [0, sample.fl_y, sample.cy], [0, 0, 1]]).cuda().float() densify_intrinsics = densify_intrinsics / config['data']['densify_downscale_factor'] densify_intrinsics[2, 2] = 1.0 densify_cam = setup_camera(densify_color.shape[2], densify_color.shape[1], densify_intrinsics.cpu().numpy(), first_frame_w2c.cpu().numpy()) # Initialize Params for first time step if time_idx == 0: # Get Initial Point Cloud mask = (densify_depth > 0) # Mask out invalid depth values mask = mask.reshape(-1) init_pt_cld, mean3_sq_dist = get_pointcloud(densify_color, densify_depth, densify_intrinsics, first_frame_w2c, mask=mask, compute_mean_sq_dist=True, mean_sq_dist_method=config['mean_sq_dist_method']) params, variables = initialize_params(init_pt_cld, num_frames, mean3_sq_dist) variables['scene_radius'] = torch.max(densify_depth)/config['scene_radius_depth_ratio'] # Initialize Mapping & Tracking for current frame iter_time_idx = time_idx curr_gt_w2c = gt_w2c_all_frames curr_data = {'cam': cam, 'im': color, 'depth':depth, 'id': iter_time_idx, 'intrinsics': intrinsics, 'w2c': first_frame_w2c, 'iter_gt_w2c_list': curr_gt_w2c} 
tracking_curr_data = curr_data # Optimization Iterations num_iters_mapping = config['mapping']['num_iters'] # Initialize the camera pose for the current frame if time_idx > 0: params = initialize_camera_pose(params, time_idx, forward_prop=config['tracking']['forward_prop']) # Tracking tracking_start_time = time.time() if time_idx > 0 and not config['tracking']['use_gt_poses']: # Reset Optimizer & Learning Rates for tracking optimizer = initialize_optimizer(params, config['tracking']['lrs'], tracking=True) # Keep Track of Best Candidate Rotation & Translation candidate_cam_unnorm_rot = params['cam_unnorm_rots'][..., time_idx].detach().clone() candidate_cam_tran = params['cam_trans'][..., time_idx].detach().clone() current_min_loss = float(1e20) # Tracking Optimization iter = 0 do_continue_slam = False num_iters_tracking = config['tracking']['num_iters'] progress_bar = tqdm(range(num_iters_tracking), desc=f"Tracking Time Step: {time_idx}") while True: iter_start_time = time.time() # Loss for current frame loss, variables, losses = get_loss(params, tracking_curr_data, variables, iter_time_idx, config['tracking']['loss_weights'], config['tracking']['use_sil_for_loss'], config['tracking']['sil_thres'], config['tracking']['use_l1'], config['tracking']['ignore_outlier_depth_loss'], tracking=True, visualize_tracking_loss=config['tracking']['visualize_tracking_loss'], tracking_iteration=iter) # Backprop loss.backward() # Optimizer Update optimizer.step() optimizer.zero_grad(set_to_none=True) with torch.no_grad(): # Save the best candidate rotation & translation if loss < current_min_loss: current_min_loss = loss candidate_cam_unnorm_rot = params['cam_unnorm_rots'][..., time_idx].detach().clone() candidate_cam_tran = params['cam_trans'][..., time_idx].detach().clone() # Report Progress if config['report_iter_progress']: report_progress(params, tracking_curr_data, iter+1, progress_bar, iter_time_idx, sil_thres=config['tracking']['sil_thres'], tracking=True) else: progress_bar.update(1) # Update the runtime numbers iter_end_time = time.time() tracking_iter_time_sum += iter_end_time - iter_start_time tracking_iter_time_count += 1 # Check if we should stop tracking iter += 1 if iter == num_iters_tracking: if losses['depth'] < config['tracking']['depth_loss_thres'] and config['tracking']['use_depth_loss_thres']: break elif config['tracking']['use_depth_loss_thres'] and not do_continue_slam: do_continue_slam = True progress_bar = tqdm(range(num_iters_tracking), desc=f"Tracking Time Step: {time_idx}") num_iters_tracking = 2*num_iters_tracking else: break progress_bar.close() # Copy over the best candidate rotation & translation with torch.no_grad(): params['cam_unnorm_rots'][..., time_idx] = candidate_cam_unnorm_rot params['cam_trans'][..., time_idx] = candidate_cam_tran elif time_idx > 0 and config['tracking']['use_gt_poses']: with torch.no_grad(): # Get the ground truth pose relative to frame 0 rel_w2c = curr_gt_w2c[-1] rel_w2c_rot = rel_w2c[:3, :3].unsqueeze(0).detach() rel_w2c_rot_quat = matrix_to_quaternion(rel_w2c_rot) rel_w2c_tran = rel_w2c[:3, 3].detach() # Update the camera parameters params['cam_unnorm_rots'][..., time_idx] = rel_w2c_rot_quat params['cam_trans'][..., time_idx] = rel_w2c_tran # Update the runtime numbers tracking_end_time = time.time() tracking_frame_time_sum += tracking_end_time - tracking_start_time tracking_frame_time_count += 1 if time_idx == 0 or (time_idx+1) % config['report_global_progress_every'] == 0: try: # Report Final Tracking Progress progress_bar = tqdm(range(1), 
desc=f"Tracking Result Time Step: {time_idx}") with torch.no_grad(): report_progress(params, tracking_curr_data, 1, progress_bar, iter_time_idx, sil_thres=config['tracking']['sil_thres'], tracking=True) progress_bar.close() except: ckpt_output_dir = save_path.joinpath("checkpoints") os.makedirs(ckpt_output_dir, exist_ok=True) save_params_ckpt(params, ckpt_output_dir, time_idx) print('Failed to evaluate trajectory.') # Densification & KeyFrame-based Mapping if time_idx == 0 or (time_idx+1) % config['map_every'] == 0: # Densification if config['mapping']['add_new_gaussians'] and time_idx > 0: densify_curr_data = {'cam': densify_cam, 'im': densify_color, 'depth': densify_depth, 'id': time_idx, 'intrinsics': densify_intrinsics, 'w2c': first_frame_w2c, 'iter_gt_w2c_list': curr_gt_w2c} # Add new Gaussians to the scene based on the Silhouette
params, variables = add_new_gaussians(params, variables, densify_curr_data,
15
2023-11-30 20:26:47+00:00
16k
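The capture loop in this record converts each ARKit camera-to-world matrix to the GradSLAM convention by flipping the y/z axes with `P`, re-expresses it relative to frame 0, and inverts it to get a world-to-camera matrix. The same three steps in plain PyTorch (`first_pose` is assumed to be the frame-0 pose already converted with `P`):

```python
import torch

P = torch.diag(torch.tensor([1.0, -1.0, -1.0, 1.0]))  # flip y and z axes

def arkit_pose_to_w2c(X_WV: torch.Tensor, first_pose: torch.Tensor) -> torch.Tensor:
    pose = P @ X_WV @ P.T                      # ARKit -> GradSLAM axis convention
    rel = torch.linalg.inv(first_pose) @ pose  # pose relative to frame 0
    return torch.linalg.inv(rel)               # camera-to-world -> world-to-camera
```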
zhyever/PatchFusion
zoedepth/trainers/zoedepth_custom_trainer.py
[ { "identifier": "SILogLoss", "path": "zoedepth/trainers/loss_sample.py", "snippet": "class SILogLoss(nn.Module):\n \"\"\"SILog loss (pixel-wise)\"\"\"\n def __init__(self, beta=0.15):\n super(SILogLoss, self).__init__()\n self.name = 'SILog'\n self.beta = beta\n\n def forwa...
import os import torch import torch.cuda.amp as amp import torch.nn as nn import numpy as np import wandb import uuid import torch.distributed as dist import copy import torch.optim as optim import matplotlib.pyplot as plt from zoedepth.trainers.loss_sample import SILogLoss, DistributionLoss from zoedepth.trainers.loss import SILogLoss as DenseSILogLoss from zoedepth.trainers.loss import BudgetConstraint, HistogramMatchingLoss, SSIM, ConsistencyLoss from zoedepth.utils.config import DATASETS_CONFIG from zoedepth.utils.misc import compute_metrics from zoedepth.data.preprocess import get_black_border from .base_trainer import BaseTrainer, is_rank_zero, colors, flatten from torchvision import transforms from PIL import Image from tqdm import tqdm from datetime import datetime as dt from zoedepth.utils.misc import generatemask
13,062
# MIT License # Copyright (c) 2022 Intelligent Systems Lab Org # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # File author: Zhenyu Li # This file is partly inspired from ZoeDepth (https://github.com/isl-org/ZoeDepth/blob/main/zoedepth/trainers/zoedepth_trainer.py); author: Shariq Farooq Bhat class Trainer(BaseTrainer): def __init__(self, config, model, train_loader, test_loader=None, device=None): self.addf = config.get("addf", False) self.lazy_epoch = -1 self.boostingdepth = config.get("boostingdepth", False) super().__init__(config, model, train_loader, test_loader=test_loader, device=device) self.device = device self.silog_loss = SILogLoss(beta=config.get("beta", 0.15)) self.dense_silog_loss = DenseSILogLoss(beta=config.get("beta", 0.15)) print("sigloss's beta is set to {}".format(config.get("beta", 0.15))) self.scaler = amp.GradScaler(enabled=self.config.use_amp) self.distribution_loss = DistributionLoss(max_depth=self.config.max_depth) self.sampled_training = config.get("sampled_training", False) self.sec_stage = config.get("sec_stage", False) self.multi_consistency = config.get("multi_consistency", False) self.use_blur = config.get("use_blur", False) self.dynamic = config.get("dynamic", False) if self.dynamic: self.dynamic_unupdate_rate = config.get("dynamic_unupdate_rate", 0.0) self.budget_loss = BudgetConstraint(loss_mu=0.0, flops_all=21552.5684, warm_up=True) self.use_scale_loss = config.get("use_scale_loss", False) if self.use_scale_loss: if config.get("scale_type", "ssim"): self.scale_loss = SSIM(window_size=config.get("window_size", int(11))) else:
# MIT License # Copyright (c) 2022 Intelligent Systems Lab Org # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # File author: Zhenyu Li # This file is partly inspired from ZoeDepth (https://github.com/isl-org/ZoeDepth/blob/main/zoedepth/trainers/zoedepth_trainer.py); author: Shariq Farooq Bhat class Trainer(BaseTrainer): def __init__(self, config, model, train_loader, test_loader=None, device=None): self.addf = config.get("addf", False) self.lazy_epoch = -1 self.boostingdepth = config.get("boostingdepth", False) super().__init__(config, model, train_loader, test_loader=test_loader, device=device) self.device = device self.silog_loss = SILogLoss(beta=config.get("beta", 0.15)) self.dense_silog_loss = DenseSILogLoss(beta=config.get("beta", 0.15)) print("sigloss's beta is set to {}".format(config.get("beta", 0.15))) self.scaler = amp.GradScaler(enabled=self.config.use_amp) self.distribution_loss = DistributionLoss(max_depth=self.config.max_depth) self.sampled_training = config.get("sampled_training", False) self.sec_stage = config.get("sec_stage", False) self.multi_consistency = config.get("multi_consistency", False) self.use_blur = config.get("use_blur", False) self.dynamic = config.get("dynamic", False) if self.dynamic: self.dynamic_unupdate_rate = config.get("dynamic_unupdate_rate", 0.0) self.budget_loss = BudgetConstraint(loss_mu=0.0, flops_all=21552.5684, warm_up=True) self.use_scale_loss = config.get("use_scale_loss", False) if self.use_scale_loss: if config.get("scale_type", "ssim"): self.scale_loss = SSIM(window_size=config.get("window_size", int(11))) else:
self.scale_loss = HistogramMatchingLoss(min_depth=self.config.min_depth, max_depth=self.config.max_depth)
4
2023-12-04 08:43:15+00:00
16k
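Both `SILogLoss` variants configured in this record take a `beta` defaulting to 0.15. A sketch of the usual scale-invariant log formulation from the ZoeDepth lineage this trainer builds on (the repo's dense and pixel-sampled variants may additionally mask or subsample valid pixels):

```python
import torch

def silog_loss(pred: torch.Tensor, target: torch.Tensor,
               beta: float = 0.15, eps: float = 1e-7) -> torch.Tensor:
    """Scale-invariant log loss: 10 * sqrt(Var[g] + beta * E[g]^2), g = log residual."""
    g = torch.log(pred.clamp(min=eps)) - torch.log(target.clamp(min=eps))
    return 10.0 * torch.sqrt(g.var() + beta * g.mean() ** 2)
```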
baaivision/GeoDream
extern/ldm_zero123/models/diffusion/ddpm.py
[ { "identifier": "AutoencoderKL", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(\n self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"i...
import itertools import numpy as np import pytorch_lightning as pl import torch import torch.nn as nn from contextlib import contextmanager, nullcontext from functools import partial from einops import rearrange, repeat from omegaconf import ListConfig from pytorch_lightning.utilities.rank_zero import rank_zero_only from torch.optim.lr_scheduler import LambdaLR from torchvision.utils import make_grid from tqdm import tqdm from extern.ldm_zero123.models.autoencoder import ( AutoencoderKL, IdentityFirstStage, VQModelInterface, ) from extern.ldm_zero123.models.diffusion.ddim import DDIMSampler from extern.ldm_zero123.modules.attention import CrossAttention from extern.ldm_zero123.modules.diffusionmodules.util import ( extract_into_tensor, make_beta_schedule, noise_like, ) from extern.ldm_zero123.modules.distributions.distributions import ( DiagonalGaussianDistribution, normal_kl, ) from extern.ldm_zero123.modules.ema import LitEma from extern.ldm_zero123.util import ( count_params, default, exists, instantiate_from_config, isimage, ismap, log_txt_as_img, mean_flat, )
11,975
key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond } else: cond = ( [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] ) return self.p_sample_loop( cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0, ) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample( ddim_steps, batch_size, shape, cond, verbose=False, **kwargs ) else: samples, intermediates = self.sample( cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs ) return samples, intermediates @torch.no_grad() def get_unconditional_conditioning( self, batch_size, null_label=None, image_size=512 ): if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = xc.to(self.device) c = self.get_learned_conditioning(xc) else: # todo: get null label from cond_stage_model raise NotImplementedError() c = repeat(c, "1 ... -> b ...", b=batch_size).to(self.device) cond = {} cond["c_crossattn"] = [c] cond["c_concat"] = [ torch.zeros([batch_size, 4, image_size // 8, image_size // 8]).to( self.device ) ] return cond @torch.no_grad() def log_images( self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1.0, return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1.0, unconditional_guidance_label=None, use_ema_scope=True, **kwargs, ): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input( batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N, ) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]: xc = log_txt_as_img( (x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25, ) log["conditioning"] = xc elif self.cond_stage_key == "class_label": xc = log_txt_as_img( (x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25, ) log["conditioning"] = xc
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {"concat": "c_concat", "crossattn": "c_crossattn", "adm": "y"} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__( self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0.0, v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1.0, conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0.0, make_it_fit=False, ucg_training=None, ): super().__init__() assert parameterization in [ "eps", "x0", ], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print( f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode" ) self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? 
self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if ckpt_path is not None: self.init_from_ckpt( ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet ) self.register_schedule( given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, ): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule( beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) alphas = 1.0 - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1]) (timesteps,) = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert ( alphas_cumprod.shape[0] == self.num_timesteps ), "alphas have to be defined for each timestep" to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer("betas", to_torch(betas)) self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod)) self.register_buffer("alphas_cumprod_prev", to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer("sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod))) self.register_buffer( "sqrt_one_minus_alphas_cumprod", to_torch(np.sqrt(1.0 - alphas_cumprod)) ) self.register_buffer( "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod)) ) self.register_buffer( "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod)) ) self.register_buffer( "sqrt_recipm1_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod - 1)) ) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * ( 1.0 - alphas_cumprod_prev ) / (1.0 - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. 
- alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer("posterior_variance", to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer( "posterior_log_variance_clipped", to_torch(np.log(np.maximum(posterior_variance, 1e-20))), ) self.register_buffer( "posterior_mean_coef1", to_torch(betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)), ) self.register_buffer( "posterior_mean_coef2", to_torch( (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod) ), ) if self.parameterization == "eps": lvlb_weights = self.betas**2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod) ) elif self.parameterization == "x0": lvlb_weights = ( 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2.0 * 1 - torch.Tensor(alphas_cumprod)) ) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer("lvlb_weights", lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) if self.make_it_fit: n_params = len( [ name for name, _ in itertools.chain( self.named_parameters(), self.named_buffers() ) ] ) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params, ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[ i % old_shape[0], j % old_shape[1] ] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = ( self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(sd, strict=False) ) print( f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys" ) if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. 
:return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor( self.log_one_minus_alphas_cumprod, t, x_start.shape ) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor( self.posterior_log_variance_clipped, t, x_t.shape ) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1.0, 1.0) model_mean, posterior_variance, posterior_log_variance = self.q_posterior( x_start=x_recon, x_t=x, t=t ) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance( x=x, t=t, clip_denoised=clip_denoised ) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm( reversed(range(0, self.num_timesteps)), desc="Sampling t", total=self.num_timesteps, ): img = self.p_sample( img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised, ) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop( (batch_size, channels, image_size, image_size), return_intermediates=return_intermediates, ) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise ) def get_loss(self, pred, target, mean=True): if self.loss_type == "l1": loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == "l2": if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction="none") else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = 
self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start else: raise NotImplementedError( f"Paramterization {self.parameterization} not yet supported" ) loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = "train" if self.training else "val" loss_dict.update({f"{log_prefix}/loss_simple": loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f"{log_prefix}/loss_vlb": loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f"{log_prefix}/loss": loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint( 0, self.num_timesteps, (x.shape[0],), device=self.device ).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, "b h w c -> b c h w") x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if self.ucg_prng.choice(2, p=[1 - p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict( loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True ) self.log( "global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False, ) if self.use_scheduler: lr = self.optimizers().param_groups[0]["lr"] self.log( "lr_abs", lr, prog_bar=True, logger=True, on_step=True, on_epoch=False ) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + "_ema": loss_dict_ema[key] for key in loss_dict_ema} self.log_dict( loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True ) self.log_dict( loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True ) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, "n b c h w -> b n c h w") denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), "1 -> b", b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = 
self.sample( batch_size=N, return_intermediates=True ) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__( self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, unet_trainable=True, *args, **kwargs, ): self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs["timesteps"] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = "concat" if concat_mode else "crossattn" if cond_stage_config == "__is_unconditional__": conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.unet_trainable = unet_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer("scale_factor", torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward # construct linear projection layer for concatenating image CLIP embedding and RT self.cc_projection = nn.Linear(772, 768) nn.init.eye_(list(self.cc_projection.parameters())[0][:768, :768]) nn.init.zeros_(list(self.cc_projection.parameters())[1]) self.cc_projection.requires_grad_(True) self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True def make_cond_schedule( self, ): self.cond_ids = torch.full( size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long, ) ids = torch.round( torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond) ).long() self.cond_ids[: self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if ( self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt ): assert ( self.scale_factor == 1.0 ), "rather not use custom rescaling and std-rescaling simultaneously" # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer("scale_factor", 1.0 / z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, 
linear_end=2e-2, cosine_s=8e-3, ): super().register_schedule( given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s ) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != "__is_first_stage__" assert config != "__is_unconditional__" model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list( self, samples, desc="", force_no_decoder_quantization=False ): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append( self.decode_first_stage( zd.to(self.device), force_not_quantize=force_no_decoder_quantization ) ) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, "n b c h w -> b n c h w") denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError( f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented" ) return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, "encode") and callable( self.cond_stage_model.encode ): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min( torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1 )[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, device): weighting = self.delta_border(h, w) weighting = torch.clip( weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 
1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip( L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"], ) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold( self, x, kernel_size, stride, uf=1, df=1 ): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting( kernel_size[0], kernel_size[1], Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict( kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf), ) fold = torch.nn.Fold( output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2 ) weighting = self.get_weighting( kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view( 1, 1, h * uf, w * uf ) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx) ) elif df > 1 and uf == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict( kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df), ) fold = torch.nn.Fold( output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2 ) weighting = self.get_weighting( kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view( 1, 1, h // df, w // df ) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx) ) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input( self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, uncond=0.05, ): x = super().get_input(batch, k) T = batch["T"].to(memory_format=torch.contiguous_format).float() if bs is not None: x = x[:bs] T = T[:bs].to(self.device) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() cond_key = cond_key or self.cond_stage_key xc = super().get_input(batch, cond_key).to(self.device) if bs is not None: xc = xc[:bs] cond = {} # To support classifier-free guidance, randomly drop out only text conditioning 5%, only image conditioning 5%, and both 5%. 
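# with uncond = 0.05, `random` below partitions the batch into four regimes:
#   [0, uncond)           -> text prompt nulled, image latent kept
#   [uncond, 2*uncond)    -> text prompt nulled AND image latent zeroed
#   [2*uncond, 3*uncond)  -> text prompt kept, image latent zeroed
#   [3*uncond, 1)         -> both conditionings kept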
random = torch.rand(x.size(0), device=x.device) prompt_mask = rearrange(random < 2 * uncond, "n -> n 1 1") input_mask = 1 - rearrange( (random >= uncond).float() * (random < 3 * uncond).float(), "n -> n 1 1 1" ) null_prompt = self.get_learned_conditioning([""]) # z.shape: [8, 4, 64, 64]; c.shape: [8, 1, 768] # print('=========== xc shape ===========', xc.shape) with torch.enable_grad(): clip_emb = self.get_learned_conditioning(xc).detach() null_prompt = self.get_learned_conditioning([""]).detach() cond["c_crossattn"] = [ self.cc_projection( torch.cat( [ torch.where(prompt_mask, null_prompt, clip_emb), T[:, None, :], ], dim=-1, ) ) ] cond["c_concat"] = [ input_mask * self.encode_first_stage((xc.to(self.device))).mode().detach() ] out = [z, cond] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out # @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, "b h w c -> b c h w").contiguous() z = 1.0 / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold( z, ks, stride, uf=uf ) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view( (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [ self.first_stage_model.decode( z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize, ) for i in range(z.shape[-1]) ] else: output_list = [ self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1]) ] o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode( z, force_not_quantize=predict_cids or force_not_quantize ) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode( z, force_not_quantize=predict_cids or force_not_quantize ) else: return self.first_stage_model.decode(z) # @torch.no_grad() # wasted two hours to find this bug... why no grad here! def encode_first_stage(self, x): if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. 
(64, 64) df = self.split_input_params["vqf"] self.split_input_params["original_image_size"] = x.shape[-2:] bs, nc, h, w = x.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold( x, ks, stride, df=df ) z = unfold(x) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view( (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) output_list = [ self.first_stage_model.encode(z[:, :, :, :, i]) for i in range(z.shape[-1]) ] o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization return decoded else: return self.first_stage_model.encode(x) else: return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): x, c = self.get_input(batch, self.first_stage_key) loss = self(x, c) return loss def forward(self, x, c, *args, **kwargs): t = torch.randint( 0, self.num_timesteps, (x.shape[0],), device=self.device ).long() if self.model.conditioning_key is not None: assert c is not None # if self.cond_stage_trainable: # c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset def rescale_bbox(bbox): x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) w = min(bbox[2] / crop_coordinates[2], 1 - x0) h = min(bbox[3] / crop_coordinates[3], 1 - y0) return x0, y0, w, h return [rescale_bbox(b) for b in bboxes] def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): # hybrid case, cond is exptected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = ( "c_concat" if self.model.conditioning_key == "concat" else "c_crossattn" ) cond = {key: cond} if hasattr(self, "split_input_params"): assert len(cond) == 1 # todo can only deal with one conditioning atm assert not return_ids ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. 
(64, 64) h, w = x_noisy.shape[-2:] fold, unfold, normalization, weighting = self.get_fold_unfold( x_noisy, ks, stride ) z = unfold(x_noisy) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view( (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] if ( self.cond_stage_key in ["image", "LR_image", "segmentation", "bbox_img"] and self.model.conditioning_key ): # todo check for completeness c_key = next(iter(cond.keys())) # get key c = next(iter(cond.values())) # get value assert len(c) == 1 # todo extend to list with more than one elem c = c[0] # get element c = unfold(c) c = c.view( (c.shape[0], -1, ks[0], ks[1], c.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])] elif self.cond_stage_key == "coordinates_bbox": assert ( "original_image_size" in self.split_input_params ), "BoudingBoxRescaling is missing original_image_size" # assuming padding of unfold is always 0 and its dilation is always 1 n_patches_per_row = int((w - ks[0]) / stride[0] + 1) full_img_h, full_img_w = self.split_input_params["original_image_size"] # as we are operating on latents, we need the factor from the original image size to the # spatial latent size to properly rescale the crops for regenerating the bbox annotations num_downs = self.first_stage_model.encoder.num_resolutions - 1 rescale_latent = 2 ** (num_downs) # get top left postions of patches as conforming for the bbbox tokenizer, therefore we # need to rescale the tl patch coordinates to be in between (0,1) tl_patch_coordinates = [ ( rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h, ) for patch_nr in range(z.shape[-1]) ] # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) patch_limits = [ ( x_tl, y_tl, rescale_latent * ks[0] / full_img_w, rescale_latent * ks[1] / full_img_h, ) for x_tl, y_tl in tl_patch_coordinates ] # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] # tokenize crop coordinates for the bounding boxes of the respective patches patch_limits_tknzd = [ torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to( self.device ) for bbox in patch_limits ] # list of length l with tensors of shape (1, 2) # cut tknzd crop position from conditioning assert isinstance(cond, dict), "cond must be dict to be fed into model" cut_cond = cond["c_crossattn"][0][..., :-2].to(self.device) adapted_cond = torch.stack( [torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd] ) adapted_cond = rearrange(adapted_cond, "l b n -> (l b) n") adapted_cond = self.get_learned_conditioning(adapted_cond) adapted_cond = rearrange( adapted_cond, "(l b) n d -> l b n d", l=z.shape[-1] ) cond_list = [{"c_crossattn": [e]} for e in adapted_cond] else: cond_list = [ cond for i in range(z.shape[-1]) ] # Todo make this more efficient # apply model by loop over crops output_list = [ self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1]) ] assert not isinstance( output_list[0], tuple ) # todo cant deal with multiple model outputs check this never happens o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together x_recon = fold(o) / normalization else: x_recon = self.model(x_noisy, t, **cond) if 
isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart ) / extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl( mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0 ) return mean_flat(kl_prior) / np.log(2.0) def p_losses(self, x_start, cond, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_output = self.apply_model(x_noisy, t, cond) loss_dict = {} prefix = "train" if self.training else "val" if self.parameterization == "x0": target = x_start elif self.parameterization == "eps": target = noise else: raise NotImplementedError() loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) loss_dict.update({f"{prefix}/loss_simple": loss_simple.mean()}) logvar_t = self.logvar[t].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t # loss = loss_simple / torch.exp(self.logvar) + self.logvar if self.learn_logvar: loss_dict.update({f"{prefix}/loss_gamma": loss.mean()}) loss_dict.update({"logvar": self.logvar.data.mean()}) loss = self.l_simple_weight * loss.mean() loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() loss_dict.update({f"{prefix}/loss_vlb": loss_vlb}) loss += self.original_elbo_weight * loss_vlb loss_dict.update({f"{prefix}/loss": loss}) return loss, loss_dict def p_mean_variance( self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, return_x0=False, score_corrector=None, corrector_kwargs=None, ): t_in = t model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) if score_corrector is not None: assert self.parameterization == "eps" model_out = score_corrector.modify_score( self, model_out, x, t, c, **corrector_kwargs ) if return_codebook_ids: model_out, logits = model_out if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out else: raise NotImplementedError() if clip_denoised: x_recon.clamp_(-1.0, 1.0) if quantize_denoised: x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) model_mean, posterior_variance, posterior_log_variance = self.q_posterior( x_start=x_recon, x_t=x, t=t ) if return_codebook_ids: return model_mean, posterior_variance, posterior_log_variance, logits elif return_x0: return model_mean, posterior_variance, posterior_log_variance, x_recon else: return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample( self, x, c, t, clip_denoised=False, repeat_noise=False, return_codebook_ids=False, quantize_denoised=False, return_x0=False, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, ): b, *_, device = *x.shape, x.device outputs = self.p_mean_variance( x=x, 
c=c, t=t, clip_denoised=clip_denoised, return_codebook_ids=return_codebook_ids, quantize_denoised=quantize_denoised, return_x0=return_x0, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, ) if return_codebook_ids: raise DeprecationWarning("Support dropped.") model_mean, _, model_log_variance, logits = outputs elif return_x0: model_mean, _, model_log_variance, x0 = outputs else: model_mean, _, model_log_variance = outputs noise = noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.0: noise = torch.nn.functional.dropout(noise, p=noise_dropout) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) if return_codebook_ids: return model_mean + nonzero_mask * ( 0.5 * model_log_variance ).exp() * noise, logits.argmax(dim=1) if return_x0: return ( model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0, ) else: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def progressive_denoising( self, cond, shape, verbose=True, callback=None, quantize_denoised=False, img_callback=None, mask=None, x0=None, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, log_every_t=None, ): if not log_every_t: log_every_t = self.log_every_t timesteps = self.num_timesteps if batch_size is not None: b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = { key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond } else: cond = ( [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] ) if start_T is not None: timesteps = min(timesteps, start_T) iterator = ( tqdm( reversed(range(0, timesteps)), desc="Progressive Generation", total=timesteps, ) if verbose else reversed(range(0, timesteps)) ) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != "hybrid" tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img, x0_partial = self.p_sample( img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, ) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1.0 - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop( self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None, ): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator 
= ( tqdm(reversed(range(0, timesteps)), desc="Sampling t", total=timesteps) if verbose else reversed(range(0, timesteps)) ) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != "hybrid" tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample( img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, ) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1.0 - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample( self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs, ): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = { key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond } else: cond = ( [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] ) return self.p_sample_loop( cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0, ) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample( ddim_steps, batch_size, shape, cond, verbose=False, **kwargs ) else: samples, intermediates = self.sample( cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs ) return samples, intermediates @torch.no_grad() def get_unconditional_conditioning( self, batch_size, null_label=None, image_size=512 ): if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = xc.to(self.device) c = self.get_learned_conditioning(xc) else: # todo: get null label from cond_stage_model raise NotImplementedError() c = repeat(c, "1 ... 
-> b ...", b=batch_size).to(self.device) cond = {} cond["c_crossattn"] = [c] cond["c_concat"] = [ torch.zeros([batch_size, 4, image_size // 8, image_size // 8]).to( self.device ) ] return cond @torch.no_grad() def log_images( self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1.0, return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1.0, unconditional_guidance_label=None, use_ema_scope=True, **kwargs, ): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input( batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N, ) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]: xc = log_txt_as_img( (x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25, ) log["conditioning"] = xc elif self.cond_stage_key == "class_label": xc = log_txt_as_img( (x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25, ) log["conditioning"] = xc
elif isimage(xc):
15
2023-12-01 01:59:42+00:00
16k
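For reference, the q_sample/q_mean_variance methods in the record above rely on the closed-form forward process x_t = sqrt(abar_t) * x_0 + sqrt(1 - abar_t) * eps, where abar_t is the cumulative product of (1 - beta_t). A self-contained sketch of that computation under a toy linear schedule (function names are illustrative, not from the repo):

import torch

def linear_alphas_cumprod(timesteps: int = 1000, beta_start: float = 1e-4, beta_end: float = 2e-2) -> torch.Tensor:
    betas = torch.linspace(beta_start, beta_end, timesteps)
    return torch.cumprod(1.0 - betas, dim=0)

def toy_q_sample(x0: torch.Tensor, t: torch.Tensor, alphas_cumprod: torch.Tensor) -> torch.Tensor:
    # x_t = sqrt(abar_t) * x_0 + sqrt(1 - abar_t) * eps
    abar = alphas_cumprod[t].view(-1, *([1] * (x0.ndim - 1)))
    return abar.sqrt() * x0 + (1.0 - abar).sqrt() * torch.randn_like(x0)

x_t = toy_q_sample(torch.randn(2, 3, 8, 8), torch.tensor([10, 900]), linear_alphas_cumprod())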
lucidrains/meshgpt-pytorch
meshgpt_pytorch/trainer.py
[ { "identifier": "custom_collate", "path": "meshgpt_pytorch/data.py", "snippet": "def custom_collate(data, pad_id = -1):\n is_dict = isinstance(first(data), dict)\n\n if is_dict:\n keys = first(data).keys()\n data = [d.values() for d in data]\n\n output = []\n\n for datum in zip...
from pathlib import Path from functools import partial from packaging import version from contextlib import nullcontext, contextmanager from torch import nn, Tensor from torch.nn import Module from torch.utils.data import Dataset, DataLoader from torch.optim.lr_scheduler import _LRScheduler from pytorch_custom_utils import ( get_adam_optimizer, OptimizerWithWarmupSchedule, add_wandb_tracker_contextmanager ) from accelerate import Accelerator from accelerate.utils import DistributedDataParallelKwargs from beartype import beartype from beartype.door import is_bearable from beartype.typing import Optional, Tuple, Type, List from ema_pytorch import EMA from meshgpt_pytorch.data import custom_collate from meshgpt_pytorch.version import __version__ from meshgpt_pytorch.meshgpt_pytorch import ( MeshAutoencoder, MeshTransformer ) import torch import torch.nn.functional as F
13,006
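The trainer code that follows checkpoints by bundling model, EMA, and optimizer state dicts together with the package version, and warns on a version mismatch at load time. A minimal standalone sketch of that pattern (PKG_VERSION stands in for meshgpt_pytorch.version.__version__; the EMA copy is omitted for brevity):

import torch
from pathlib import Path
from packaging import version

PKG_VERSION = '1.0.0'  # stand-in for the package's __version__

def save_ckpt(model, optimizer, step: int, path):
    pkg = dict(model = model.state_dict(), optimizer = optimizer.state_dict(),
               version = PKG_VERSION, step = step)
    torch.save(pkg, str(path))

def load_ckpt(model, optimizer, path):
    path = Path(path)
    assert path.exists()
    pkg = torch.load(str(path))
    if version.parse(PKG_VERSION) != version.parse(pkg['version']):
        print(f"checkpoint saved at version {pkg['version']}, current version is {PKG_VERSION}")
    model.load_state_dict(pkg['model'])
    optimizer.load_state_dict(pkg['optimizer'])
    return pkg['step']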
optimizer = self.optimizer.state_dict(), version = __version__, step = self.step.item(), config = self.unwrapped_model._config ) torch.save(pkg, str(path)) def load(self, path): path = Path(path) assert path.exists() pkg = torch.load(str(path)) if version.parse(__version__) != version.parse(pkg['version']): self.print(f'loading saved mesh autoencoder at version {pkg["version"]}, but current package version is {__version__}') self.model.load_state_dict(pkg['model']) self.ema_model.load_state_dict(pkg['ema_model']) self.optimizer.load_state_dict(pkg['optimizer']) self.step.copy_(pkg['step']) def next_data_to_forward_kwargs(self, dl_iter) -> dict: data = next(dl_iter) if isinstance(data, tuple): forward_kwargs = dict(zip(self.data_kwargs, data)) elif isinstance(data, dict): forward_kwargs = data maybe_del(forward_kwargs, 'texts', 'text_embeds') return forward_kwargs def forward(self): step = self.step.item() dl_iter = cycle(self.dataloader) if self.is_main and self.should_validate: val_dl_iter = cycle(self.val_dataloader) while step < self.num_train_steps: for i in range(self.grad_accum_every): is_last = i == (self.grad_accum_every - 1) maybe_no_sync = partial(self.accelerator.no_sync, self.model) if not is_last else nullcontext forward_kwargs = self.next_data_to_forward_kwargs(dl_iter) with self.accelerator.autocast(), maybe_no_sync(): total_loss, (recon_loss, commit_loss) = self.model( **forward_kwargs, return_loss_breakdown = True ) self.accelerator.backward(total_loss / self.grad_accum_every) self.print(f'recon loss: {recon_loss.item():.3f} | commit loss: {commit_loss.sum().item():.3f}') self.log( total_loss = total_loss.item(), commit_loss = commit_loss.sum().item(), recon_loss = recon_loss.item() ) self.optimizer.step() self.optimizer.zero_grad() step += 1 self.step.add_(1) self.wait() if self.is_main: self.ema_model.update() self.wait() if self.is_main and self.should_validate and divisible_by(step, self.val_every): total_val_recon_loss = 0. self.ema_model.eval() num_val_batches = self.val_num_batches * self.grad_accum_every for _ in range(num_val_batches): with self.accelerator.autocast(), torch.no_grad(): forward_kwargs = self.next_data_to_forward_kwargs(val_dl_iter) val_loss, (val_recon_loss, val_commit_loss) = self.ema_model( **forward_kwargs, return_loss_breakdown = True ) total_val_recon_loss += (val_recon_loss / num_val_batches) self.print(f'valid recon loss: {total_val_recon_loss:.3f}') self.log(val_loss = total_val_recon_loss) self.wait() if self.is_main and divisible_by(step, self.checkpoint_every): checkpoint_num = step // self.checkpoint_every self.save(self.checkpoint_folder / f'mesh-autoencoder.ckpt.{checkpoint_num}.pt') self.wait() self.print('training complete') # mesh transformer trainer @add_wandb_tracker_contextmanager() class MeshTransformerTrainer(Module): @beartype def __init__( self,
# constants DEFAULT_DDP_KWARGS = DistributedDataParallelKwargs( find_unused_parameters = True ) # helper functions def exists(v): return v is not None def default(v, d): return v if exists(v) else d def divisible_by(num, den): return (num % den) == 0 def cycle(dl): while True: for data in dl: yield data def maybe_del(d: dict, *keys): for key in keys: if key not in d: continue del d[key] # autoencoder trainer @add_wandb_tracker_contextmanager() class MeshAutoencoderTrainer(Module): @beartype def __init__( self, model: MeshAutoencoder, dataset: Dataset, num_train_steps: int, batch_size: int, grad_accum_every: int, val_dataset: Optional[Dataset] = None, val_every: int = 100, val_num_batches: int = 5, learning_rate: float = 1e-4, weight_decay: float = 0., max_grad_norm: Optional[float] = None, ema_kwargs: dict = dict(), scheduler: Optional[Type[_LRScheduler]] = None, scheduler_kwargs: dict = dict(), accelerator_kwargs: dict = dict(), optimizer_kwargs: dict = dict(), checkpoint_every = 1000, checkpoint_folder = './checkpoints', data_kwargs: Tuple[str, ...] = ['vertices', 'faces', 'face_edges'], warmup_steps = 1000, use_wandb_tracking = False ): super().__init__() # experiment tracker self.use_wandb_tracking = use_wandb_tracking if use_wandb_tracking: accelerator_kwargs['log_with'] = 'wandb' if 'kwargs_handlers' not in accelerator_kwargs: accelerator_kwargs['kwargs_handlers'] = [DEFAULT_DDP_KWARGS] # accelerator self.accelerator = Accelerator(**accelerator_kwargs) self.model = model if self.is_main: self.ema_model = EMA(model, **ema_kwargs) self.optimizer = OptimizerWithWarmupSchedule( accelerator = self.accelerator, optimizer = get_adam_optimizer(model.parameters(), lr = learning_rate, wd = weight_decay, **optimizer_kwargs), scheduler = scheduler, scheduler_kwargs = scheduler_kwargs, warmup_steps = warmup_steps, max_grad_norm = max_grad_norm ) self.dataloader = DataLoader( dataset, batch_size = batch_size, shuffle = True, drop_last = True, collate_fn = partial(custom_collate, pad_id = model.pad_id) ) self.should_validate = exists(val_dataset) if self.should_validate: assert len(val_dataset) > 0, 'your validation dataset is empty' self.val_every = val_every self.val_num_batches = val_num_batches self.val_dataloader = DataLoader( val_dataset, batch_size = batch_size, shuffle = True, drop_last = True, collate_fn = partial(custom_collate, pad_id = model.pad_id) ) if hasattr(dataset, 'data_kwargs') and exists(dataset.data_kwargs): assert is_bearable(dataset.data_kwargs, List[str]) self.data_kwargs = dataset.data_kwargs else: self.data_kwargs = data_kwargs ( self.model, self.dataloader ) = self.accelerator.prepare( self.model, self.dataloader ) self.grad_accum_every = grad_accum_every self.num_train_steps = num_train_steps self.register_buffer('step', torch.tensor(0)) self.checkpoint_every = checkpoint_every self.checkpoint_folder = Path(checkpoint_folder) self.checkpoint_folder.mkdir(exist_ok = True, parents = True) @property def ema_tokenizer(self): return self.ema_model.ema_model def tokenize(self, *args, **kwargs): return self.ema_tokenizer.tokenize(*args, **kwargs) def log(self, **data_kwargs): self.accelerator.log(data_kwargs, step = self.step.item()) @property def device(self): return self.unwrapped_model.device @property def is_main(self): return self.accelerator.is_main_process @property def unwrapped_model(self): return self.accelerator.unwrap_model(self.model) @property def is_local_main(self): return self.accelerator.is_local_main_process def wait(self): return 
self.accelerator.wait_for_everyone() def print(self, msg): return self.accelerator.print(msg) def save(self, path, overwrite = True): path = Path(path) assert overwrite or not path.exists() pkg = dict( model = self.unwrapped_model.state_dict(), ema_model = self.ema_model.state_dict(), optimizer = self.optimizer.state_dict(), version = __version__, step = self.step.item(), config = self.unwrapped_model._config ) torch.save(pkg, str(path)) def load(self, path): path = Path(path) assert path.exists() pkg = torch.load(str(path)) if version.parse(__version__) != version.parse(pkg['version']): self.print(f'loading saved mesh autoencoder at version {pkg["version"]}, but current package version is {__version__}') self.model.load_state_dict(pkg['model']) self.ema_model.load_state_dict(pkg['ema_model']) self.optimizer.load_state_dict(pkg['optimizer']) self.step.copy_(pkg['step']) def next_data_to_forward_kwargs(self, dl_iter) -> dict: data = next(dl_iter) if isinstance(data, tuple): forward_kwargs = dict(zip(self.data_kwargs, data)) elif isinstance(data, dict): forward_kwargs = data maybe_del(forward_kwargs, 'texts', 'text_embeds') return forward_kwargs def forward(self): step = self.step.item() dl_iter = cycle(self.dataloader) if self.is_main and self.should_validate: val_dl_iter = cycle(self.val_dataloader) while step < self.num_train_steps: for i in range(self.grad_accum_every): is_last = i == (self.grad_accum_every - 1) maybe_no_sync = partial(self.accelerator.no_sync, self.model) if not is_last else nullcontext forward_kwargs = self.next_data_to_forward_kwargs(dl_iter) with self.accelerator.autocast(), maybe_no_sync(): total_loss, (recon_loss, commit_loss) = self.model( **forward_kwargs, return_loss_breakdown = True ) self.accelerator.backward(total_loss / self.grad_accum_every) self.print(f'recon loss: {recon_loss.item():.3f} | commit loss: {commit_loss.sum().item():.3f}') self.log( total_loss = total_loss.item(), commit_loss = commit_loss.sum().item(), recon_loss = recon_loss.item() ) self.optimizer.step() self.optimizer.zero_grad() step += 1 self.step.add_(1) self.wait() if self.is_main: self.ema_model.update() self.wait() if self.is_main and self.should_validate and divisible_by(step, self.val_every): total_val_recon_loss = 0. self.ema_model.eval() num_val_batches = self.val_num_batches * self.grad_accum_every for _ in range(num_val_batches): with self.accelerator.autocast(), torch.no_grad(): forward_kwargs = self.next_data_to_forward_kwargs(val_dl_iter) val_loss, (val_recon_loss, val_commit_loss) = self.ema_model( **forward_kwargs, return_loss_breakdown = True ) total_val_recon_loss += (val_recon_loss / num_val_batches) self.print(f'valid recon loss: {total_val_recon_loss:.3f}') self.log(val_loss = total_val_recon_loss) self.wait() if self.is_main and divisible_by(step, self.checkpoint_every): checkpoint_num = step // self.checkpoint_every self.save(self.checkpoint_folder / f'mesh-autoencoder.ckpt.{checkpoint_num}.pt') self.wait() self.print('training complete') # mesh transformer trainer @add_wandb_tracker_contextmanager() class MeshTransformerTrainer(Module): @beartype def __init__( self,
model: MeshTransformer,
3
2023-11-29 14:58:15+00:00
16k
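The trainer record above drives training with an infinite `cycle` over the dataloader and accumulates gradients, suppressing DDP gradient synchronization on every micro-batch except the last via `accelerator.no_sync`. A minimal standalone sketch of that pattern, assuming a toy linear model and synthetic data (none of these names come from the record):

from contextlib import nullcontext
from functools import partial

import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

def cycle(dl):
    # loop the dataloader forever so training is driven by step count, not epochs
    while True:
        for data in dl:
            yield data

accelerator = Accelerator()
model = torch.nn.Linear(8, 1)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
dl = DataLoader(TensorDataset(torch.randn(64, 8), torch.randn(64, 1)), batch_size=8)
model, optimizer, dl = accelerator.prepare(model, optimizer, dl)
dl_iter = cycle(dl)

grad_accum_every = 4  # illustrative value
for i in range(grad_accum_every):
    is_last = i == (grad_accum_every - 1)
    # skip the DDP gradient all-reduce on every micro-batch except the last
    maybe_no_sync = partial(accelerator.no_sync, model) if not is_last else nullcontext
    x, y = next(dl_iter)
    with maybe_no_sync():
        loss = torch.nn.functional.mse_loss(model(x), y)
        accelerator.backward(loss / grad_accum_every)
optimizer.step()
optimizer.zero_grad()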
EricGuo5513/momask-codes
train_res_transformer.py
[ { "identifier": "ResidualTransformer", "path": "models/mask_transformer/transformer.py", "snippet": "class ResidualTransformer(nn.Module):\n def __init__(self, code_dim, cond_mode, latent_dim=256, ff_size=1024, num_layers=8, cond_drop_prob=0.1,\n num_heads=4, dropout=0.1, clip_dim=512...
import os import torch import numpy as np from torch.utils.data import DataLoader from os.path import join as pjoin from models.mask_transformer.transformer import ResidualTransformer from models.mask_transformer.transformer_trainer import ResidualTransformerTrainer from models.vq.model import RVQVAE from options.train_option import TrainT2MOptions from utils.plot_script import plot_3d_motion from utils.motion_process import recover_from_ric from utils.get_opt import get_opt from utils.fixseed import fixseed from utils.paramUtil import t2m_kinematic_chain, kit_kinematic_chain from data.t2m_dataset import Text2MotionDataset from motion_loaders.dataset_motion_loader import get_dataset_motion_loader from models.t2m_eval_wrapper import EvaluatorModelWrapper
13,813
vq_opt = get_opt(opt_path, opt.device) vq_model = RVQVAE(vq_opt, dim_pose, vq_opt.nb_code, vq_opt.code_dim, vq_opt.output_emb_width, vq_opt.down_t, vq_opt.stride_t, vq_opt.width, vq_opt.depth, vq_opt.dilation_growth_rate, vq_opt.vq_act, vq_opt.vq_norm) ckpt = torch.load(pjoin(vq_opt.checkpoints_dir, vq_opt.dataset_name, vq_opt.name, 'model', 'net_best_fid.tar'), map_location=opt.device) model_key = 'vq_model' if 'vq_model' in ckpt else 'net' vq_model.load_state_dict(ckpt[model_key]) print(f'Loading VQ Model {opt.vq_name}') vq_model.to(opt.device) return vq_model, vq_opt if __name__ == '__main__': parser = TrainT2MOptions() opt = parser.parse() fixseed(opt.seed) opt.device = torch.device("cpu" if opt.gpu_id == -1 else "cuda:" + str(opt.gpu_id)) torch.autograd.set_detect_anomaly(True) opt.save_root = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.name) opt.model_dir = pjoin(opt.save_root, 'model') # opt.meta_dir = pjoin(opt.save_root, 'meta') opt.eval_dir = pjoin(opt.save_root, 'animation') opt.log_dir = pjoin('./log/res/', opt.dataset_name, opt.name) os.makedirs(opt.model_dir, exist_ok=True) # os.makedirs(opt.meta_dir, exist_ok=True) os.makedirs(opt.eval_dir, exist_ok=True) os.makedirs(opt.log_dir, exist_ok=True) if opt.dataset_name == 't2m': opt.data_root = './dataset/HumanML3D' opt.motion_dir = pjoin(opt.data_root, 'new_joint_vecs') opt.joints_num = 22 opt.max_motion_len = 55 dim_pose = 263 radius = 4 fps = 20 kinematic_chain = t2m_kinematic_chain dataset_opt_path = './checkpoints/t2m/Comp_v6_KLD005/opt.txt' elif opt.dataset_name == 'kit': #TODO opt.data_root = './dataset/KIT-ML' opt.motion_dir = pjoin(opt.data_root, 'new_joint_vecs') opt.joints_num = 21 radius = 240 * 8 fps = 12.5 dim_pose = 251 opt.max_motion_len = 55 kinematic_chain = kit_kinematic_chain dataset_opt_path = './checkpoints/kit/Comp_v6_KLD005/opt.txt' else: raise KeyError('Dataset Does Not Exist') opt.text_dir = pjoin(opt.data_root, 'texts') vq_model, vq_opt = load_vq_model() clip_version = 'ViT-B/32' opt.num_tokens = vq_opt.nb_code opt.num_quantizers = vq_opt.num_quantizers # if opt.is_v2: res_transformer = ResidualTransformer(code_dim=vq_opt.code_dim, cond_mode='text', latent_dim=opt.latent_dim, ff_size=opt.ff_size, num_layers=opt.n_layers, num_heads=opt.n_heads, dropout=opt.dropout, clip_dim=512, shared_codebook=vq_opt.shared_codebook, cond_drop_prob=opt.cond_drop_prob, # codebook=vq_model.quantizer.codebooks[0] if opt.fix_token_emb else None, share_weight=opt.share_weight, clip_version=clip_version, opt=opt) # else: # res_transformer = ResidualTransformer(code_dim=vq_opt.code_dim, # cond_mode='text', # latent_dim=opt.latent_dim, # ff_size=opt.ff_size, # num_layers=opt.n_layers, # num_heads=opt.n_heads, # dropout=opt.dropout, # clip_dim=512, # shared_codebook=vq_opt.shared_codebook, # cond_drop_prob=opt.cond_drop_prob, # # codebook=vq_model.quantizer.codebooks[0] if opt.fix_token_emb else None, # clip_version=clip_version, # opt=opt) all_params = 0 pc_transformer = sum(param.numel() for param in res_transformer.parameters_wo_clip()) print(res_transformer) # print("Total parameters of t2m_transformer net: {:.2f}M".format(pc_transformer / 1000_000)) all_params += pc_transformer print('Total parameters of all models: {:.2f}M'.format(all_params / 1000_000)) mean = np.load(pjoin(opt.checkpoints_dir, opt.dataset_name, opt.vq_name, 'meta', 'mean.npy')) std = np.load(pjoin(opt.checkpoints_dir, opt.dataset_name, opt.vq_name, 'meta', 'std.npy')) train_split_file = pjoin(opt.data_root, 'train.txt') val_split_file = 
pjoin(opt.data_root, 'val.txt')
def plot_t2m(data, save_dir, captions, m_lengths): data = train_dataset.inv_transform(data) # print(ep_curves.shape) for i, (caption, joint_data) in enumerate(zip(captions, data)): joint_data = joint_data[:m_lengths[i]] joint = recover_from_ric(torch.from_numpy(joint_data).float(), opt.joints_num).numpy() save_path = pjoin(save_dir, '%02d.mp4'%i) # print(joint.shape) plot_3d_motion(save_path, kinematic_chain, joint, title=caption, fps=20) def load_vq_model(): opt_path = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.vq_name, 'opt.txt') vq_opt = get_opt(opt_path, opt.device) vq_model = RVQVAE(vq_opt, dim_pose, vq_opt.nb_code, vq_opt.code_dim, vq_opt.output_emb_width, vq_opt.down_t, vq_opt.stride_t, vq_opt.width, vq_opt.depth, vq_opt.dilation_growth_rate, vq_opt.vq_act, vq_opt.vq_norm) ckpt = torch.load(pjoin(vq_opt.checkpoints_dir, vq_opt.dataset_name, vq_opt.name, 'model', 'net_best_fid.tar'), map_location=opt.device) model_key = 'vq_model' if 'vq_model' in ckpt else 'net' vq_model.load_state_dict(ckpt[model_key]) print(f'Loading VQ Model {opt.vq_name}') vq_model.to(opt.device) return vq_model, vq_opt if __name__ == '__main__': parser = TrainT2MOptions() opt = parser.parse() fixseed(opt.seed) opt.device = torch.device("cpu" if opt.gpu_id == -1 else "cuda:" + str(opt.gpu_id)) torch.autograd.set_detect_anomaly(True) opt.save_root = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.name) opt.model_dir = pjoin(opt.save_root, 'model') # opt.meta_dir = pjoin(opt.save_root, 'meta') opt.eval_dir = pjoin(opt.save_root, 'animation') opt.log_dir = pjoin('./log/res/', opt.dataset_name, opt.name) os.makedirs(opt.model_dir, exist_ok=True) # os.makedirs(opt.meta_dir, exist_ok=True) os.makedirs(opt.eval_dir, exist_ok=True) os.makedirs(opt.log_dir, exist_ok=True) if opt.dataset_name == 't2m': opt.data_root = './dataset/HumanML3D' opt.motion_dir = pjoin(opt.data_root, 'new_joint_vecs') opt.joints_num = 22 opt.max_motion_len = 55 dim_pose = 263 radius = 4 fps = 20 kinematic_chain = t2m_kinematic_chain dataset_opt_path = './checkpoints/t2m/Comp_v6_KLD005/opt.txt' elif opt.dataset_name == 'kit': #TODO opt.data_root = './dataset/KIT-ML' opt.motion_dir = pjoin(opt.data_root, 'new_joint_vecs') opt.joints_num = 21 radius = 240 * 8 fps = 12.5 dim_pose = 251 opt.max_motion_len = 55 kinematic_chain = kit_kinematic_chain dataset_opt_path = './checkpoints/kit/Comp_v6_KLD005/opt.txt' else: raise KeyError('Dataset Does Not Exist') opt.text_dir = pjoin(opt.data_root, 'texts') vq_model, vq_opt = load_vq_model() clip_version = 'ViT-B/32' opt.num_tokens = vq_opt.nb_code opt.num_quantizers = vq_opt.num_quantizers # if opt.is_v2: res_transformer = ResidualTransformer(code_dim=vq_opt.code_dim, cond_mode='text', latent_dim=opt.latent_dim, ff_size=opt.ff_size, num_layers=opt.n_layers, num_heads=opt.n_heads, dropout=opt.dropout, clip_dim=512, shared_codebook=vq_opt.shared_codebook, cond_drop_prob=opt.cond_drop_prob, # codebook=vq_model.quantizer.codebooks[0] if opt.fix_token_emb else None, share_weight=opt.share_weight, clip_version=clip_version, opt=opt) # else: # res_transformer = ResidualTransformer(code_dim=vq_opt.code_dim, # cond_mode='text', # latent_dim=opt.latent_dim, # ff_size=opt.ff_size, # num_layers=opt.n_layers, # num_heads=opt.n_heads, # dropout=opt.dropout, # clip_dim=512, # shared_codebook=vq_opt.shared_codebook, # cond_drop_prob=opt.cond_drop_prob, # # codebook=vq_model.quantizer.codebooks[0] if opt.fix_token_emb else None, # clip_version=clip_version, # opt=opt) all_params = 0 pc_transformer = sum(param.numel() 
for param in res_transformer.parameters_wo_clip()) print(res_transformer) # print("Total parameters of t2m_transformer net: {:.2f}M".format(pc_transformer / 1000_000)) all_params += pc_transformer print('Total parameters of all models: {:.2f}M'.format(all_params / 1000_000)) mean = np.load(pjoin(opt.checkpoints_dir, opt.dataset_name, opt.vq_name, 'meta', 'mean.npy')) std = np.load(pjoin(opt.checkpoints_dir, opt.dataset_name, opt.vq_name, 'meta', 'std.npy')) train_split_file = pjoin(opt.data_root, 'train.txt') val_split_file = pjoin(opt.data_root, 'val.txt')
train_dataset = Text2MotionDataset(opt, mean, std, train_split_file)
9
2023-11-29 19:21:27+00:00
16k
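Two small idioms from the training script above, shown standalone: tolerating either checkpoint key ('vq_model' vs. the legacy 'net') and reporting parameter counts in millions. `DummyModel` and the in-memory `ckpt` dict are stand-ins, not names from the record:

import torch
import torch.nn as nn

class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.net = nn.Linear(263, 512)

model = DummyModel()

# in the script this dict comes from torch.load(..., map_location=opt.device)
ckpt = {'vq_model': model.state_dict()}
model_key = 'vq_model' if 'vq_model' in ckpt else 'net'  # support both old and new checkpoints
model.load_state_dict(ckpt[model_key])

n_params = sum(p.numel() for p in model.parameters())
print('Total parameters: {:.2f}M'.format(n_params / 1000_000))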
dvlab-research/LLMGA
llmga/diffusers/src/diffusers/schedulers/scheduling_dpmsolver_singlestep.py
[ { "identifier": "ConfigMixin", "path": "llmga/diffusers/src/diffusers/configuration_utils.py", "snippet": "class ConfigMixin:\n r\"\"\"\n Base class for all configuration classes. All configuration parameters are stored under `self.config`. Also\n provides the [`~ConfigMixin.from_config`] and [...
import math import numpy as np import torch from typing import List, Optional, Tuple, Union from ..configuration_utils import ConfigMixin, register_to_config from ..utils import deprecate, logging from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
12,063
self.order_list = self.get_order_list(num_inference_steps) # add an index counter for schedulers that allow duplicated timesteps self._step_index = None # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor: """ "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing pixels from saturation at each step. We find that dynamic thresholding results in significantly better photorealism as well as better image-text alignment, especially when using very large guidance weights." https://arxiv.org/abs/2205.11487 """ dtype = sample.dtype batch_size, channels, *remaining_dims = sample.shape if dtype not in (torch.float32, torch.float64): sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half # Flatten sample for doing quantile calculation along each image sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) abs_sample = sample.abs() # "a certain percentile absolute pixel value" s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) s = torch.clamp( s, min=1, max=self.config.sample_max_value ) # When clamped to min=1, equivalent to standard clipping to [-1, 1] s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0 sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s" sample = sample.reshape(batch_size, channels, *remaining_dims) sample = sample.to(dtype) return sample # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t def _sigma_to_t(self, sigma, log_sigmas): # get log sigma log_sigma = np.log(sigma) # get distribution dists = log_sigma - log_sigmas[:, np.newaxis] # get sigmas range low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) high_idx = low_idx + 1 low = log_sigmas[low_idx] high = log_sigmas[high_idx] # interpolate sigmas w = (low - log_sigma) / (low - high) w = np.clip(w, 0, 1) # transform interpolation to time range t = (1 - w) * low_idx + w * high_idx t = t.reshape(sigma.shape) return t # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler._sigma_to_alpha_sigma_t def _sigma_to_alpha_sigma_t(self, sigma): alpha_t = 1 / ((sigma**2 + 1) ** 0.5) sigma_t = sigma * alpha_t return alpha_t, sigma_t # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor: """Constructs the noise schedule of Karras et al. (2022).""" sigma_min: float = in_sigmas[-1].item() sigma_max: float = in_sigmas[0].item() rho = 7.0 # 7.0 is the value used in the paper ramp = np.linspace(0, 1, num_inference_steps) min_inv_rho = sigma_min ** (1 / rho) max_inv_rho = sigma_max ** (1 / rho) sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas def convert_model_output( self, model_output: torch.FloatTensor, *args, sample: torch.FloatTensor = None, **kwargs, ) -> torch.FloatTensor: """ Convert the model output to the corresponding type the DPMSolver/DPMSolver++ algorithm needs. 
DPM-Solver is designed to discretize an integral of the noise prediction model, and DPM-Solver++ is designed to discretize an integral of the data prediction model. <Tip> The algorithm and model type are decoupled. You can use either DPMSolver or DPMSolver++ for both noise prediction and data prediction models. </Tip> Args: model_output (`torch.FloatTensor`): The direct output from the learned diffusion model. sample (`torch.FloatTensor`): A current instance of a sample created by the diffusion process. Returns: `torch.FloatTensor`: The converted model output. """ timestep = args[0] if len(args) > 0 else kwargs.pop("timestep", None) if sample is None: if len(args) > 1: sample = args[1] else: raise ValueError("missing `sample` as a required keyword argument") if timestep is not None:
# Copyright 2023 TSAIL Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # DISCLAIMER: This file is strongly influenced by https://github.com/LuChengTHU/dpm-solver logger = logging.get_logger(__name__) # pylint: disable=invalid-name # Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar def betas_for_alpha_bar( num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine", ): """ Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of (1-beta) over time from t = [0,1]. Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up to that part of the diffusion process. Args: num_diffusion_timesteps (`int`): the number of betas to produce. max_beta (`float`): the maximum beta to use; use values lower than 1 to prevent singularities. alpha_transform_type (`str`, *optional*, defaults to `cosine`): the type of noise schedule for alpha_bar. Choose from `cosine` or `exp` Returns: betas (`np.ndarray`): the betas used by the scheduler to step the model outputs """ if alpha_transform_type == "cosine": def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}") betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) class DPMSolverSinglestepScheduler(SchedulerMixin, ConfigMixin): """ `DPMSolverSinglestepScheduler` is a fast dedicated high-order solver for diffusion ODEs. This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic methods the library implements for all schedulers such as loading and saving. Args: num_train_timesteps (`int`, defaults to 1000): The number of diffusion steps to train the model. beta_start (`float`, defaults to 0.0001): The starting `beta` value of inference. beta_end (`float`, defaults to 0.02): The final `beta` value. beta_schedule (`str`, defaults to `"linear"`): The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from `linear`, `scaled_linear`, or `squaredcos_cap_v2`. trained_betas (`np.ndarray`, *optional*): Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. solver_order (`int`, defaults to 2): The DPMSolver order which can be `1` or `2` or `3`. It is recommended to use `solver_order=2` for guided sampling, and `solver_order=3` for unconditional sampling.
prediction_type (`str`, defaults to `epsilon`, *optional*): Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), `sample` (directly predicts the noisy sample) or `v_prediction` (see section 2.4 of [Imagen Video](https://imagen.research.google/video/paper.pdf) paper). thresholding (`bool`, defaults to `False`): Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such as Stable Diffusion. dynamic_thresholding_ratio (`float`, defaults to 0.995): The ratio for the dynamic thresholding method. Valid only when `thresholding=True`. sample_max_value (`float`, defaults to 1.0): The threshold value for dynamic thresholding. Valid only when `thresholding=True` and `algorithm_type="dpmsolver++"`. algorithm_type (`str`, defaults to `dpmsolver++`): Algorithm type for the solver; can be `dpmsolver`, `dpmsolver++`, `sde-dpmsolver` or `sde-dpmsolver++`. The `dpmsolver` type implements the algorithms in the [DPMSolver](https://huggingface.co/papers/2206.00927) paper, and the `dpmsolver++` type implements the algorithms in the [DPMSolver++](https://huggingface.co/papers/2211.01095) paper. It is recommended to use `dpmsolver++` or `sde-dpmsolver++` with `solver_order=2` for guided sampling like in Stable Diffusion. solver_type (`str`, defaults to `midpoint`): Solver type for the second-order solver; can be `midpoint` or `heun`. The solver type slightly affects the sample quality, especially for a small number of steps. It is recommended to use `midpoint` solvers. lower_order_final (`bool`, defaults to `True`): Whether to use lower-order solvers in the final steps. Only valid for < 15 inference steps. This can stabilize the sampling of DPMSolver for steps < 15, especially for steps <= 10. use_karras_sigmas (`bool`, *optional*, defaults to `False`): Whether to use Karras sigmas for step sizes in the noise schedule during the sampling process. If `True`, the sigmas are determined according to a sequence of noise levels {σi}. lambda_min_clipped (`float`, defaults to `-inf`): Clipping threshold for the minimum value of `lambda(t)` for numerical stability. This is critical for the cosine (`squaredcos_cap_v2`) noise schedule. variance_type (`str`, *optional*): Set to "learned" or "learned_range" for diffusion models that predict variance. If set, the model's output contains the predicted Gaussian variance. """ _compatibles = [e.name for e in KarrasDiffusionSchedulers] order = 1 @register_to_config def __init__( self, num_train_timesteps: int = 1000, beta_start: float = 0.0001, beta_end: float = 0.02, beta_schedule: str = "linear", trained_betas: Optional[np.ndarray] = None, solver_order: int = 2, prediction_type: str = "epsilon", thresholding: bool = False, dynamic_thresholding_ratio: float = 0.995, sample_max_value: float = 1.0, algorithm_type: str = "dpmsolver++", solver_type: str = "midpoint", lower_order_final: bool = True, use_karras_sigmas: Optional[bool] = False, lambda_min_clipped: float = -float("inf"), variance_type: Optional[str] = None, ): if trained_betas is not None: self.betas = torch.tensor(trained_betas, dtype=torch.float32) elif beta_schedule == "linear": self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model.
self.betas = ( torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule self.betas = betas_for_alpha_bar(num_train_timesteps) else: raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}") self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) # Currently we only support VP-type noise schedule self.alpha_t = torch.sqrt(self.alphas_cumprod) self.sigma_t = torch.sqrt(1 - self.alphas_cumprod) self.lambda_t = torch.log(self.alpha_t) - torch.log(self.sigma_t) # standard deviation of the initial noise distribution self.init_noise_sigma = 1.0 # settings for DPM-Solver if algorithm_type not in ["dpmsolver", "dpmsolver++"]: if algorithm_type == "deis": self.register_to_config(algorithm_type="dpmsolver++") else: raise NotImplementedError(f"{algorithm_type} is not implemented for {self.__class__}") if solver_type not in ["midpoint", "heun"]: if solver_type in ["logrho", "bh1", "bh2"]: self.register_to_config(solver_type="midpoint") else: raise NotImplementedError(f"{solver_type} is not implemented for {self.__class__}") # setable values self.num_inference_steps = None timesteps = np.linspace(0, num_train_timesteps - 1, num_train_timesteps, dtype=np.float32)[::-1].copy() self.timesteps = torch.from_numpy(timesteps) self.model_outputs = [None] * solver_order self.sample = None self.order_list = self.get_order_list(num_train_timesteps) self._step_index = None def get_order_list(self, num_inference_steps: int) -> List[int]: """ Computes the solver order at each time step. Args: num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. """ steps = num_inference_steps order = self.config.solver_order if self.config.lower_order_final: if order == 3: if steps % 3 == 0: orders = [1, 2, 3] * (steps // 3 - 1) + [1, 2] + [1] elif steps % 3 == 1: orders = [1, 2, 3] * (steps // 3) + [1] else: orders = [1, 2, 3] * (steps // 3) + [1, 2] elif order == 2: if steps % 2 == 0: orders = [1, 2] * (steps // 2) else: orders = [1, 2] * (steps // 2) + [1] elif order == 1: orders = [1] * steps else: if order == 3: orders = [1, 2, 3] * (steps // 3) elif order == 2: orders = [1, 2] * (steps // 2) elif order == 1: orders = [1] * steps return orders @property def step_index(self): """ The index counter for current timestep. It will increase by 1 after each scheduler step. """ return self._step_index def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): """ Sets the discrete timesteps used for the diffusion chain (to be run before inference). Args: num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. device (`str` or `torch.device`, *optional*): The device to which the timesteps should be moved. If `None`, the timesteps are not moved. """ self.num_inference_steps = num_inference_steps # Clipping the minimum of all lambda(t) for numerical stability. # This is critical for cosine (squaredcos_cap_v2) noise schedule.
clipped_idx = torch.searchsorted(torch.flip(self.lambda_t, [0]), self.config.lambda_min_clipped) timesteps = ( np.linspace(0, self.config.num_train_timesteps - 1 - clipped_idx, num_inference_steps + 1) .round()[::-1][:-1] .copy() .astype(np.int64) ) sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) if self.config.use_karras_sigmas: log_sigmas = np.log(sigmas) sigmas = np.flip(sigmas).copy() sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=num_inference_steps) timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]).round() sigmas = np.concatenate([sigmas, sigmas[-1:]]).astype(np.float32) else: sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) sigma_last = ((1 - self.alphas_cumprod[0]) / self.alphas_cumprod[0]) ** 0.5 sigmas = np.concatenate([sigmas, [sigma_last]]).astype(np.float32) self.sigmas = torch.from_numpy(sigmas).to(device=device) self.timesteps = torch.from_numpy(timesteps).to(device=device, dtype=torch.int64) self.model_outputs = [None] * self.config.solver_order self.sample = None if not self.config.lower_order_final and num_inference_steps % self.config.solver_order != 0: logger.warning( f"Changing scheduler {self.config} to have `lower_order_final` set to True to handle uneven amount of inference steps. Please make sure to always use an even number of `num_inference_steps` when using `lower_order_final=True`." ) self.register_to_config(lower_order_final=True) self.order_list = self.get_order_list(num_inference_steps) # add an index counter for schedulers that allow duplicated timesteps self._step_index = None # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor: """ "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing pixels from saturation at each step. We find that dynamic thresholding results in significantly better photorealism as well as better image-text alignment, especially when using very large guidance weights."
https://arxiv.org/abs/2205.11487 """ dtype = sample.dtype batch_size, channels, *remaining_dims = sample.shape if dtype not in (torch.float32, torch.float64): sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half # Flatten sample for doing quantile calculation along each image sample = sample.reshape(batch_size, channels * np.prod(remaining_dims)) abs_sample = sample.abs() # "a certain percentile absolute pixel value" s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) s = torch.clamp( s, min=1, max=self.config.sample_max_value ) # When clamped to min=1, equivalent to standard clipping to [-1, 1] s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0 sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s" sample = sample.reshape(batch_size, channels, *remaining_dims) sample = sample.to(dtype) return sample # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._sigma_to_t def _sigma_to_t(self, sigma, log_sigmas): # get log sigma log_sigma = np.log(sigma) # get distribution dists = log_sigma - log_sigmas[:, np.newaxis] # get sigmas range low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) high_idx = low_idx + 1 low = log_sigmas[low_idx] high = log_sigmas[high_idx] # interpolate sigmas w = (low - log_sigma) / (low - high) w = np.clip(w, 0, 1) # transform interpolation to time range t = (1 - w) * low_idx + w * high_idx t = t.reshape(sigma.shape) return t # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler._sigma_to_alpha_sigma_t def _sigma_to_alpha_sigma_t(self, sigma): alpha_t = 1 / ((sigma**2 + 1) ** 0.5) sigma_t = sigma * alpha_t return alpha_t, sigma_t # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler._convert_to_karras def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor: """Constructs the noise schedule of Karras et al. (2022).""" sigma_min: float = in_sigmas[-1].item() sigma_max: float = in_sigmas[0].item() rho = 7.0 # 7.0 is the value used in the paper ramp = np.linspace(0, 1, num_inference_steps) min_inv_rho = sigma_min ** (1 / rho) max_inv_rho = sigma_max ** (1 / rho) sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas def convert_model_output( self, model_output: torch.FloatTensor, *args, sample: torch.FloatTensor = None, **kwargs, ) -> torch.FloatTensor: """ Convert the model output to the corresponding type the DPMSolver/DPMSolver++ algorithm needs. DPM-Solver is designed to discretize an integral of the noise prediction model, and DPM-Solver++ is designed to discretize an integral of the data prediction model. <Tip> The algorithm and model type are decoupled. You can use either DPMSolver or DPMSolver++ for both noise prediction and data prediction models. </Tip> Args: model_output (`torch.FloatTensor`): The direct output from the learned diffusion model. sample (`torch.FloatTensor`): A current instance of a sample created by the diffusion process. Returns: `torch.FloatTensor`: The converted model output. """ timestep = args[0] if len(args) > 0 else kwargs.pop("timestep", None) if sample is None: if len(args) > 1: sample = args[1] else: raise ValueError("missing `sample` as a required keyword argument") if timestep is not None:
deprecate(
3
2023-11-27 18:46:55+00:00
16k
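The scheduler record above builds its step sizes with `_convert_to_karras`. The Karras et al. (2022) schedule it implements can be sketched on its own: interpolate linearly between `sigma_max` and `sigma_min` in `1/rho` space, then raise back to the `rho` power. The sigma range below is illustrative, not taken from the record:

import numpy as np

def karras_sigmas(sigma_min, sigma_max, num_inference_steps, rho=7.0):
    ramp = np.linspace(0, 1, num_inference_steps)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    # ramp=0 gives sigma_max, ramp=1 gives sigma_min: noise decreases as sampling proceeds
    return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho

sigmas = karras_sigmas(sigma_min=0.03, sigma_max=14.6, num_inference_steps=10)
print(sigmas[0], sigmas[-1])  # ~14.6 down to ~0.03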
sherwinbahmani/4dfy
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Flo...
from dataclasses import dataclass, field from threestudio.models.geometry.base import ( BaseExplicitGeometry, BaseGeometry, contract_to_unisphere, ) from threestudio.models.geometry.implicit_sdf import ImplicitSDF from threestudio.models.geometry.implicit_volume import ImplicitVolume from threestudio.models.isosurface import MarchingTetrahedraHelper from threestudio.models.mesh import Mesh from threestudio.models.networks import get_encoding, get_mlp from threestudio.utils.ops import scale_tensor from threestudio.utils.typing import * import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import threestudio
13,286
self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: raise NotImplementedError def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from( other: BaseGeometry, cfg: Optional[Union[dict, DictConfig]] = None, copy_net: bool = True, **kwargs, ) -> "TetrahedraSDFGrid": if isinstance(other, TetrahedraSDFGrid): instance = TetrahedraSDFGrid(cfg, **kwargs) assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution instance.isosurface_bbox = other.isosurface_bbox.clone() instance.sdf.data = other.sdf.data.clone() if ( instance.cfg.isosurface_deformable_grid and other.cfg.isosurface_deformable_grid ): assert ( instance.deformation is not None and other.deformation is not None ) instance.deformation.data = other.deformation.data.clone() if ( not instance.cfg.geometry_only and not other.cfg.geometry_only and copy_net ): instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: raise NotImplementedError def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from( other: BaseGeometry, cfg: Optional[Union[dict, DictConfig]] = None, copy_net: bool = True, **kwargs, ) -> "TetrahedraSDFGrid": if 
isinstance(other, TetrahedraSDFGrid): instance = TetrahedraSDFGrid(cfg, **kwargs) assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution instance.isosurface_bbox = other.isosurface_bbox.clone() instance.sdf.data = other.sdf.data.clone() if ( instance.cfg.isosurface_deformable_grid and other.cfg.isosurface_deformable_grid ): assert ( instance.deformation is not None and other.deformation is not None ) instance.deformation.data = other.deformation.data.clone() if ( not instance.cfg.geometry_only and not other.cfg.geometry_only and copy_net ): instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance
elif isinstance(other, ImplicitVolume):
4
2023-11-29 05:15:56+00:00
16k
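The `fix_geometry` branch in `configure` above comes down to `register_parameter` vs. `register_buffer`: both land in `state_dict`, but only the parameter is exposed to the optimizer. A toy illustration under invented names:

import torch
import torch.nn as nn

class SDFGridToy(nn.Module):
    def __init__(self, n_vertices=16, fix_geometry=False):
        super().__init__()
        sdf = torch.zeros(n_vertices, 1)
        if not fix_geometry:
            self.register_parameter('sdf', nn.Parameter(sdf))  # trainable
        else:
            self.register_buffer('sdf', sdf)                   # frozen, but still serialized

print(sum(p.numel() for p in SDFGridToy(fix_geometry=False).parameters()))  # 16
print(sum(p.numel() for p in SDFGridToy(fix_geometry=True).parameters()))   # 0
print('sdf' in SDFGridToy(fix_geometry=True).state_dict())                  # True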
rlawjdghek/StableVITON
ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n ...
import torch import torch.nn as nn import numpy as np import pytorch_lightning as pl import itertools import torchvision.transforms as T import random import torch.nn.functional as F from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager, nullcontext from functools import partial from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities.distributed import rank_zero_only from omegaconf import ListConfig from torchvision.transforms.functional import resize from diffusers.models.autoencoder_kl import AutoencoderKLOutput from diffusers.models.vae import DecoderOutput from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from ldm.modules.ema import LitEma from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like, zero_module, conv_nd from ldm.models.diffusion.ddim import DDIMSampler
13,207
assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def predict_start_from_z_and_v(self, x_t, t, v): # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v ) def predict_eps_from_z_and_v(self, x_t, t, v): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None):
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, l_cond_simple_weight=1.0, l_cond_recon_weight=1.0, **kwargs ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.unet_config = unet_config self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.imagenet_norm = T.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)) self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight self.l_cond_simple_weight = l_cond_simple_weight self.l_cond_recon_weight = l_cond_recon_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if reset_ema: assert exists(ckpt_path) if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) if reset_ema: assert self.use_ema print(f"Resetting ema to pure model weights. 
This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) else: self.register_buffer('logvar', logvar) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* (1 - torch.Tensor(alphas_cumprod))) elif self.parameterization == "v": lvlb_weights = torch.ones_like(self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))) else: raise NotImplementedError("mu not supported") lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape.
""" mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def predict_start_from_z_and_v(self, x_t, t, v): # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v ) def predict_eps_from_z_and_v(self, x_t, t, v): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None):
noise = default(noise, lambda: torch.randn_like(x_start))
2
2023-12-02 05:56:58+00:00
16k
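A minimal, self-contained sketch of the closed-form forward process q(x_t | x_0) that this record's q_mean_variance docstring and q_sample method describe; the function name and buffer arguments below are illustrative, not taken from the repo:

import torch

def q_sample_sketch(x_start, t, sqrt_alphas_cumprod, sqrt_one_minus_alphas_cumprod, noise=None):
    # x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps,  eps ~ N(0, I)
    if noise is None:
        noise = torch.randn_like(x_start)
    shape = (x_start.shape[0],) + (1,) * (x_start.dim() - 1)  # broadcast over C, H, W
    a = sqrt_alphas_cumprod.gather(0, t).reshape(shape)
    b = sqrt_one_minus_alphas_cumprod.gather(0, t).reshape(shape)
    return a * x_start + b * noise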
AIFSH/NativeSpeaker
src/core.py
[ { "identifier": "HandleLog", "path": "src/log_helper.py", "snippet": "class HandleLog:\n \"\"\"\n 先创建日志记录器(logging.getLogger),然后再设置日志级别(logger.setLevel),\n 接着再创建日志文件,也就是日志保存的地方(logging.FileHandler),然后再设置日志格式(logging.Formatter),\n 最后再将日志处理程序记录到记录器(addHandler)\n \"\"\"\n\n def __init__(s...
import os import torch import soundfile as sf import gc; gc.collect(); torch.cuda.empty_cache(); del cloner import gc; gc.collect(); torch.cuda.empty_cache(); del diarize_model import gc; gc.collect(); torch.cuda.empty_cache(); del whisper from typing import Any from tqdm import tqdm from src.log_helper import HandleLog from moviepy.editor import VideoFileClip,concatenate_videoclips from pathlib import Path from pydub import AudioSegment from src.audio_bgm_split import AudioProcess from src.voice_clone import VoiceCloner from src.temp_manager import TempFileManager from src.translator import Translator from src.lipsync import LipSync from src.upscale import Upscale from src.nfsw import analyse_video from src.third_part.whisperx import load_model,load_audio,DiarizationPipeline
12,328
seg_len = len(whispher_segments) cloner = VoiceCloner(self.xt_version_name) root_path = Path(self.output_file).parent zimu_txt = os.path.join(root_path,"zimu.txt") for i, segment in tqdm(enumerate(whispher_segments), desc="voice cloning", total=seg_len): start = segment['start'] * 1000 end = segment['end'] * 1000 text_list = segment['text_list'] if i == 0: vocal_cloned_audio += org_vocal[:start] bgm_audio_extend += bgm_audio[:start] video_extend_list.append(org_video_clip.subclip(0, start / 1000)) total_cloned_vocal = AudioSegment.silent(0) if len(text_list) > 0: for src_text in text_list: dst_text = self.translotor(src_text,src_lang_code,self.lang_code) with open(zimu_txt,mode="a",encoding="utf-8",newline="\n") as w: w.write(src_text) w.write("\n") w.write(dst_text) w.write("\n") cloned_vocal_path = cloner(text=dst_text, lang_code=self.lang_code, speaker_wav=[segment['wav'],speakers_wav[segment['speaker']]]) cloned_vocal = AudioSegment.from_file(cloned_vocal_path) total_cloned_vocal += cloned_vocal else: logger.info(f'no sound there') total_cloned_vocal = AudioSegment.silent(end - start) vocal_cloned_audio += total_cloned_vocal tmp_bgm_audio = bgm_audio[start:end] bgm_audio_extend += self.bgm_map_vocal(tmp_bgm_audio, total_cloned_vocal) tmp_video_clip = org_video_clip.subclip(start/1000, end/1000) tmp_video_clip = self.video_map_vocal(tmp_video_clip, total_cloned_vocal) video_extend_list.append(tmp_video_clip) if i < seg_len - 1: # duration vocal_cloned_audio += org_vocal[end:whispher_segments[i+1]['start']*1000] bgm_audio_extend += bgm_audio[end:whispher_segments[i+1]['start']*1000] video_extend_list.append(org_video_clip.subclip(end/1000, whispher_segments[i+1]['start'])) if i == seg_len - 1: # duration vocal_cloned_audio += org_vocal[end:] bgm_audio_extend += bgm_audio[end:] video_extend_list.append(org_video_clip.subclip(end/1000)) vocal_cloned_path = os.path.join(root_path,"vocal_cloned.wav") vocal_cloned_audio.export(vocal_cloned_path, format="wav") logger.info("vocal_cloned.wav saved in {}, you can check it".format(root_path)) bgm_extend_path = os.path.join(root_path,"bgm_extend.wav") bgm_audio_extend.export(bgm_extend_path, format="wav") logger.info("bgm_extend.wav saved in {}, you can check it".format(root_path)) voice_cloned_path = os.path.join(root_path,"voice_cloned.wav") self.combie_audio(vocal_cloned_audio,bgm_audio_extend,voice_cloned_path) logger.info("voice_cloned.wav saved in {}, you can check it".format(root_path)) video_extend_path = os.path.join(root_path,"video_extend.mp4") video_extended = concatenate_videoclips(video_extend_list) video_extended.write_videofile(video_extend_path,fps=25,audio=False) logger.info("video_extend.mp4 saved in {}, you can check it".format(root_path)) # delete model if low on GPU resources logger.critical("[Step 5] Wav2Lip by vocal_cloned.wav and video_extend.mp4") lipsync = LipSync(self.model_name) lipsync(video_extend_path,vocal_cloned_path,self.output_file,voice_cloned_path) logger.critical("[Step 6] Upscale output video last step") upscaler = Upscale() upscale_workplace_path = os.path.join(root_path,"upscale_workplace") upscaler(input_path = self.output_file,output_path=upscale_workplace_path,audio=voice_cloned_path) self.temp_manager.cleanup() def combie_audio(self,vocal_audio:AudioSegment,bgm_audio:AudioSegment,file_path): new_audio = vocal_audio.overlay(bgm_audio) new_audio.export(file_path, format="wav") def bgm_map_vocal(self,bgm_audio:AudioSegment,vocal_audio:AudioSegment): audio_duration = vocal_audio.duration_seconds ratio = 
audio_duration / bgm_audio.duration_seconds print("audio.duration_seconds / bgm.duration_seconds = {}".format(ratio)) tmp_bgm_path = self.temp_manager.create_temp_file(suffix='.wav').name bgm_audio.export(tmp_bgm_path, format="wav") bgm_path = self.temp_manager.create_temp_file(suffix='.wav').name y,sr = sf.read(tmp_bgm_path) sf.write(bgm_path,y,int(sr*ratio)) bgm_extended = AudioSegment.from_file(bgm_path) return bgm_extended[:audio_duration * 1000] def video_map_vocal(self,vido_clip:VideoFileClip,vocal_audio:AudioSegment): audio_duration = vocal_audio.duration_seconds video_duration = vido_clip.duration ratio = video_duration / audio_duration print("video_duration / audio_duration =ratio:{}".format(ratio)) new_video = vido_clip.fl_time(lambda t: ratio*t,apply_to=['mask', 'audio']) new_video1 = new_video.set_duration(audio_duration) new_video2 = new_video1.set_fps(new_video1.fps / video_duration * audio_duration) return new_video2.subclip(0,audio_duration) def speech_to_text(self, vocal_file):
os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com' logger = HandleLog() class Core: def __init__(self, args) -> None: cur_path = os.path.dirname(os.path.realpath(__file__)) # current path self.weights_path = os.path.join(os.path.dirname(cur_path), 'weights') # weights_path to save model if not os.path.exists(self.weights_path): os.mkdir(self.weights_path) # self.input_file = args.input_file_path self.output_file = args.output_file_path self.lang_code = args.lang_code self.device = "cuda" if torch.cuda.is_available() else "cpu" self.hf_token = args.hf_token self.temp_manager = TempFileManager() self.translotor = Translator() self.model_name = args.model_name self.xt_version_name = args.xt_version_name if analyse_video(args.input_file_path): raise Exception("sorry! nativespeaker is not for you") def __call__(self, *args: Any, **kwds: Any) -> Any: logger.critical("[Step 1] Moviepy split voice and frames from video") org_voice_path = os.path.join(Path(self.input_file).parent, "org_voice.wav") org_video_clip = VideoFileClip(self.input_file) org_video_clip.audio.write_audiofile(org_voice_path,codec='pcm_s16le') logger.info("save original voice in {}".format(org_voice_path)) logger.critical("[Step 2] H5 Split vocal and bgm from voice") audio_process = AudioProcess(15) vocal_file, bgm_file = audio_process.split(org_voice_path) logger.critical("[Step 3] whisperx from speech to text") whispher_segments, src_lang_code, speakers_wav = self.speech_to_text(vocal_file) logger.critical("[Step 4] translate,text to speech,video and voice_cloned alignment") vocal_cloned_audio = AudioSegment.silent(0) bgm_audio_extend = AudioSegment.silent(0) video_extend_list = [] org_vocal = AudioSegment.from_file(vocal_file) bgm_audio = AudioSegment.from_file(bgm_file) seg_len = len(whispher_segments) cloner = VoiceCloner(self.xt_version_name) root_path = Path(self.output_file).parent zimu_txt = os.path.join(root_path,"zimu.txt") for i, segment in tqdm(enumerate(whispher_segments), desc="voice cloning", total=seg_len): start = segment['start'] * 1000 end = segment['end'] * 1000 text_list = segment['text_list'] if i == 0: vocal_cloned_audio += org_vocal[:start] bgm_audio_extend += bgm_audio[:start] video_extend_list.append(org_video_clip.subclip(0, start / 1000)) total_cloned_vocal = AudioSegment.silent(0) if len(text_list) > 0: for src_text in text_list: dst_text = self.translotor(src_text,src_lang_code,self.lang_code) with open(zimu_txt,mode="a",encoding="utf-8",newline="\n") as w: w.write(src_text) w.write("\n") w.write(dst_text) w.write("\n") cloned_vocal_path = cloner(text=dst_text, lang_code=self.lang_code, speaker_wav=[segment['wav'],speakers_wav[segment['speaker']]]) cloned_vocal = AudioSegment.from_file(cloned_vocal_path) total_cloned_vocal += cloned_vocal else: logger.info(f'no sound there') total_cloned_vocal = AudioSegment.silent(end - start) vocal_cloned_audio += total_cloned_vocal tmp_bgm_audio = bgm_audio[start:end] bgm_audio_extend += self.bgm_map_vocal(tmp_bgm_audio, total_cloned_vocal) tmp_video_clip = org_video_clip.subclip(start/1000, end/1000) tmp_video_clip = self.video_map_vocal(tmp_video_clip, total_cloned_vocal) video_extend_list.append(tmp_video_clip) if i < seg_len - 1: # duration vocal_cloned_audio += org_vocal[end:whispher_segments[i+1]['start']*1000] bgm_audio_extend += bgm_audio[end:whispher_segments[i+1]['start']*1000] video_extend_list.append(org_video_clip.subclip(end/1000, whispher_segments[i+1]['start'])) if i == seg_len - 1: # duration vocal_cloned_audio += org_vocal[end:] bgm_audio_extend +=
bgm_audio[end:] video_extend_list.append(org_video_clip.subclip(end/1000)) vocal_cloned_path = os.path.join(root_path,"vocal_cloned.wav") vocal_cloned_audio.export(vocal_cloned_path, format="wav") logger.info("vocal_cloned.wav saved in {}, you can check it".format(root_path)) bgm_extend_path = os.path.join(root_path,"bgm_extend.wav") bgm_audio_extend.export(bgm_extend_path, format="wav") logger.info("bgm_extend.wav saved in {}, you can check it".format(root_path)) voice_cloned_path = os.path.join(root_path,"voice_cloned.wav") self.combie_audio(vocal_cloned_audio,bgm_audio_extend,voice_cloned_path) logger.info("voice_cloned.wav saved in {}, you can check it".format(root_path)) video_extend_path = os.path.join(root_path,"video_extend.mp4") video_extended = concatenate_videoclips(video_extend_list) video_extended.write_videofile(video_extend_path,fps=25,audio=False) logger.info("video_extend.mp4 saved in {}, you can check it".format(root_path)) # delete model if low on GPU resources logger.critical("[Step 5] Wav2Lip by vocal_cloned.wav and video_extend.mp4") lipsync = LipSync(self.model_name) lipsync(video_extend_path,vocal_cloned_path,self.output_file,voice_cloned_path) logger.critical("[Step 6] Upscale output video last step") upscaler = Upscale() upscale_workplace_path = os.path.join(root_path,"upscale_workplace") upscaler(input_path = self.output_file,output_path=upscale_workplace_path,audio=voice_cloned_path) self.temp_manager.cleanup() def combie_audio(self,vocal_audio:AudioSegment,bgm_audio:AudioSegment,file_path): new_audio = vocal_audio.overlay(bgm_audio) new_audio.export(file_path, format="wav") def bgm_map_vocal(self,bgm_audio:AudioSegment,vocal_audio:AudioSegment): audio_duration = vocal_audio.duration_seconds ratio = audio_duration / bgm_audio.duration_seconds print("audio.duration_seconds / bgm.duration_seconds = {}".format(ratio)) tmp_bgm_path = self.temp_manager.create_temp_file(suffix='.wav').name bgm_audio.export(tmp_bgm_path, format="wav") bgm_path = self.temp_manager.create_temp_file(suffix='.wav').name y,sr = sf.read(tmp_bgm_path) sf.write(bgm_path,y,int(sr*ratio)) bgm_extended = AudioSegment.from_file(bgm_path) return bgm_extended[:audio_duration * 1000] def video_map_vocal(self,vido_clip:VideoFileClip,vocal_audio:AudioSegment): audio_duration = vocal_audio.duration_seconds video_duration = vido_clip.duration ratio = video_duration / audio_duration print("video_duration / audio_duration =ratio:{}".format(ratio)) new_video = vido_clip.fl_time(lambda t: ratio*t,apply_to=['mask', 'audio']) new_video1 = new_video.set_duration(audio_duration) new_video2 = new_video1.set_fps(new_video1.fps / video_duration * audio_duration) return new_video2.subclip(0,audio_duration) def speech_to_text(self, vocal_file):
vocal_audio = load_audio(vocal_file)
9
2023-12-01 12:23:19+00:00
16k
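The bgm_map_vocal method above time-stretches the background track by rewriting the WAV sample rate rather than resampling. A minimal sketch of that trick, assuming soundfile is installed; the function and file names are illustrative:

import soundfile as sf

def stretch_by_header(in_path: str, out_path: str, ratio: float) -> None:
    # Writing the same samples with a scaled rate changes playback duration
    # (new duration = old duration / ratio); note this also shifts pitch.
    samples, sr = sf.read(in_path)
    sf.write(out_path, samples, int(sr * ratio))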
skhu101/GauHuman
scene/dataset_readers.py
[ { "identifier": "read_extrinsics_text", "path": "scene/colmap_loader.py", "snippet": "def read_extrinsics_text(path):\n \"\"\"\n Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py\n \"\"\"\n images = {}\n with open(path, \"r\") as fid:\n while T...
import os import sys import numpy as np import torch import json import imageio import cv2 import random from PIL import Image from typing import NamedTuple from scene.colmap_loader import read_extrinsics_text, read_intrinsics_text, qvec2rotmat, \ read_extrinsics_binary, read_intrinsics_binary, read_points3D_binary, read_points3D_text from utils.graphics_utils import getWorld2View2, focal2fov, fov2focal from pathlib import Path from plyfile import PlyData, PlyElement from utils.sh_utils import SH2RGB from scene.gaussian_model import BasicPointCloud from smpl.smpl_numpy import SMPL from smplx.body_models import SMPLX from data.dna_rendering.dna_rendering_sample_code.SMCReader import SMCReader
14,058
# # Copyright (C) 2023, Inria # GRAPHDECO research group, https://team.inria.fr/graphdeco # All rights reserved. # # This software is free for non-commercial, research and evaluation use # under the terms of the LICENSE.md file. # # For inquiries contact george.drettakis@inria.fr # class CameraInfo(NamedTuple): uid: int pose_id: int R: np.array T: np.array K: np.array FovY: np.array FovX: np.array image: np.array image_path: str image_name: str bkgd_mask: np.array bound_mask: np.array width: int height: int smpl_param: dict world_vertex: np.array world_bound: np.array big_pose_smpl_param: dict big_pose_world_vertex: np.array big_pose_world_bound: np.array class SceneInfo(NamedTuple):
# # Copyright (C) 2023, Inria # GRAPHDECO research group, https://team.inria.fr/graphdeco # All rights reserved. # # This software is free for non-commercial, research and evaluation use # under the terms of the LICENSE.md file. # # For inquiries contact george.drettakis@inria.fr # class CameraInfo(NamedTuple): uid: int pose_id: int R: np.array T: np.array K: np.array FovY: np.array FovX: np.array image: np.array image_path: str image_name: str bkgd_mask: np.array bound_mask: np.array width: int height: int smpl_param: dict world_vertex: np.array world_bound: np.array big_pose_smpl_param: dict big_pose_world_vertex: np.array big_pose_world_bound: np.array class SceneInfo(NamedTuple):
point_cloud: BasicPointCloud
11
2023-11-29 07:10:39+00:00
16k
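The imports above pull in focal2fov and fov2focal from utils.graphics_utils. A sketch of the usual pinhole-camera conversion such helpers implement, assuming the standard definition; the function names here are placeholders:

import math

def focal2fov_sketch(focal: float, pixels: float) -> float:
    # full field of view subtended by a sensor of `pixels` at focal length `focal`
    return 2 * math.atan(pixels / (2 * focal))

def fov2focal_sketch(fov: float, pixels: float) -> float:
    return pixels / (2 * math.tan(fov / 2))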
emdgroup/baybe
examples/Backtesting/impute_mode.py
[ { "identifier": "Campaign", "path": "baybe/campaign.py", "snippet": "class Campaign(SerialMixin):\n \"\"\"Main class for interaction with BayBE.\n\n Campaigns define and record an experimentation process, i.e. the execution of a\n series of measurements and the iterative sequence of events invo...
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns from baybe import Campaign from baybe.objective import Objective from baybe.parameters import NumericalDiscreteParameter, SubstanceParameter from baybe.recommenders import RandomRecommender from baybe.searchspace import SearchSpace from baybe.simulation import simulate_scenarios from baybe.strategies import TwoPhaseStrategy from baybe.targets import NumericalTarget
11,199
### Example for full simulation loop using a table-based lookup mechanism with incomplete data # This example shows a simulation for a direct arylation where not all combinations were measured. # This allows us to access information about previously conducted experiments from .xlsx-files. # This example assumes some basic familiarity with using BayBE and the lookup mechanism. # We refer to [`campaign`](./../Basics/campaign.md) for a more basic example, and # to [`full_lookup`](./full_lookup.md) for details on the lookup mechanism. #### Necessary imports for this example #### Parameters for a full simulation loop # For the full simulation, we need to define some additional parameters. # These are the number of Monte Carlo runs and the number of experiments to be conducted per run. N_MC_ITERATIONS = 2 N_DOE_ITERATIONS = 5 #### Lookup functionality and data creation # See [`full_lookup`](./full_lookup.md) for details. try: lookup = pd.read_excel("./lookup_withmissing.xlsx") except FileNotFoundError: try: lookup = pd.read_excel("examples/Backtesting/lookup_withmissing.xlsx") except FileNotFoundError as e: print(e) # As usual, we set up some experiment. # Note that we now need to ensure that the names fit the names in the provided .xlsx file! dict_solvent = { "DMAc": r"CC(N(C)C)=O", "Butyornitrile": r"CCCC#N", "Butyl Ester": r"CCCCOC(C)=O", "p-Xylene": r"CC1=CC=C(C)C=C1", } dict_base = { "Potassium acetate": r"O=C([O-])C.[K+]", "Potassium pivalate": r"O=C([O-])C(C)(C)C.[K+]", "Cesium acetate": r"O=C([O-])C.[Cs+]", "Cesium pivalate": r"O=C([O-])C(C)(C)C.[Cs+]", } dict_ligand = { "BrettPhos": r"CC(C)C1=CC(C(C)C)=C(C(C(C)C)=C1)C2=C(P(C3CCCCC3)C4CCCCC4)C(OC)=" "CC=C2OC", "Di-tert-butylphenylphosphine": r"CC(C)(C)P(C1=CC=CC=C1)C(C)(C)C", "(t-Bu)PhCPhos": r"CN(C)C1=CC=CC(N(C)C)=C1C2=CC=CC=C2P(C(C)(C)C)C3=CC=CC=C3", "Tricyclohexylphosphine": r"P(C1CCCCC1)(C2CCCCC2)C3CCCCC3", "PPh3": r"P(C1=CC=CC=C1)(C2=CC=CC=C2)C3=CC=CC=C3", "XPhos": r"CC(C1=C(C2=CC=CC=C2P(C3CCCCC3)C4CCCCC4)C(C(C)C)=CC(C(C)C)=C1)C", "P(2-furyl)3": r"P(C1=CC=CO1)(C2=CC=CO2)C3=CC=CO3", "Methyldiphenylphosphine": r"CP(C1=CC=CC=C1)C2=CC=CC=C2", "1268824-69-6": r"CC(OC1=C(P(C2CCCCC2)C3CCCCC3)C(OC(C)C)=CC=C1)C", "JackiePhos": r"FC(F)(F)C1=CC(P(C2=C(C3=C(C(C)C)C=C(C(C)C)C=C3C(C)C)C(OC)=CC=C2OC)" r"C4=CC(C(F)(F)F)=CC(C(F)(F)F)=C4)=CC(C(F)(F)F)=C1", "SCHEMBL15068049": r"C[C@]1(O2)O[C@](C[C@]2(C)P3C4=CC=CC=C4)(C)O[C@]3(C)C1", "Me2PPh": r"CP(C)C1=CC=CC=C1", } #### Creating the searchspace and the objective # Here, we create the parameter objects, the searchspace and the objective. solvent = SubstanceParameter(name="Solvent", data=dict_solvent, encoding="MORDRED") base = SubstanceParameter(name="Base", data=dict_base, encoding="MORDRED") ligand = SubstanceParameter(name="Ligand", data=dict_ligand, encoding="MORDRED")
### Example for full simulation loop using a table-based lookup mechanism with incomplete data # This example shows a simulation for a direct arylation where not all combinations were measured. # This allows us to access information about previously conducted experiments from .xlsx-files. # This example assumes some basic familiarity with using BayBE and the lookup mechanism. # We refer to [`campaign`](./../Basics/campaign.md) for a more basic example, and # to [`full_lookup`](./full_lookup.md) for details on the lookup mechanism. #### Necessary imports for this example #### Parameters for a full simulation loop # For the full simulation, we need to define some additional parameters. # These are the number of Monte Carlo runs and the number of experiments to be conducted per run. N_MC_ITERATIONS = 2 N_DOE_ITERATIONS = 5 #### Lookup functionality and data creation # See [`full_lookup`](./full_lookup.md) for details. try: lookup = pd.read_excel("./lookup_withmissing.xlsx") except FileNotFoundError: try: lookup = pd.read_excel("examples/Backtesting/lookup_withmissing.xlsx") except FileNotFoundError as e: print(e) # As usual, we set up some experiment. # Note that we now need to ensure that the names fit the names in the provided .xlsx file! dict_solvent = { "DMAc": r"CC(N(C)C)=O", "Butyornitrile": r"CCCC#N", "Butyl Ester": r"CCCCOC(C)=O", "p-Xylene": r"CC1=CC=C(C)C=C1", } dict_base = { "Potassium acetate": r"O=C([O-])C.[K+]", "Potassium pivalate": r"O=C([O-])C(C)(C)C.[K+]", "Cesium acetate": r"O=C([O-])C.[Cs+]", "Cesium pivalate": r"O=C([O-])C(C)(C)C.[Cs+]", } dict_ligand = { "BrettPhos": r"CC(C)C1=CC(C(C)C)=C(C(C(C)C)=C1)C2=C(P(C3CCCCC3)C4CCCCC4)C(OC)=" "CC=C2OC", "Di-tert-butylphenylphosphine": r"CC(C)(C)P(C1=CC=CC=C1)C(C)(C)C", "(t-Bu)PhCPhos": r"CN(C)C1=CC=CC(N(C)C)=C1C2=CC=CC=C2P(C(C)(C)C)C3=CC=CC=C3", "Tricyclohexylphosphine": r"P(C1CCCCC1)(C2CCCCC2)C3CCCCC3", "PPh3": r"P(C1=CC=CC=C1)(C2=CC=CC=C2)C3=CC=CC=C3", "XPhos": r"CC(C1=C(C2=CC=CC=C2P(C3CCCCC3)C4CCCCC4)C(C(C)C)=CC(C(C)C)=C1)C", "P(2-furyl)3": r"P(C1=CC=CO1)(C2=CC=CO2)C3=CC=CO3", "Methyldiphenylphosphine": r"CP(C1=CC=CC=C1)C2=CC=CC=C2", "1268824-69-6": r"CC(OC1=C(P(C2CCCCC2)C3CCCCC3)C(OC(C)C)=CC=C1)C", "JackiePhos": r"FC(F)(F)C1=CC(P(C2=C(C3=C(C(C)C)C=C(C(C)C)C=C3C(C)C)C(OC)=CC=C2OC)" r"C4=CC(C(F)(F)F)=CC(C(F)(F)F)=C4)=CC(C(F)(F)F)=C1", "SCHEMBL15068049": r"C[C@]1(O2)O[C@](C[C@]2(C)P3C4=CC=CC=C4)(C)O[C@]3(C)C1", "Me2PPh": r"CP(C)C1=CC=CC=C1", } #### Creating the searchspace and the objective # Here, we create the parameter objects, the searchspace and the objective. solvent = SubstanceParameter(name="Solvent", data=dict_solvent, encoding="MORDRED") base = SubstanceParameter(name="Base", data=dict_base, encoding="MORDRED") ligand = SubstanceParameter(name="Ligand", data=dict_ligand, encoding="MORDRED")
temperature = NumericalDiscreteParameter(
2
2023-11-27 17:02:40+00:00
16k
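The record's next_line shows the setup continuing with a temperature parameter. A hedged sketch of how such an example typically finishes the searchspace and objective; the values and keyword names below are illustrative and may differ across baybe versions:

temperature = NumericalDiscreteParameter(name="Temp_C", values=[90, 105, 120], tolerance=2)
parameters = [solvent, base, ligand, temperature]
searchspace = SearchSpace.from_product(parameters=parameters)
objective = Objective(mode="SINGLE", targets=[NumericalTarget(name="yield", mode="MAX")])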
UX-Decoder/LLaVA-Grounding
llava/model/language_model/llava_llama_gd.py
[ { "identifier": "LlavaMetaModel", "path": "llava/model/llava_arch.py", "snippet": "class LlavaMetaModel:\n\n def __init__(self, config):\n super(LlavaMetaModel, self).__init__(config)\n\n if hasattr(config, \"mm_vision_tower\"):\n self.vision_tower = build_vision_tower(config...
from typing import List, Optional, Tuple, Union from torch.nn import CrossEntropyLoss from transformers import AutoConfig, AutoModelForCausalLM, \ LlamaConfig, LlamaModel, LlamaForCausalLM from transformers.modeling_outputs import CausalLMOutputWithPast from ..llava_arch import LlavaMetaModel, LlavaMetaForCausalLM, LlavaMetaForCausalLM_gd,LlavaMetaForCausalLM_gd_interactive import torch import torch.nn as nn import transformers
12,155
# Copyright 2023 Haotian Liu # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. IGNORE_INDEX=-100 # @dataclass class DataCollatorForSupervisedDataset(object): """Collate examples for supervised fine-tuning.""" # tokenizer: transformers.PreTrainedTokenizer def __call__(self, instances,tokenizer): input_ids, labels = tuple([instance[key] for instance in instances] for key in ("input_ids", "labels")) input_ids = torch.nn.utils.rnn.pad_sequence( input_ids, batch_first=True, padding_value=tokenizer.pad_token_id) labels = torch.nn.utils.rnn.pad_sequence(labels, batch_first=True, padding_value=IGNORE_INDEX) input_ids = input_ids[:, :tokenizer.model_max_length] labels = labels[:, :tokenizer.model_max_length] batch = dict( input_ids=input_ids, labels=labels, attention_mask=input_ids.ne(tokenizer.pad_token_id), ) if 'image_clip' in instances[0]: images = [instance['image_clip'] for instance in instances] if all(x is not None and x.shape == images[0].shape for x in images): batch['images'] = torch.stack(images) else: batch['images'] = images return batch class LlavaConfig(LlamaConfig): model_type = "llava"
# Copyright 2023 Haotian Liu # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. IGNORE_INDEX=-100 # @dataclass class DataCollatorForSupervisedDataset(object): """Collate examples for supervised fine-tuning.""" # tokenizer: transformers.PreTrainedTokenizer def __call__(self, instances,tokenizer): input_ids, labels = tuple([instance[key] for instance in instances] for key in ("input_ids", "labels")) input_ids = torch.nn.utils.rnn.pad_sequence( input_ids, batch_first=True, padding_value=tokenizer.pad_token_id) labels = torch.nn.utils.rnn.pad_sequence(labels, batch_first=True, padding_value=IGNORE_INDEX) input_ids = input_ids[:, :tokenizer.model_max_length] labels = labels[:, :tokenizer.model_max_length] batch = dict( input_ids=input_ids, labels=labels, attention_mask=input_ids.ne(tokenizer.pad_token_id), ) if 'image_clip' in instances[0]: images = [instance['image_clip'] for instance in instances] if all(x is not None and x.shape == images[0].shape for x in images): batch['images'] = torch.stack(images) else: batch['images'] = images return batch class LlavaConfig(LlamaConfig): model_type = "llava"
class LlavaLlamaModel(LlavaMetaModel, LlamaModel):
0
2023-12-04 10:59:21+00:00
16k
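A standalone sketch of the padding pattern DataCollatorForSupervisedDataset applies above, assuming a pad id of 0 purely for illustration:

import torch
from torch.nn.utils.rnn import pad_sequence

seqs = [torch.tensor([1, 2, 3]), torch.tensor([4, 5])]
padded = pad_sequence(seqs, batch_first=True, padding_value=0)  # shape (2, 3)
attention_mask = padded.ne(0)  # True on real tokens, False on padding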
daveredrum/SceneTex
models/pipeline/texture_pipeline.py
[ { "identifier": "TextureMesh", "path": "models/modules/meshes.py", "snippet": "class TextureMesh(nn.Module):\n def __init__(self, \n config,\n device\n ): \n \n super().__init__()\n \n self.config = config\n self.device = device\n\n self.num_...
import random import wandb import json import os import time import torch import torch.nn as nn import torch.nn.functional as F import torchvision import numpy as np import pytorch_lightning as pl import matplotlib.pyplot as plt import sys import open_clip from torch.optim import Adam, AdamW from torch.optim.lr_scheduler import LinearLR from omegaconf import OmegaConf from tqdm import tqdm from omegaconf import OmegaConf from PIL import Image from copy import deepcopy from pathlib import Path from pytorch3d.io import ( load_obj, load_objs_as_meshes ) from pytorch3d.renderer import TexturesUV from pytorch3d.ops import interpolate_face_attributes from models.modules import TextureMesh, Studio, Guidance
10,858
# mat # customized sys.path.append("./lib") class TexturePipeline(nn.Module): def __init__(self, config, stamp, device ): super().__init__() self.config = config self.stamp = stamp self.prompt = config.prompt + ", " + config.a_prompt if config.a_prompt else config.prompt self.n_prompt = config.n_prompt self.device = device self.weights_dtype = torch.float16 if self.config.enable_half_precision else torch.float32 print("=> Use precision: {}".format(self.weights_dtype)) pl.seed_everything(self.config.seed) """call this after to(device)""" def configure(self, inference_mode=False): if not inference_mode: self.log_name = "_".join(self.config.prompt.split(' ')) self.log_stamp = self.stamp self.log_dir = os.path.join(self.config.log_dir, self.log_name, self.config.loss_type, self.log_stamp) # override config self.config.log_name = self.log_name self.config.log_stamp = self.log_stamp self.config.log_dir = self.log_dir # 3D assets self._init_mesh() # studio self._init_studio() # instances self._init_anchors() if not inference_mode: # diffusion self._init_guidance() # optimization self._configure_optimizers() self._init_logger() if self.config.enable_clip_benchmark: self.clip, _, self.clip_preprocess = open_clip.create_model_and_transforms('ViT-B-32', pretrained='laion2b_s34b_b79k') self.clip_tokenizer = open_clip.get_tokenizer('ViT-B-32') def _init_studio(self): self.studio = Studio(self.config, self.device) def _init_mesh(self): self.texture_mesh = TextureMesh(self.config, self.device) def _init_guidance(self):
# mat # customized sys.path.append("./lib") class TexturePipeline(nn.Module): def __init__(self, config, stamp, device ): super().__init__() self.config = config self.stamp = stamp self.prompt = config.prompt + ", " + config.a_prompt if config.a_prompt else config.prompt self.n_prompt = config.n_prompt self.device = device self.weights_dtype = torch.float16 if self.config.enable_half_precision else torch.float32 print("=> Use precision: {}".format(self.weights_dtype)) pl.seed_everything(self.config.seed) """call this after to(device)""" def configure(self, inference_mode=False): if not inference_mode: self.log_name = "_".join(self.config.prompt.split(' ')) self.log_stamp = self.stamp self.log_dir = os.path.join(self.config.log_dir, self.log_name, self.config.loss_type, self.log_stamp) # override config self.config.log_name = self.log_name self.config.log_stamp = self.log_stamp self.config.log_dir = self.log_dir # 3D assets self._init_mesh() # studio self._init_studio() # instances self._init_anchors() if not inference_mode: # diffusion self._init_guidance() # optimization self._configure_optimizers() self._init_logger() if self.config.enable_clip_benchmark: self.clip, _, self.clip_preprocess = open_clip.create_model_and_transforms('ViT-B-32', pretrained='laion2b_s34b_b79k') self.clip_tokenizer = open_clip.get_tokenizer('ViT-B-32') def _init_studio(self): self.studio = Studio(self.config, self.device) def _init_mesh(self): self.texture_mesh = TextureMesh(self.config, self.device) def _init_guidance(self):
self.guidance = Guidance(self.config, self.device)
2
2023-11-28 15:38:40+00:00
16k
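When enable_clip_benchmark is set, configure() above loads an open_clip model. A sketch of how such a model is typically used to score a render against the prompt; the file name and prompt text are illustrative:

import torch
import open_clip
from PIL import Image

model, _, preprocess = open_clip.create_model_and_transforms("ViT-B-32", pretrained="laion2b_s34b_b79k")
tokenizer = open_clip.get_tokenizer("ViT-B-32")

image = preprocess(Image.open("render.png")).unsqueeze(0)
text = tokenizer(["a cozy living room"])
with torch.no_grad():
    # cosine similarity between image and text embeddings as a rough CLIP score
    score = torch.cosine_similarity(model.encode_image(image), model.encode_text(text))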
Vchitect/VBench
vbench/third_party/umt/datasets/build.py
[ { "identifier": "TubeMaskingGenerator", "path": "vbench/third_party/umt/datasets/masking_generator.py", "snippet": "class TubeMaskingGenerator:\n def __init__(self, input_size, mask_ratio):\n self.frames, self.height, self.width = input_size\n self.num_patches_per_frame = self.height * ...
import os from torchvision import transforms from .transforms import * from .masking_generator import TubeMaskingGenerator, RandomMaskingGenerator from .mae import VideoMAE from .kinetics import VideoClsDataset from .kinetics_sparse import VideoClsDataset_sparse from .ssv2 import SSVideoClsDataset, SSRawFrameClsDataset
14,384
class DataAugmentationForVideoMAE(object): def __init__(self, args): self.input_mean = [0.485, 0.456, 0.406] # IMAGENET_DEFAULT_MEAN self.input_std = [0.229, 0.224, 0.225] # IMAGENET_DEFAULT_STD normalize = GroupNormalize(self.input_mean, self.input_std) self.train_augmentation = GroupMultiScaleCrop(args.input_size, [1, .875, .75, .66]) if args.color_jitter > 0: self.transform = transforms.Compose([ self.train_augmentation, GroupColorJitter(args.color_jitter), GroupRandomHorizontalFlip(flip=args.flip), Stack(roll=False), ToTorchFormatTensor(div=True), normalize, ]) else: self.transform = transforms.Compose([ self.train_augmentation, GroupRandomHorizontalFlip(flip=args.flip), Stack(roll=False), ToTorchFormatTensor(div=True), normalize, ]) if args.mask_type == 'tube': self.masked_position_generator = TubeMaskingGenerator( args.window_size, args.mask_ratio ) elif args.mask_type == 'random':
class DataAugmentationForVideoMAE(object): def __init__(self, args): self.input_mean = [0.485, 0.456, 0.406] # IMAGENET_DEFAULT_MEAN self.input_std = [0.229, 0.224, 0.225] # IMAGENET_DEFAULT_STD normalize = GroupNormalize(self.input_mean, self.input_std) self.train_augmentation = GroupMultiScaleCrop(args.input_size, [1, .875, .75, .66]) if args.color_jitter > 0: self.transform = transforms.Compose([ self.train_augmentation, GroupColorJitter(args.color_jitter), GroupRandomHorizontalFlip(flip=args.flip), Stack(roll=False), ToTorchFormatTensor(div=True), normalize, ]) else: self.transform = transforms.Compose([ self.train_augmentation, GroupRandomHorizontalFlip(flip=args.flip), Stack(roll=False), ToTorchFormatTensor(div=True), normalize, ]) if args.mask_type == 'tube': self.masked_position_generator = TubeMaskingGenerator( args.window_size, args.mask_ratio ) elif args.mask_type == 'random':
self.masked_position_generator = RandomMaskingGenerator(
1
2023-11-27 12:41:46+00:00
16k
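A minimal sketch of the "tube" masking scheme referenced by TubeMaskingGenerator in the context above: one random spatial mask is drawn per clip and repeated across frames. The function name is illustrative:

import numpy as np

def tube_mask(frames: int, height: int, width: int, mask_ratio: float) -> np.ndarray:
    per_frame = height * width
    n_mask = int(mask_ratio * per_frame)
    mask = np.hstack([np.zeros(per_frame - n_mask), np.ones(n_mask)])
    np.random.shuffle(mask)            # random spatial pattern
    return np.tile(mask, frames)       # identical pattern in every frame ("tube")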
HyeonHo99/Video-Motion-Customization
showone/models/unet_3d_condition.py
[ { "identifier": "TransformerTemporalModel", "path": "showone/models/transformer_temporal.py", "snippet": "class TransformerTemporalModel(ModelMixin, ConfigMixin):\n \"\"\"\n A Transformer model for video-like data.\n\n Parameters:\n num_attention_heads (`int`, *optional*, defaults to 16)...
from dataclasses import dataclass from typing import Any, Dict, List, Optional, Tuple, Union from diffusers.configuration_utils import ConfigMixin, register_to_config from diffusers.loaders import UNet2DConditionLoadersMixin from diffusers.utils import BaseOutput, logging from diffusers.models.activations import get_activation from diffusers.models.attention_processor import AttentionProcessor, AttnProcessor from diffusers.models.embeddings import ( GaussianFourierProjection, ImageHintTimeEmbedding, ImageProjection, ImageTimeEmbedding, TextImageProjection, TextImageTimeEmbedding, TextTimeEmbedding, TimestepEmbedding, Timesteps, ) from diffusers.models.modeling_utils import ModelMixin from .transformer_temporal import TransformerTemporalModel from .unet_3d_blocks import ( CrossAttnDownBlock3D, CrossAttnUpBlock3D, DownBlock3D, UNetMidBlock3DCrossAttn, UNetMidBlock3DSimpleCrossAttn, UpBlock3D, get_down_block, get_up_block, ) from diffusers.utils import WEIGHTS_NAME import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint import os, json
13,182
for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention. Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. """ # count = len(self.attn_processors.keys()) # ignore temporal attention count = len({k: v for k, v in self.attn_processors.items() if "temp_" not in k}.keys()) # Show-1 original line #count = len(self.attn_processors.keys()) # --> If BoxDiff: use this line if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor") and "temp_" not in name: if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor) def set_default_attn_processor(self): """ Disables custom attention processors and sets the default attention implementation. """ self.set_attn_processor(AttnProcessor()) def set_attention_slice(self, slice_size): r""" Enable sliced attention computation. When this option is enabled, the attention module splits the input tensor in slices to compute attention in several steps. This is useful for saving some memory in exchange for a small decrease in speed. Args: slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`): When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim` must be a multiple of `slice_size`. 
""" sliceable_head_dims = [] def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module): if hasattr(module, "set_attention_slice"): sliceable_head_dims.append(module.sliceable_head_dim) for child in module.children(): fn_recursive_retrieve_sliceable_dims(child) # retrieve number of attention layers for module in self.children(): fn_recursive_retrieve_sliceable_dims(module) num_sliceable_layers = len(sliceable_head_dims) if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory slice_size = [dim // 2 for dim in sliceable_head_dims] elif slice_size == "max": # make smallest slice possible slice_size = num_sliceable_layers * [1] slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size if len(slice_size) != len(sliceable_head_dims): raise ValueError( f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different" f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}." ) for i in range(len(slice_size)): size = slice_size[i] dim = sliceable_head_dims[i] if size is not None and size > dim: raise ValueError(f"size {size} has to be smaller or equal to {dim}.") # Recursively walk through all the children. # Any children which exposes the set_attention_slice method # gets the message def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]): if hasattr(module, "set_attention_slice"): module.set_attention_slice(slice_size.pop()) for child in module.children(): fn_recursive_set_attention_slice(child, slice_size) reversed_slice_size = list(reversed(slice_size)) for module in self.children(): fn_recursive_set_attention_slice(module, reversed_slice_size) def _set_gradient_checkpointing(self, module, value=False):
# Copyright 2023 Alibaba DAMO-VILAB and The HuggingFace Team. All rights reserved. # Copyright 2023 The ModelScope Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from diffusers.models.transformer_temporal import TransformerTemporalModel logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class UNet3DConditionOutput(BaseOutput): """ Args: sample (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`): Hidden states conditioned on `encoder_hidden_states` input. Output of last layer of model. """ sample: torch.FloatTensor class UNet3DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin): r""" UNet3DConditionModel is a conditional 2D UNet model that takes in a noisy sample, conditional state, and a timestep and returns sample shaped output. This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library implements for all the models (such as downloading or saving, etc.) Parameters: sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`): Height and width of input/output sample. in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample. out_channels (`int`, *optional*, defaults to 4): The number of channels in the output. down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`): The tuple of downsample blocks to use. up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D",)`): The tuple of upsample blocks to use. block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`): The tuple of output channels for each block. layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block. downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution. mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block. act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use. norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization. If `None`, it will skip the normalization and activation layers in post-processing norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization. cross_attention_dim (`int`, *optional*, defaults to 1280): The dimension of the cross attention features. attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads. 
""" _supports_gradient_checkpointing = True @register_to_config def __init__( self, sample_size: Optional[int] = None, in_channels: int = 4, out_channels: int = 4, center_input_sample: bool = False, flip_sin_to_cos: bool = True, freq_shift: int = 0, down_block_types: Tuple[str] = ( "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D", ), mid_block_type: Optional[str] = "UNetMidBlock3DCrossAttn", up_block_types: Tuple[str] = ("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"), only_cross_attention: Union[bool, Tuple[bool]] = False, block_out_channels: Tuple[int] = (320, 640, 1280, 1280), layers_per_block: Union[int, Tuple[int]] = 2, downsample_padding: int = 1, mid_block_scale_factor: float = 1, act_fn: str = "silu", norm_num_groups: Optional[int] = 32, norm_eps: float = 1e-5, cross_attention_dim: Union[int, Tuple[int]] = 1280, transformer_layers_per_block: Union[int, Tuple[int]] = 1, encoder_hid_dim: Optional[int] = None, encoder_hid_dim_type: Optional[str] = None, attention_head_dim: Union[int, Tuple[int]] = 8, num_attention_heads: Optional[Union[int, Tuple[int]]] = None, dual_cross_attention: bool = False, use_linear_projection: bool = False, class_embed_type: Optional[str] = None, addition_embed_type: Optional[str] = None, addition_time_embed_dim: Optional[int] = None, num_class_embeds: Optional[int] = None, upcast_attention: bool = False, resnet_time_scale_shift: str = "default", resnet_skip_time_act: bool = False, resnet_out_scale_factor: int = 1.0, time_embedding_type: str = "positional", time_embedding_dim: Optional[int] = None, time_embedding_act_fn: Optional[str] = None, timestep_post_act: Optional[str] = None, time_cond_proj_dim: Optional[int] = None, conv_in_kernel: int = 3, conv_out_kernel: int = 3, projection_class_embeddings_input_dim: Optional[int] = None, class_embeddings_concat: bool = False, mid_block_only_cross_attention: Optional[bool] = None, cross_attention_norm: Optional[str] = None, addition_embed_type_num_heads=64, transfromer_in_opt: bool =False, ): super().__init__() self.sample_size = sample_size self.transformer_in_opt = transfromer_in_opt if num_attention_heads is not None: raise ValueError( "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19." ) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. num_attention_heads = num_attention_heads or attention_head_dim # Check inputs if len(down_block_types) != len(up_block_types): raise ValueError( f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}." ) if len(block_out_channels) != len(down_block_types): raise ValueError( f"Must provide the same number of `block_out_channels` as `down_block_types`. 
`block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}." ) if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types): raise ValueError( f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}." ) if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types): raise ValueError( f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}." ) if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types): raise ValueError( f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}." ) if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types): raise ValueError( f"Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}." ) if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types): raise ValueError( f"Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. `down_block_types`: {down_block_types}." ) # input conv_in_padding = (conv_in_kernel - 1) // 2 self.conv_in = nn.Conv2d( in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding ) if self.transformer_in_opt: self.transformer_in = TransformerTemporalModel( num_attention_heads=8, attention_head_dim=64, in_channels=block_out_channels[0], num_layers=1, ) # time if time_embedding_type == "fourier": time_embed_dim = time_embedding_dim or block_out_channels[0] * 2 if time_embed_dim % 2 != 0: raise ValueError(f"`time_embed_dim` should be divisible by 2, but is {time_embed_dim}.") self.time_proj = GaussianFourierProjection( time_embed_dim // 2, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos ) timestep_input_dim = time_embed_dim elif time_embedding_type == "positional": time_embed_dim = time_embedding_dim or block_out_channels[0] * 4 self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) timestep_input_dim = block_out_channels[0] else: raise ValueError( f"{time_embedding_type} does not exist. Please make sure to use one of `fourier` or `positional`." ) self.time_embedding = TimestepEmbedding( timestep_input_dim, time_embed_dim, act_fn=act_fn, post_act_fn=timestep_post_act, cond_proj_dim=time_cond_proj_dim, ) if encoder_hid_dim_type is None and encoder_hid_dim is not None: encoder_hid_dim_type = "text_proj" self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type) logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.") if encoder_hid_dim is None and encoder_hid_dim_type is not None: raise ValueError( f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}." ) if encoder_hid_dim_type == "text_proj": self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim) elif encoder_hid_dim_type == "text_image_proj": # image_embed_dim DOESN'T have to be `cross_attention_dim`. 
To not clutter the __init__ too much # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use # case when `addition_embed_type == "text_image_proj"` (Kadinsky 2.1)` self.encoder_hid_proj = TextImageProjection( text_embed_dim=encoder_hid_dim, image_embed_dim=cross_attention_dim, cross_attention_dim=cross_attention_dim, ) elif encoder_hid_dim_type == "image_proj": # Kandinsky 2.2 self.encoder_hid_proj = ImageProjection( image_embed_dim=encoder_hid_dim, cross_attention_dim=cross_attention_dim, ) elif encoder_hid_dim_type is not None: raise ValueError( f"encoder_hid_dim_type: {encoder_hid_dim_type} must be None, 'text_proj' or 'text_image_proj'." ) else: self.encoder_hid_proj = None # class embedding if class_embed_type is None and num_class_embeds is not None: self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim) elif class_embed_type == "timestep": self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn) elif class_embed_type == "identity": self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim) elif class_embed_type == "projection": if projection_class_embeddings_input_dim is None: raise ValueError( "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set" ) # The projection `class_embed_type` is the same as the timestep `class_embed_type` except # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings # 2. it projects from an arbitrary input dimension. # # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations. # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings. # As a result, `TimestepEmbedding` can be passed arbitrary vectors. self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) elif class_embed_type == "simple_projection": if projection_class_embeddings_input_dim is None: raise ValueError( "`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set" ) self.class_embedding = nn.Linear(projection_class_embeddings_input_dim, time_embed_dim) else: self.class_embedding = None if addition_embed_type == "text": if encoder_hid_dim is not None: text_time_embedding_from_dim = encoder_hid_dim else: text_time_embedding_from_dim = cross_attention_dim self.add_embedding = TextTimeEmbedding( text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads ) elif addition_embed_type == "text_image": # text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. 
To not clutter the __init__ too much # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use # case when `addition_embed_type == "text_image"` (Kadinsky 2.1)` self.add_embedding = TextImageTimeEmbedding( text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim ) elif addition_embed_type == "text_time": self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift) self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) elif addition_embed_type == "image": # Kandinsky 2.2 self.add_embedding = ImageTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim) elif addition_embed_type == "image_hint": # Kandinsky 2.2 ControlNet self.add_embedding = ImageHintTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim) elif addition_embed_type is not None: raise ValueError(f"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'.") if time_embedding_act_fn is None: self.time_embed_act = None else: self.time_embed_act = get_activation(time_embedding_act_fn) self.down_blocks = nn.ModuleList([]) self.up_blocks = nn.ModuleList([]) if isinstance(only_cross_attention, bool): if mid_block_only_cross_attention is None: mid_block_only_cross_attention = only_cross_attention only_cross_attention = [only_cross_attention] * len(down_block_types) if mid_block_only_cross_attention is None: mid_block_only_cross_attention = False if isinstance(num_attention_heads, int): num_attention_heads = (num_attention_heads,) * len(down_block_types) if isinstance(attention_head_dim, int): attention_head_dim = (attention_head_dim,) * len(down_block_types) if isinstance(cross_attention_dim, int): cross_attention_dim = (cross_attention_dim,) * len(down_block_types) if isinstance(layers_per_block, int): layers_per_block = [layers_per_block] * len(down_block_types) if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types) if class_embeddings_concat: # The time embeddings are concatenated with the class embeddings. 
The dimension of the
            # time embeddings passed to the down, middle, and up blocks is twice the dimension of the
            # regular time embeddings
            blocks_time_embed_dim = time_embed_dim * 2
        else:
            blocks_time_embed_dim = time_embed_dim

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block[i],
                transformer_layers_per_block=transformer_layers_per_block[i],
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=blocks_time_embed_dim,
                add_downsample=not is_final_block,
                resnet_eps=norm_eps,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                cross_attention_dim=cross_attention_dim[i],
                num_attention_heads=num_attention_heads[i],
                downsample_padding=downsample_padding,
                dual_cross_attention=dual_cross_attention,
                use_linear_projection=use_linear_projection,
                only_cross_attention=only_cross_attention[i],
                upcast_attention=upcast_attention,
                resnet_time_scale_shift=resnet_time_scale_shift,
                resnet_skip_time_act=resnet_skip_time_act,
                resnet_out_scale_factor=resnet_out_scale_factor,
                cross_attention_norm=cross_attention_norm,
                attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel,
            )
            self.down_blocks.append(down_block)

        # mid
        if mid_block_type == "UNetMidBlock3DCrossAttn":
            self.mid_block = UNetMidBlock3DCrossAttn(
                transformer_layers_per_block=transformer_layers_per_block[-1],
                in_channels=block_out_channels[-1],
                temb_channels=blocks_time_embed_dim,
                resnet_eps=norm_eps,
                resnet_act_fn=act_fn,
                output_scale_factor=mid_block_scale_factor,
                resnet_time_scale_shift=resnet_time_scale_shift,
                cross_attention_dim=cross_attention_dim[-1],
                num_attention_heads=num_attention_heads[-1],
                resnet_groups=norm_num_groups,
                dual_cross_attention=dual_cross_attention,
                use_linear_projection=use_linear_projection,
                upcast_attention=upcast_attention,
            )
        elif mid_block_type == "UNetMidBlock3DSimpleCrossAttn":
            self.mid_block = UNetMidBlock3DSimpleCrossAttn(
                in_channels=block_out_channels[-1],
                temb_channels=blocks_time_embed_dim,
                resnet_eps=norm_eps,
                resnet_act_fn=act_fn,
                output_scale_factor=mid_block_scale_factor,
                cross_attention_dim=cross_attention_dim[-1],
                attention_head_dim=attention_head_dim[-1],
                resnet_groups=norm_num_groups,
                resnet_time_scale_shift=resnet_time_scale_shift,
                skip_time_act=resnet_skip_time_act,
                only_cross_attention=mid_block_only_cross_attention,
                cross_attention_norm=cross_attention_norm,
            )
        elif mid_block_type is None:
            self.mid_block = None
        else:
            raise ValueError(f"unknown mid_block_type : {mid_block_type}")

        # count how many layers upsample the images
        self.num_upsamplers = 0

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        reversed_layers_per_block = list(reversed(layers_per_block))
        reversed_cross_attention_dim = list(reversed(cross_attention_dim))
        reversed_transformer_layers_per_block = list(reversed(transformer_layers_per_block))
        only_cross_attention = list(reversed(only_cross_attention))

        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            is_final_block = i == len(block_out_channels) - 1

            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]

            # add upsample block for all BUT final layer
            if not is_final_block:
                add_upsample = True
                self.num_upsamplers += 1
            else:
                add_upsample = False

            up_block = get_up_block(
                up_block_type,
                num_layers=reversed_layers_per_block[i] + 1,
                transformer_layers_per_block=reversed_transformer_layers_per_block[i],
                in_channels=input_channel,
                out_channels=output_channel,
                prev_output_channel=prev_output_channel,
                temb_channels=blocks_time_embed_dim,
                add_upsample=add_upsample,
                resnet_eps=norm_eps,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                cross_attention_dim=reversed_cross_attention_dim[i],
                num_attention_heads=reversed_num_attention_heads[i],
                dual_cross_attention=dual_cross_attention,
                use_linear_projection=use_linear_projection,
                only_cross_attention=only_cross_attention[i],
                upcast_attention=upcast_attention,
                resnet_time_scale_shift=resnet_time_scale_shift,
                resnet_skip_time_act=resnet_skip_time_act,
                resnet_out_scale_factor=resnet_out_scale_factor,
                cross_attention_norm=cross_attention_norm,
                attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_num_groups is not None:
            self.conv_norm_out = nn.GroupNorm(
                num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps
            )
            self.conv_act = get_activation(act_fn)
        else:
            self.conv_norm_out = None
            self.conv_act = None

        conv_out_padding = (conv_out_kernel - 1) // 2
        self.conv_out = nn.Conv2d(
            block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding
        )

    @property
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        r"""
        Returns:
            `dict` of attention processors: A dictionary containing all attention processors used in the model with
            indexed by its weight name.
        """
        # set recursively
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        r"""
        Sets the attention processor to use to compute attention.

        Parameters:
            processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
                The instantiated processor class or a dictionary of processor classes that will be set as the
                processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path
                to the corresponding cross attention processor. This is strongly recommended when setting trainable
                attention processors.
        """
        # count = len(self.attn_processors.keys())
        # ignore temporal attention
        count = len({k: v for k, v in self.attn_processors.items() if "temp_" not in k}.keys())  # Show-1 original line
        # count = len(self.attn_processors.keys())  # --> If BoxDiff: use this line

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor") and "temp_" not in name:
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        """
        Disables custom attention processors and sets the default attention implementation.
        """
        self.set_attn_processor(AttnProcessor())

    def set_attention_slice(self, slice_size):
        r"""
        Enable sliced attention computation.

        When this option is enabled, the attention module splits the input tensor in slices to compute attention in
        several steps. This is useful for saving some memory in exchange for a small decrease in speed.

        Args:
            slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
                When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If
                `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is
                provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
                `attention_head_dim` must be a multiple of `slice_size`.
        """
        sliceable_head_dims = []

        def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module):
            if hasattr(module, "set_attention_slice"):
                sliceable_head_dims.append(module.sliceable_head_dim)

            for child in module.children():
                fn_recursive_retrieve_sliceable_dims(child)

        # retrieve number of attention layers
        for module in self.children():
            fn_recursive_retrieve_sliceable_dims(module)

        num_sliceable_layers = len(sliceable_head_dims)

        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = [dim // 2 for dim in sliceable_head_dims]
        elif slice_size == "max":
            # make smallest slice possible
            slice_size = num_sliceable_layers * [1]

        slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size

        if len(slice_size) != len(sliceable_head_dims):
            raise ValueError(
                f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
                f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
            )

        for i in range(len(slice_size)):
            size = slice_size[i]
            dim = sliceable_head_dims[i]
            if size is not None and size > dim:
                raise ValueError(f"size {size} has to be smaller or equal to {dim}.")

        # Recursively walk through all the children.
        # Any children which exposes the set_attention_slice method
        # gets the message
        def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
            if hasattr(module, "set_attention_slice"):
                module.set_attention_slice(slice_size.pop())

            for child in module.children():
                fn_recursive_set_attention_slice(child, slice_size)

        reversed_slice_size = list(reversed(slice_size))
        for module in self.children():
            fn_recursive_set_attention_slice(module, reversed_slice_size)

    def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):
6
2023-11-29 17:23:45+00:00
16k
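A minimal, self-contained sketch of the recursive named_children traversal that the record's attn_processors property and set_attn_processor method rely on; TinyAttention and TinyUNet are hypothetical stand-ins, not classes from the record:

import torch.nn as nn

class TinyAttention(nn.Module):
    """Stand-in for an attention layer exposing set_processor()/processor."""
    def __init__(self):
        super().__init__()
        self.processor = "default"

    def set_processor(self, processor):
        self.processor = processor

class TinyUNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.down = nn.ModuleList([TinyAttention(), TinyAttention()])
        self.mid = TinyAttention()

    def attn_processors(self):
        processors = {}

        def walk(name, module):
            # record any module that quacks like an attention layer ...
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor
            # ... then recurse into its children
            for sub_name, child in module.named_children():
                walk(f"{name}.{sub_name}", child)

        for name, module in self.named_children():
            walk(name, module)
        return processors

print(TinyUNet().attn_processors())
# {'down.0.processor': 'default', 'down.1.processor': 'default', 'mid.processor': 'default'}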
xmu-xiaoma666/X-Dreamer
train_x_dreamer.py
[ { "identifier": "DatasetMesh", "path": "dataset/dataset_mesh.py", "snippet": "class DatasetMesh(torch.utils.data.Dataset):\n\n\n def __init__(self, glctx, FLAGS, validate=False, gif=False):\n # Init \n self.glctx = glctx\n self.FLAGS = FLAGS\n sel...
import os
import time
import argparse
import json
import math
import numpy as np
import torch
import nvdiffrast.torch as dr
import itertools
import xatlas
import open3d as o3d
import random
import imageio
import os.path as osp
import pickle
from dataset.dataset_mesh import DatasetMesh
from dataset.dataset_mesh import get_camera_params
from geometry.dmtet_x_dreamer import DMTetGeometry
from geometry.dlmesh_x_dreamer import DLMesh
from render import obj
from render import material
from render import util
from render import mesh
from render import texture
from render import mlptexture
from render import light
from render import render
from sd_cglora import StableDiffusion
from tqdm import tqdm
from render import util
from render.video import Video
12,095
###############################################################################
# Mix background into a dataset image
###############################################################################
@torch.no_grad()
def prepare_batch(target, background='black'):
    target['mv'] = target['mv'].cuda()
    target['mvp'] = target['mvp'].cuda()
    target['campos'] = target['campos'].cuda()
    target['fov'] = target['fov'].cuda()
    target['normal_rotate'] = target['normal_rotate'].cuda()

    batch_size = target['mv'].shape[0]
    resolution = target['resolution']

    if background == 'white':
        target['background'] = torch.ones(batch_size, resolution[0], resolution[1], 3, dtype=torch.float32, device='cuda')
    if background == 'black':
        target['background'] = torch.zeros(batch_size, resolution[0], resolution[1], 3, dtype=torch.float32, device='cuda')

    return target

###############################################################################
# UV - map geometry & convert to a mesh
###############################################################################
@torch.no_grad()
def xatlas_uvmap(glctx, geometry, mat, FLAGS):
    eval_mesh = geometry.getMesh(mat)

    # Create uvs with xatlas
    v_pos = eval_mesh.v_pos.detach().cpu().numpy()
    t_pos_idx = eval_mesh.t_pos_idx.detach().cpu().numpy()
    vmapping, indices, uvs = xatlas.parametrize(v_pos, t_pos_idx)

    # Convert to tensors
    indices_int64 = indices.astype(np.uint64, casting='same_kind').view(np.int64)
    uvs = torch.tensor(uvs, dtype=torch.float32, device='cuda')
    faces = torch.tensor(indices_int64, dtype=torch.int64, device='cuda')

    new_mesh = mesh.Mesh(v_tex=uvs, t_tex_idx=faces, base=eval_mesh)
    mask, kd, ks, normal = render.render_uv(glctx, new_mesh, FLAGS.texture_res, eval_mesh.material['kd_ks_normal'])

    if FLAGS.layers > 1:
        kd = torch.cat((kd, torch.rand_like(kd[..., 0:1])), dim=-1)

    kd_min, kd_max = torch.tensor(FLAGS.kd_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.kd_max, dtype=torch.float32, device='cuda')
    ks_min, ks_max = torch.tensor(FLAGS.ks_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.ks_max, dtype=torch.float32, device='cuda')
    nrm_min, nrm_max = torch.tensor(FLAGS.nrm_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.nrm_max, dtype=torch.float32, device='cuda')

    new_mesh.material = material.Material({
        'bsdf' : mat['bsdf'],
'kd' : texture.Texture2D(kd, min_max=[kd_min, kd_max]),
8
2023-11-27 13:44:01+00:00
16k
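A torch-only sketch of the constant-color background batching used in prepare_batch above; the function name and shapes are hypothetical, not from the record:

import torch

def make_background(batch_size, height, width, color='black', device='cpu'):
    # One solid RGB image per batch element, matching the render resolution.
    value = 1.0 if color == 'white' else 0.0
    return torch.full((batch_size, height, width, 3), value, dtype=torch.float32, device=device)

bg = make_background(2, 4, 4, color='white')
print(bg.shape, bg.mean().item())  # torch.Size([2, 4, 4, 3]) 1.0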
zhenzhiwang/intercontrol
sample/global_joint_control.py
[ { "identifier": "ControlGaussianDiffusion", "path": "diffusion/control_diffusion.py", "snippet": "class ControlGaussianDiffusion(SpacedDiffusion):\n\n def inv_transform(self, data):\n assert self.std is not None and self.mean is not None\n #assert data.requires_grad == True\n std...
from diffusion.control_diffusion import ControlGaussianDiffusion
from diffusion.respace import SpacedDiffusion
from utils.fixseed import fixseed
from utils.parser_util import edit_control_args
from utils.model_util import load_controlmdm_and_diffusion
from utils import dist_util
from model.cfg_sampler import wrap_model
from data_loaders.get_data import get_dataset_loader
from data_loaders.humanml.scripts.motion_process import recover_from_ric
from data_loaders.humanml_utils import get_control_mask, HML_JOINT_NAMES
from data_loaders.humanml.utils.plot_script import plot_3d_motion
from model.ControlMDM import ControlMDM
import os
import numpy as np
import torch
import data_loaders.humanml.utils.paramUtil as paramUtil
import shutil
10,812
# This code is based on https://github.com/openai/guided-diffusion
"""
Generate a large batch of image samples from a model and save them as a large
numpy array. This can be used to produce samples for FID evaluation.
"""

def main():
    args = edit_control_args()
    assert args.multi_person == False, 'multi-person is not supported for this script'
    fixseed(args.seed)
    out_path = args.output_dir
    name = os.path.basename(os.path.dirname(args.model_path))
    niter = os.path.basename(args.model_path).replace('model', '').replace('.pt', '')
    max_frames = 196 if args.dataset in ['kit', 'humanml'] else 60
    fps = 12.5 if args.dataset == 'kit' else 20
    dist_util.setup_dist(args.device)
    if out_path == '':
        out_path = os.path.join(os.path.dirname(args.model_path),
                                'edit_{}_{}_{}_seed{}'.format(name, niter, args.inpainting_mask, args.seed))
        if args.text_condition != '':
            out_path += '_' + args.text_condition.replace(' ', '_').replace('.', '')
    print('Loading dataset...')
    assert args.num_samples <= args.batch_size, \
        f'Please either increase batch_size({args.batch_size}) or reduce num_samples({args.num_samples})'
    # Why do we need this check? To protect the GPU from a memory overload in the following line.
    # If your GPU can handle a batch size larger than the default, you can specify it through the --batch_size flag.
    # If it can't, and you still want to sample more prompts, run this script with different seeds
    # (specified through the --seed flag).
    args.batch_size = args.num_samples  # Sampling a single batch from the testset, with exactly args.num_samples
    data = get_dataset_loader(name=args.dataset,
                              batch_size=args.batch_size,
                              num_frames=max_frames,
                              split='test',
                              load_mode='train',
                              size=args.num_samples)  # in train mode, you get both text and motion.
    # data.fixed_length = n_frames
    total_num_samples = args.num_samples * args.num_repetitions

    print("Creating model and diffusion...")
DiffusionClass = ControlGaussianDiffusion if args.filter_noise else SpacedDiffusion
0
2023-11-27 05:28:02+00:00
16k
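The output-directory naming in the record is easy to get wrong when reimplementing; a minimal stdlib-only sketch of the same scheme (the example paths are made up):

import os

def default_out_path(model_path, inpainting_mask, seed, text_condition=''):
    # Mirrors the record: derive run name and iteration count from the checkpoint path.
    name = os.path.basename(os.path.dirname(model_path))
    niter = os.path.basename(model_path).replace('model', '').replace('.pt', '')
    out = os.path.join(os.path.dirname(model_path),
                       'edit_{}_{}_{}_seed{}'.format(name, niter, inpainting_mask, seed))
    if text_condition != '':
        out += '_' + text_condition.replace(' ', '_').replace('.', '')
    return out

print(default_out_path('save/run1/model000500.pt', 'global_joint', 10, 'a person walks.'))
# save/run1/edit_run1_000500_global_joint_seed10_a_person_walks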
moonbow721/DPoser
run/completion.py
[ { "identifier": "DistributedEvalSampler", "path": "lib/dataset/EvaSampler.py", "snippet": "class DistributedEvalSampler(Sampler):\n r\"\"\"\n DistributedEvalSampler is different from DistributedSampler.\n It does NOT add extra samples to make it evenly divisible.\n DistributedEvalSampler sho...
import os
import math
import numpy as np
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.multiprocessing as mp
from absl import app
from absl import flags
from absl.flags import argparse_flags
from ml_collections.config_flags import config_flags
from torch.utils.data import DataLoader
from lib.dataset.EvaSampler import DistributedEvalSampler
from lib.utils.misc import create_mask, linear_interpolation
from tensorboardX import SummaryWriter
from torch.utils.tensorboard import SummaryWriter
from lib.algorithms.advanced.model import ScoreModelFC
from lib.algorithms.advanced import sde_lib, sampling
from lib.algorithms.ema import ExponentialMovingAverage
from lib.algorithms.advanced import utils as mutils
from lib.dataset.AMASS import AMASSDataset, N_POSES, Posenormalizer
from lib.body_model.body_model import BodyModel
from lib.dataset.AMASS import Evaler
11,704
        if weighted:
            weight = 0.5 * torch.sqrt(1 + SNR)
        else:
            weight = 0.5

        dposer_loss = torch.mean(weight * self.loss_fn(x_0, denoise_data))
        return dposer_loss

    def get_loss_weights(self):
        """Set loss weights"""
        loss_weight = {'data': lambda cst, it: 100 * cst / (1 + it),
                       'dposer': lambda cst, it: 0.1 * cst * (it + 1)}
        return loss_weight

    @staticmethod
    def backward_step(loss_dict, weight_dict, it):
        w_loss = dict()
        for k in loss_dict:
            w_loss[k] = weight_dict[k](loss_dict[k], it)
        tot_loss = list(w_loss.values())
        tot_loss = torch.stack(tot_loss).sum()
        return tot_loss

    def optimize(self, observation, mask, time_strategy='3', lr=0.1,
                 sample_trun=5.0, sample_time=900, iterations=2, steps_per_iter=100):
        total_steps = iterations * steps_per_iter
        opti_variable = observation.clone().detach()
        opti_variable.requires_grad = True
        optimizer = torch.optim.Adam([opti_variable], lr, betas=(0.9, 0.999))
        weight_dict = self.get_loss_weights()
        loss_dict = dict()

        eps = 1e-3
        timesteps = torch.linspace(self.sde.T, eps, self.sde.N, device=observation.device)
        for it in range(iterations):
            for i in range(steps_per_iter):
                step = it * steps_per_iter + i
                optimizer.zero_grad()

                ''' *************      DPoser loss *********** '''
                if time_strategy == '1':
                    quan_t = torch.randint(self.sde.N, [1])
                elif time_strategy == '2':
                    quan_t = torch.tensor(sample_time)
                elif time_strategy == '3':
                    quan_t = self.sde.N - math.floor(
                        torch.tensor(total_steps - step - 1) * (self.sde.N / (sample_trun * total_steps))) - 2
                else:
                    raise NotImplementedError('unsupported time sampling strategy')
                t = timesteps[quan_t]
                vec_t = torch.ones(self.batch_size, device=observation.device) * t
                loss_dict['dposer'] = self.loss(opti_variable, vec_t, quan_t)
                loss_dict['data'] = self.data_loss(opti_variable * mask, observation * mask)
                ''' ***********      DPoser loss   ************ '''

                # Get total loss for backward pass
                tot_loss = self.backward_step(loss_dict, weight_dict, it)
                tot_loss.backward()
                optimizer.step()

        opti_variable = observation * mask + opti_variable * (1.0 - mask)

        return opti_variable


def inference(rank, args, config):
    print(f"Running DDP on rank {rank}.")
    setup(rank, args.gpus, args.port)

    ## Load the pre-trained checkpoint from disk.
    device = torch.device("cuda", rank)
    POSE_DIM = 3 if config.data.rot_rep == 'axis' else 6
    if config.model.type == 'ScoreModelFC':
        model = ScoreModelFC(
            config,
            n_poses=N_POSES,
            pose_dim=POSE_DIM,
            hidden_dim=config.model.HIDDEN_DIM,
            embed_dim=config.model.EMBED_DIM,
            n_blocks=config.model.N_BLOCKS,
        )
    else:
        raise NotImplementedError('unsupported model')
    model.to(device)

    ema = ExponentialMovingAverage(model.parameters(), decay=config.model.ema_rate)
    state = dict(optimizer=None, model=model, ema=ema, step=0)

    # restore checkpoint
    map_location = {'cuda:0': 'cuda:%d' % rank}
    checkpoint = torch.load(args.ckpt_path, map_location=map_location)
    model.load_state_dict(checkpoint['model_state_dict'])
    ema.load_state_dict(checkpoint['ema'])
    state['step'] = checkpoint['step']
    print(f"=> loaded checkpoint '{args.ckpt_path}' (step {state['step']})")

    model.eval()
    ema.copy_to(model.parameters())

    # Setup SDEs
    if config.training.sde.lower() == 'vpsde':
        sde = sde_lib.VPSDE(beta_min=config.model.beta_min, beta_max=config.model.beta_max, N=args.steps)
    elif config.training.sde.lower() == 'subvpsde':
        sde = sde_lib.subVPSDE(beta_min=config.model.beta_min, beta_max=config.model.beta_max, N=args.steps)
    elif config.training.sde.lower() == 'vesde':
        sde = sde_lib.VESDE(sigma_min=config.model.sigma_min, sigma_max=config.model.sigma_max, N=args.steps)
    else:
        raise NotImplementedError(f"SDE {config.training.sde} unknown.")

    # Setup sampling functions
    compfn = DPoserComp(model, sde, config.training.continuous, batch_size=args.batch_size)
    Normalizer = Posenormalizer(data_path=f'{args.dataset_folder}/{args.version}/train',
                                normalize=config.data.normalize, min_max=config.data.min_max,
                                rot_rep=config.data.rot_rep, device=device)
try: except ImportError as e: try: except ImportError as e: print('Tensorboard is not Installed') FLAGS = flags.FLAGS config_flags.DEFINE_config_file( "config", None, "Training configuration.", lock_config=False) flags.mark_flags_as_required(["config"]) def parse_args(argv): parser = argparse_flags.ArgumentParser(description='test diffusion model for completion on whole AMASS') parser.add_argument('--ckpt-path', type=str, default='./pretrained_models/axis-zscore-400k.pth') parser.add_argument('--dataset-folder', type=str, default='../data/AMASS/amass_processed', help='the folder includes necessary normalizing parameters') parser.add_argument('--version', type=str, default='version1', help='dataset version') parser.add_argument('--bodymodel-path', type=str, default='../body_models/smplx/SMPLX_NEUTRAL.npz', help='path of SMPLX model') parser.add_argument('--hypo', type=int, default=1, help='number of hypotheses to sample') parser.add_argument('--part', type=str, default='left_leg', choices=['left_leg', 'right_leg', 'left_arm', 'right_arm', 'trunk', 'hands', 'legs', 'arms']) # optional parser.add_argument('--steps', type=int, default=1000) parser.add_argument('--sample', type=int, help='sample testset to reduce data for other tasks') parser.add_argument('--batch_size', type=int, default=100, ) parser.add_argument('--gpus', type=int, help='num gpus to inference parallel') parser.add_argument('--port', type=str, default='14600', help='master port of machines') args = parser.parse_args(argv[1:]) return args def get_dataloader(dataset, num_replicas=1, rank=0, batch_size=10000): sampler = DistributedEvalSampler(dataset, num_replicas=num_replicas, rank=rank, shuffle=False) dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=0, sampler=sampler, persistent_workers=False, pin_memory=True, drop_last=True) return dataloader def setup(rank, world_size, port): os.environ['MASTER_ADDR'] = '127.0.0.1' os.environ['MASTER_PORT'] = port # initialize the process group dist.init_process_group("gloo", rank=rank, world_size=world_size) def cleanup(): dist.destroy_process_group() class DPoserComp(object): def __init__(self, diffusion_model, sde, continuous, batch_size=1): self.batch_size = batch_size self.sde = sde self.score_fn = mutils.get_score_fn(sde, diffusion_model, train=False, continuous=continuous) self.rsde = sde.reverse(self.score_fn, False) # L2 loss self.loss_fn = nn.MSELoss(reduction='none') self.data_loss = nn.MSELoss(reduction='mean') def one_step_denoise(self, x_t, t): drift, diffusion, alpha, sigma_2, score = self.rsde.sde(x_t, t, guide=True) x_0_hat = (x_t + sigma_2[:, None] * score) / alpha SNR = alpha / torch.sqrt(sigma_2)[:, None] return x_0_hat.detach(), SNR def multi_step_denoise(self, x_t, t, t_end, N=10): time_traj = linear_interpolation(t, t_end, N + 1) x_current = x_t for i in range(N): t_current = time_traj[i] t_before = time_traj[i + 1] alpha_current, sigma_current = self.sde.return_alpha_sigma(t_current) alpha_before, sigma_before = self.sde.return_alpha_sigma(t_before) score = self.score_fn(x_current, t_current, condition=None, mask=None) score = -score * sigma_current[:, None] # score to noise prediction x_current = alpha_before / alpha_current * ( x_current - sigma_current[:, None] * score) + sigma_before[ :, None] * score alpha, sigma = self.sde.return_alpha_sigma(time_traj[0]) SNR = alpha / sigma[:, None] return x_current.detach(), SNR def loss(self, x_0, vec_t, weighted=False, multi_denoise=False): # x_0: [B, j*6], vec_t: [B], z = 
torch.randn_like(x_0) mean, std = self.sde.marginal_prob(x_0, vec_t) perturbed_data = mean + std[:, None] * z # if multi_denoise: # not recommended denoise_data, SNR = self.multi_step_denoise(perturbed_data, vec_t, t_end=vec_t / (2 * 10), N=10) else: denoise_data, SNR = self.one_step_denoise(perturbed_data, vec_t) if weighted: weight = 0.5 * torch.sqrt(1 + SNR) else: weight = 0.5 dposer_loss = torch.mean(weight * self.loss_fn(x_0, denoise_data)) return dposer_loss def get_loss_weights(self): """Set loss weights""" loss_weight = {'data': lambda cst, it: 100 * cst / (1 + it), 'dposer': lambda cst, it: 0.1 * cst * (it + 1)} return loss_weight @staticmethod def backward_step(loss_dict, weight_dict, it): w_loss = dict() for k in loss_dict: w_loss[k] = weight_dict[k](loss_dict[k], it) tot_loss = list(w_loss.values()) tot_loss = torch.stack(tot_loss).sum() return tot_loss def optimize(self, observation, mask, time_strategy='3', lr=0.1, sample_trun=5.0, sample_time=900, iterations=2, steps_per_iter=100): total_steps = iterations * steps_per_iter opti_variable = observation.clone().detach() opti_variable.requires_grad = True optimizer = torch.optim.Adam([opti_variable], lr, betas=(0.9, 0.999)) weight_dict = self.get_loss_weights() loss_dict = dict() eps = 1e-3 timesteps = torch.linspace(self.sde.T, eps, self.sde.N, device=observation.device) for it in range(iterations): for i in range(steps_per_iter): step = it * steps_per_iter + i optimizer.zero_grad() ''' ************* DPoser loss *********** ''' if time_strategy == '1': quan_t = torch.randint(self.sde.N, [1]) elif time_strategy == '2': quan_t = torch.tensor(sample_time) elif time_strategy == '3': quan_t = self.sde.N - math.floor( torch.tensor(total_steps - step - 1) * (self.sde.N / (sample_trun * total_steps))) - 2 else: raise NotImplementedError('unsupported time sampling strategy') t = timesteps[quan_t] vec_t = torch.ones(self.batch_size, device=observation.device) * t loss_dict['dposer'] = self.loss(opti_variable, vec_t, quan_t) loss_dict['data'] = self.data_loss(opti_variable * mask, observation * mask) ''' *********** DPoser loss ************ ''' # Get total loss for backward pass tot_loss = self.backward_step(loss_dict, weight_dict, it) tot_loss.backward() optimizer.step() opti_variable = observation * mask + opti_variable * (1.0 - mask) return opti_variable def inference(rank, args, config): print(f"Running DDP on rank {rank}.") setup(rank, args.gpus, args.port) ## Load the pre-trained checkpoint from disk. 
device = torch.device("cuda", rank) POSE_DIM = 3 if config.data.rot_rep == 'axis' else 6 if config.model.type == 'ScoreModelFC': model = ScoreModelFC( config, n_poses=N_POSES, pose_dim=POSE_DIM, hidden_dim=config.model.HIDDEN_DIM, embed_dim=config.model.EMBED_DIM, n_blocks=config.model.N_BLOCKS, ) else: raise NotImplementedError('unsupported model') model.to(device) ema = ExponentialMovingAverage(model.parameters(), decay=config.model.ema_rate) state = dict(optimizer=None, model=model, ema=ema, step=0) # restore checkpoint map_location = {'cuda:0': 'cuda:%d' % rank} checkpoint = torch.load(args.ckpt_path, map_location=map_location) model.load_state_dict(checkpoint['model_state_dict']) ema.load_state_dict(checkpoint['ema']) state['step'] = checkpoint['step'] print(f"=> loaded checkpoint '{args.ckpt_path}' (step {state['step']})") model.eval() ema.copy_to(model.parameters()) # Setup SDEs if config.training.sde.lower() == 'vpsde': sde = sde_lib.VPSDE(beta_min=config.model.beta_min, beta_max=config.model.beta_max, N=args.steps) elif config.training.sde.lower() == 'subvpsde': sde = sde_lib.subVPSDE(beta_min=config.model.beta_min, beta_max=config.model.beta_max, N=args.steps) elif config.training.sde.lower() == 'vesde': sde = sde_lib.VESDE(sigma_min=config.model.sigma_min, sigma_max=config.model.sigma_max, N=args.steps) else: raise NotImplementedError(f"SDE {config.training.sde} unknown.") # Setup sampling functions compfn = DPoserComp(model, sde, config.training.continuous, batch_size=args.batch_size) Normalizer = Posenormalizer(data_path=f'{args.dataset_folder}/{args.version}/train', normalize=config.data.normalize, min_max=config.data.min_max, rot_rep=config.data.rot_rep, device=device)
test_dataset = AMASSDataset(root_path=args.dataset_folder,
8
2023-11-29 15:55:50+00:00
16k
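A standalone sketch of DPoserComp's time-annealing strategy '3' (the helper name is hypothetical); the diffusion time index rises toward N as optimization proceeds, so the injected noise level anneals:

import math

def annealed_t(step, total_steps, N=1000, trun=5.0):
    # Same arithmetic as the record's strategy '3', with plain floats.
    return N - math.floor((total_steps - step - 1) * (N / (trun * total_steps))) - 2

print([annealed_t(s, 200) for s in (0, 100, 199)])  # [799, 899, 998]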
KylinYee/R2-Talker-code
main.py
[ { "identifier": "NeRFDataset", "path": "nerf/provider.py", "snippet": "class NeRFDataset:\n def __init__(self, opt, device, type='train', downscale=1):\n super().__init__()\n \n self.opt = opt\n self.device = device\n self.type = type # train, val, test\n sel...
import torch
import argparse
from nerf.provider import NeRFDataset
from nerf.gui import NeRFGUI
from nerf.utils import *
from nerf.network import NeRFNetwork, R2TalkerNeRF, GeneNeRFNetwork
13,028
    parser.add_argument('--ind_dim_torso', type=int, default=8, help="individual code dim, 0 to turn off")
    parser.add_argument('--amb_dim', type=int, default=2, help="ambient dimension")
    parser.add_argument('--part', action='store_true', help="use partial training data (1/10)")
    parser.add_argument('--part2', action='store_true', help="use partial training data (first 15s)")
    parser.add_argument('--train_camera', action='store_true', help="optimize camera pose")
    parser.add_argument('--smooth_path', action='store_true', help="brute-force smooth camera pose trajectory with a window size")
    parser.add_argument('--smooth_path_window', type=int, default=7, help="smoothing window size")

    # asr
    parser.add_argument('--asr', action='store_true', help="load asr for real-time app")
    parser.add_argument('--asr_wav', type=str, default='', help="load the wav and use as input")
    parser.add_argument('--asr_play', action='store_true', help="play out the audio")

    parser.add_argument('--asr_model', type=str, default='cpierse/wav2vec2-large-xlsr-53-esperanto')
    # parser.add_argument('--asr_model', type=str, default='facebook/wav2vec2-large-960h-lv60-self')

    parser.add_argument('--asr_save_feats', action='store_true')
    # audio FPS
    parser.add_argument('--fps', type=int, default=50)
    # sliding window left-middle-right length (unit: 20ms)
    parser.add_argument('-l', type=int, default=10)
    parser.add_argument('-m', type=int, default=50)
    parser.add_argument('-r', type=int, default=10)

    opt = parser.parse_args()

    if opt.method == 'r2talker':
        opt.cond_type = 'idexp'
    elif opt.method == 'genefaceDagger':
        opt.cond_type = 'idexp'
    elif opt.method == 'rad-nerf':
        opt.cond_type = 'eo'

    if opt.O:
        opt.fp16 = True
        opt.exp_eye = True

    if opt.test:
        opt.smooth_path = True
        opt.smooth_eye = True
        opt.smooth_lips = True

    opt.cuda_ray = True
    # assert opt.cuda_ray, "Only support CUDA ray mode."

    if opt.patch_size > 1:
        # assert opt.patch_size > 16, "patch_size should > 16 to run LPIPS loss."
        assert opt.num_rays % (opt.patch_size ** 2) == 0, "num_rays should be divisible by patch_size ** 2."

    if opt.finetune_lips:
        # do not update density grid in finetune stage
        opt.update_extra_interval = 1e9

    print(opt)

    seed_everything(opt.seed)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    if opt.method == 'r2talker':
        model = R2TalkerNeRF(opt)
    elif opt.method == 'genefaceDagger':
        model = GeneNeRFNetwork(opt)
    elif opt.method == 'rad-nerf':
        model = NeRFNetwork(opt)

    # manually load state dict for head
    if opt.torso and opt.head_ckpt != '':
        model_dict = torch.load(opt.head_ckpt, map_location='cpu')['model']
        missing_keys, unexpected_keys = model.load_state_dict(model_dict, strict=False)

        if len(missing_keys) > 0:
            print(f"[WARN] missing keys: {missing_keys}")
        if len(unexpected_keys) > 0:
            print(f"[WARN] unexpected keys: {unexpected_keys}")

        # freeze these keys
        for k, v in model.named_parameters():
            if k in model_dict:
                # print(f'[INFO] freeze {k}, {v.shape}')
                v.requires_grad = False

    # print(model)

    criterion = torch.nn.MSELoss(reduction='none')

    if opt.test:
        if opt.gui:
            metrics = []  # use no metric in GUI for faster initialization...
        else:
            # metrics = [PSNRMeter(), LPIPSMeter(device=device)]
            metrics = [PSNRMeter(), LPIPSMeter(device=device), LMDMeter(backend='fan')]

        trainer = Trainer('ngp', opt, model, device=device, workspace=opt.workspace, criterion=criterion, fp16=opt.fp16, metrics=metrics, use_checkpoint=opt.ckpt)

        if opt.test_train:
            test_set = NeRFDataset(opt, device=device, type='train')
            # a manual fix to test on the training dataset
            test_set.training = False
            test_set.num_rays = -1
            test_loader = test_set.dataloader()
        else:
            test_loader = NeRFDataset(opt, device=device, type='test').dataloader()

        # temp fix: for update_extra_states
        model.aud_features = test_loader._data.auds
        model.eye_areas = test_loader._data.eye_area

        if opt.gui:
            # we still need test_loader to provide audio features for testing.
# torch.autograd.set_detect_anomaly(True) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('path', type=str) parser.add_argument('-O', action='store_true', help="equals --fp16 --cuda_ray --exp_eye") parser.add_argument('--test', action='store_true', help="test mode (load model and test dataset)") parser.add_argument('--test_train', action='store_true', help="test mode (load model and train dataset)") parser.add_argument('--data_range', type=int, nargs='*', default=[0, -1], help="data range to use") parser.add_argument('--workspace', type=str, default='workspace') parser.add_argument('--seed', type=int, default=0) ### training options parser.add_argument('--iters', type=int, default=200000, help="training iters") parser.add_argument('--lr', type=float, default=5e-3, help="initial learning rate") parser.add_argument('--lr_net', type=float, default=5e-4, help="initial learning rate") parser.add_argument('--ckpt', type=str, default='latest') parser.add_argument('--num_rays', type=int, default=4096 * 16, help="num rays sampled per image for each training step") parser.add_argument('--cuda_ray', action='store_true', help="use CUDA raymarching instead of pytorch") parser.add_argument('--max_steps', type=int, default=16, help="max num steps sampled per ray (only valid when using --cuda_ray)") parser.add_argument('--num_steps', type=int, default=16, help="num steps sampled per ray (only valid when NOT using --cuda_ray)") parser.add_argument('--upsample_steps', type=int, default=0, help="num steps up-sampled per ray (only valid when NOT using --cuda_ray)") parser.add_argument('--update_extra_interval', type=int, default=16, help="iter interval to update extra status (only valid when using --cuda_ray)") parser.add_argument('--max_ray_batch', type=int, default=4096, help="batch size of rays at inference to avoid OOM (only valid when NOT using --cuda_ray)") ### network backbone options parser.add_argument('--fp16', action='store_true', help="use amp mixed precision training") parser.add_argument('--lambda_amb', type=float, default=0.1, help="lambda for ambient loss") parser.add_argument('--bg_img', type=str, default='', help="background image") parser.add_argument('--fbg', action='store_true', help="frame-wise bg") parser.add_argument('--exp_eye', action='store_true', help="explicitly control the eyes") parser.add_argument('--fix_eye', type=float, default=-1, help="fixed eye area, negative to disable, set to 0-0.3 for a reasonable eye") parser.add_argument('--smooth_eye', action='store_true', help="smooth the eye area sequence") parser.add_argument('--torso_shrink', type=float, default=0.8, help="shrink bg coords to allow more flexibility in deform") ### dataset options parser.add_argument('--color_space', type=str, default='srgb', help="Color space, supports (linear, srgb)") parser.add_argument('--preload', type=int, default=0, help="0 means load data from disk on-the-fly, 1 means preload to CPU, 2 means GPU.") # (the default value is for the fox dataset) parser.add_argument('--bound', type=float, default=1, help="assume the scene is bounded in box[-bound, bound]^3, if > 1, will invoke adaptive ray marching.") parser.add_argument('--scale', type=float, default=4, help="scale camera location into box[-bound, bound]^3") parser.add_argument('--offset', type=float, nargs='*', default=[0, 0, 0], help="offset of camera location") parser.add_argument('--dt_gamma', type=float, default=1/256, help="dt_gamma (>=0) for adaptive ray marching. 
set to 0 to disable, >0 to accelerate rendering (but usually with worse quality)") parser.add_argument('--min_near', type=float, default=0.05, help="minimum near distance for camera") parser.add_argument('--density_thresh', type=float, default=10, help="threshold for density grid to be occupied (sigma)") parser.add_argument('--density_thresh_torso', type=float, default=0.01, help="threshold for density grid to be occupied (alpha)") parser.add_argument('--patch_size', type=int, default=1, help="[experimental] render patches in training, so as to apply LPIPS loss. 1 means disabled, use [64, 32, 16] to enable") parser.add_argument('--finetune_lips', action='store_true', help="use LPIPS and landmarks to fine tune lips region") parser.add_argument('--smooth_lips', action='store_true', help="smooth the enc_a in a exponential decay way...") parser.add_argument('--torso', action='store_true', help="fix head and train torso") parser.add_argument('--head_ckpt', type=str, default='', help="head model") ### GUI options parser.add_argument('--gui', action='store_true', help="start a GUI") parser.add_argument('--W', type=int, default=450, help="GUI width") parser.add_argument('--H', type=int, default=450, help="GUI height") parser.add_argument('--radius', type=float, default=3.35, help="default GUI camera radius from center") parser.add_argument('--fovy', type=float, default=21.24, help="default GUI camera fovy") parser.add_argument('--max_spp', type=int, default=1, help="GUI rendering max sample per pixel") ### else parser.add_argument('--att', type=int, default=2, help="audio attention mode (0 = turn off, 1 = left-direction, 2 = bi-direction)") parser.add_argument('--aud', type=str, default='', help="audio source (empty will load the default, else should be a path to a npy file)") parser.add_argument('--cond_type', type=str, default=None, help="type of driving condition: eo, ds, idexp") parser.add_argument('--method', type=str, default='r2talker', help="r2talker, genefaceDagger, rad-nerf") parser.add_argument('--emb', action='store_true', help="use audio class + embedding instead of logits") parser.add_argument('--ind_dim', type=int, default=4, help="individual code dim, 0 to turn off") parser.add_argument('--ind_num', type=int, default=10000, help="number of individual codes, should be larger than training dataset size") parser.add_argument('--ind_dim_torso', type=int, default=8, help="individual code dim, 0 to turn off") parser.add_argument('--amb_dim', type=int, default=2, help="ambient dimension") parser.add_argument('--part', action='store_true', help="use partial training data (1/10)") parser.add_argument('--part2', action='store_true', help="use partial training data (first 15s)") parser.add_argument('--train_camera', action='store_true', help="optimize camera pose") parser.add_argument('--smooth_path', action='store_true', help="brute-force smooth camera pose trajectory with a window size") parser.add_argument('--smooth_path_window', type=int, default=7, help="smoothing window size") # asr parser.add_argument('--asr', action='store_true', help="load asr for real-time app") parser.add_argument('--asr_wav', type=str, default='', help="load the wav and use as input") parser.add_argument('--asr_play', action='store_true', help="play out the audio") parser.add_argument('--asr_model', type=str, default='cpierse/wav2vec2-large-xlsr-53-esperanto') # parser.add_argument('--asr_model', type=str, default='facebook/wav2vec2-large-960h-lv60-self') parser.add_argument('--asr_save_feats', 
action='store_true') # audio FPS parser.add_argument('--fps', type=int, default=50) # sliding window left-middle-right length (unit: 20ms) parser.add_argument('-l', type=int, default=10) parser.add_argument('-m', type=int, default=50) parser.add_argument('-r', type=int, default=10) opt = parser.parse_args() if opt.method == 'r2talker': opt.cond_type = 'idexp' elif opt.method == 'genefaceDagger': opt.cond_type = 'idexp' elif opt.method == 'rad-nerf': opt.cond_type = 'eo' if opt.O: opt.fp16 = True opt.exp_eye = True if opt.test: opt.smooth_path = True opt.smooth_eye = True opt.smooth_lips = True opt.cuda_ray = True # assert opt.cuda_ray, "Only support CUDA ray mode." if opt.patch_size > 1: # assert opt.patch_size > 16, "patch_size should > 16 to run LPIPS loss." assert opt.num_rays % (opt.patch_size ** 2) == 0, "patch_size ** 2 should be dividable by num_rays." if opt.finetune_lips: # do not update density grid in finetune stage opt.update_extra_interval = 1e9 print(opt) seed_everything(opt.seed) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if opt.method == 'r2talker': model = R2TalkerNeRF(opt) elif opt.method == 'genefaceDagger': model = GeneNeRFNetwork(opt) elif opt.method == 'rad-nerf': model = NeRFNetwork(opt) # manually load state dict for head if opt.torso and opt.head_ckpt != '': model_dict = torch.load(opt.head_ckpt, map_location='cpu')['model'] missing_keys, unexpected_keys = model.load_state_dict(model_dict, strict=False) if len(missing_keys) > 0: print(f"[WARN] missing keys: {missing_keys}") if len(unexpected_keys) > 0: print(f"[WARN] unexpected keys: {unexpected_keys}") # freeze these keys for k, v in model.named_parameters(): if k in model_dict: # print(f'[INFO] freeze {k}, {v.shape}') v.requires_grad = False # print(model) criterion = torch.nn.MSELoss(reduction='none') if opt.test: if opt.gui: metrics = [] # use no metric in GUI for faster initialization... else: # metrics = [PSNRMeter(), LPIPSMeter(device=device)] metrics = [PSNRMeter(), LPIPSMeter(device=device), LMDMeter(backend='fan')] trainer = Trainer('ngp', opt, model, device=device, workspace=opt.workspace, criterion=criterion, fp16=opt.fp16, metrics=metrics, use_checkpoint=opt.ckpt) if opt.test_train: test_set = NeRFDataset(opt, device=device, type='train') # a manual fix to test on the training dataset test_set.training = False test_set.num_rays = -1 test_loader = test_set.dataloader() else: test_loader = NeRFDataset(opt, device=device, type='test').dataloader() # temp fix: for update_extra_states model.aud_features = test_loader._data.auds model.eye_areas = test_loader._data.eye_area if opt.gui: # we still need test_loader to provide audio features for testing.
with NeRFGUI(opt, trainer, test_loader) as gui:
1
2023-12-04 12:51:59+00:00
16k
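A tiny sketch of the ray/patch divisibility constraint that the record asserts before LPIPS patch training (the helper name is hypothetical):

def patches_per_step(num_rays, patch_size):
    # Rays are consumed in patch_size x patch_size pixel patches, so the ray
    # budget must be divisible by patch_size ** 2.
    assert num_rays % (patch_size ** 2) == 0, "num_rays should be divisible by patch_size ** 2"
    return num_rays // (patch_size ** 2)

print(patches_per_step(4096 * 16, 16))  # 256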
ubc-vision/vivid123
vivid123/generation_utils.py
[ { "identifier": "CLIPCameraProjection", "path": "vivid123/models/clip_camera_projection.py", "snippet": "class CLIPCameraProjection(ModelMixin, ConfigMixin):\n \"\"\"\n A Projection layer for CLIP embedding and camera embedding.\n Parameters:\n embedding_dim (`int`, *optional*, defaults ...
import os
import yaml
import re
import torch
import numpy as np
import imageio.v3 as imageio
from typing import List, Any
from yaml.parser import ParserError
from PIL import Image
from diffusers.pipelines import DiffusionPipeline
from diffusers.models import UNet2DConditionModel, AutoencoderKL
from diffusers.schedulers import DPMSolverMultistepScheduler, EulerDiscreteScheduler
from diffusers.pipelines import DiffusionPipeline
from transformers import CLIPVisionModelWithProjection
from .models import CLIPCameraProjection
from .pipelines import ViVid123Pipeline
from .configs import ViVid123BaseSchema
10,846
    video_linear_end_weight: float = 0.5,
    video_start_step_percentage: float = 0.0,
    video_end_step_percentage: float = 1.0,
    zero123_linear_start_weight: float = 1.0,
    zero123_linear_end_weight: float = 1.0,
    zero123_start_step_percentage: float = 0.0,
    zero123_end_step_percentage: float = 1.0,
):
    """
    Prepare the fusion schedule of video diffusion and zero123 at all the denoising steps

    Args:
        video_linear_start_weight (`float`, *optional*, defaults to 1.0):
            The weight of the video diffusion at the start of the video. The weight is linearly increased from
            `video_linear_start_weight` to `video_linear_end_weight` during the video diffusion.
        video_linear_end_weight (`float`, *optional*, defaults to 0.5):
            The weight of the video diffusion at the end of the video. The weight is linearly increased from
            `video_linear_start_weight` to `video_linear_end_weight` during the video diffusion.
        video_start_step_percentage (`float`, *optional*, defaults to 0.0):
            The percentage of the total number of inference steps at which the video diffusion starts. The video
            diffusion is linearly increased from `video_linear_start_weight` to `video_linear_end_weight` between
            `video_start_step_percentage` and `video_end_step_percentage`.
        video_end_step_percentage (`float`, *optional*, defaults to 1.0):
            The percentage of the total number of inference steps at which the video diffusion ends. The video
            diffusion is linearly increased from `video_linear_start_weight` to `video_linear_end_weight` between
            `video_start_step_percentage` and `video_end_step_percentage`.
        zero123_linear_start_weight (`float`, *optional*, defaults to 1.0):
            The weight of the zero123 diffusion at the start of the video. The weight is linearly increased from
            `zero123_linear_start_weight` to `zero123_linear_end_weight` during the zero123 diffusion.
        zero123_linear_end_weight (`float`, *optional*, defaults to 1.0):
            The weight of the zero123 diffusion at the end of the video. The weight is linearly increased from
            `zero123_linear_start_weight` to `zero123_linear_end_weight` during the zero123 diffusion.
        zero123_start_step_percentage (`float`, *optional*, defaults to 0.0):
            The percentage of the total number of inference steps at which the zero123 diffusion starts. The
            zero123 diffusion is linearly increased from `zero123_linear_start_weight` to
            `zero123_linear_end_weight` between `zero123_start_step_percentage` and `zero123_end_step_percentage`.
        zero123_end_step_percentage (`float`, *optional*, defaults to 1.0):
            The percentage of the total number of inference steps at which the zero123 diffusion ends. The zero123
            diffusion is linearly increased from `zero123_linear_start_weight` to `zero123_linear_end_weight`
            between `zero123_start_step_percentage` and `zero123_end_step_percentage`.

    Return:
        A tuple of two tensors,
        video_schedule (`torch.Tensor`): The schedule of the video diffusion weighting, with shape
            `[num_inference_steps]`.
        zero123_schedule (`torch.Tensor`): The schedule of the zero123 diffusion weighting, with shape
            `[num_inference_steps]`.
    """
    assert (
        video_linear_start_weight >= 0.0 and video_linear_start_weight <= 1.0
    ), "video_linear_start_weight must be between 0.0 and 1.0"
    assert (
        video_linear_end_weight >= 0.0 and video_linear_end_weight <= 1.0
    ), "video_linear_end_weight must be between 0.0 and 1.0"
    assert (
        video_start_step_percentage >= 0.0 and video_start_step_percentage <= 1.0
    ), "video_start_step_percentage must be between 0.0 and 1.0"
    assert (
        video_end_step_percentage >= 0.0 and video_end_step_percentage <= 1.0
    ), "video_end_step_percentage must be between 0.0 and 1.0"
    assert (
        zero123_linear_start_weight >= 0.0 and zero123_linear_start_weight <= 1.0
    ), "zero123_linear_start_weight must be between 0.0 and 1.0"
    assert (
        zero123_linear_end_weight >= 0.0 and zero123_linear_end_weight <= 1.0
    ), "zero123_linear_end_weight must be between 0.0 and 1.0"
    assert (
        zero123_start_step_percentage >= 0.0 and zero123_start_step_percentage <= 1.0
    ), "zero123_start_step_percentage must be between 0.0 and 1.0"
    assert (
        zero123_end_step_percentage >= 0.0 and zero123_end_step_percentage <= 1.0
    ), "zero123_end_step_percentage must be between 0.0 and 1.0"

    video_schedule = torch.linspace(
        start=video_linear_start_weight,
        end=video_linear_end_weight,
        steps=int((video_end_step_percentage - video_start_step_percentage) * num_inference_steps),
    )
    zero123_schedule = torch.linspace(
        start=zero123_linear_start_weight,
        end=zero123_linear_end_weight,
        steps=int((zero123_end_step_percentage - zero123_start_step_percentage) * num_inference_steps),
    )
    if video_schedule.shape[0] < num_inference_steps:
        video_schedule = torch.cat(
            [
                # int(): torch.ones requires integer sizes; the percentage products can be floats
                video_linear_start_weight * torch.ones([int(video_start_step_percentage * num_inference_steps)]),
                video_schedule,
                video_linear_end_weight
                * torch.ones([int(num_inference_steps - video_end_step_percentage * num_inference_steps)]),
            ]
        )
    if zero123_schedule.shape[0] < num_inference_steps:
        zero123_schedule = torch.cat(
            [
                zero123_linear_start_weight * torch.ones([int(zero123_start_step_percentage * num_inference_steps)]),
                zero123_schedule,
                zero123_linear_end_weight
                * torch.ones([int(num_inference_steps - zero123_end_step_percentage * num_inference_steps)]),
            ]
        )

    return (video_schedule, zero123_schedule)


def save_videos_grid_zeroscope_nplist(video_frames: List[np.ndarray], path: str, n_rows=6, fps=8,
                                      mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]):
    # fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    f = len(video_frames)
    h, w, c = video_frames[0].shape
    # images = [(image).astype("uint8") for image in video_frames]
    os.makedirs(os.path.dirname(path), exist_ok=True)
    imageio.imwrite(path, video_frames, fps=fps)


def prepare_pipelines(
    ZERO123_MODEL_ID: str = "bennyguo/zero123-xl-diffusers",
    VIDEO_MODEL_ID: str = "cerspense/zeroscope_v2_576w",
    VIDEO_XL_MODEL_ID: str = "cerspense/zeroscope_v2_XL"
):
    zero123_unet = UNet2DConditionModel.from_pretrained(ZERO123_MODEL_ID, subfolder="unet")
def prepare_cam_pose_input( num_frames: int = 25, delta_elevation_start: float = 0.0, delta_elevation_end: float = 0.0, delta_azimuth_start: float = -45.0, delta_azimuth_end: float = 45.0, delta_radius_start: float = 0.0, delta_radius_end: float = 0.0, ): r""" The function to prepare the input to the vivid123 pipeline Args: delta_elevation_start (`float`, *optional*, defaults to 0.0): The starting relative elevation angle of the camera, in degree. Relative to the elevation of the reference image. The camera is facing towards the origin. delta_elevation_end (`float`, *optional*, defaults to 0.0): The ending relative elevation angle of the camera, in degree. Relative to the elevation of the reference image. The camera is facing towards the origin. delta_azimuth_start (`float`, *optional*, defaults to -45.0): The starting relative azimuth angle of the camera, in degree. Relative to the elevation of the reference image. The camera is facing towards the origin. delta_azimuth_end (`float`, *optional*, defaults to 45.0): The ending relative azimuth angle of the camera, in degree. Relative to the elevation of the reference image. The camera is facing towards the origin. Returns: """ cam_elevation = np.radians(np.linspace(delta_elevation_start, delta_elevation_end, num_frames))[..., None] cam_azimuth = np.radians(np.linspace(delta_azimuth_start, delta_azimuth_end, num_frames)) cam_azimuth_sin_cos = np.stack([np.sin(cam_azimuth), np.cos(cam_azimuth)], axis=-1) cam_radius = np.linspace(delta_radius_start, delta_radius_end, num_frames)[..., None] cam_pose_np = np.concatenate([cam_elevation, cam_azimuth_sin_cos, cam_radius], axis=-1) cam_pose_torch = torch.from_numpy(cam_pose_np) return cam_pose_torch # refer to https://stackoverflow.com/a/33507138/6257375 def conver_rgba_to_rgb_white_bg( image: Image, H: int = 256, W: int = 256, ): input_image = image.convert("RGBA").resize((H, W), Image.BICUBIC) background = Image.new("RGBA", input_image.size, (255, 255, 255)) alpha_composite = Image.alpha_composite(background, input_image) return alpha_composite def prepare_fusion_schedule_linear( num_inference_steps: int = 50, video_linear_start_weight: float = 1.0, video_linear_end_weight: float = 0.5, video_start_step_percentage: float = 0.0, video_end_step_percentage: float = 1.0, zero123_linear_start_weight: float = 1.0, zero123_linear_end_weight: float = 1.0, zero123_start_step_percentage: float = 0.0, zero123_end_step_percentage: float = 1.0, ): """ Prepare the fusion schedule of video diffusion and zero123 at all the denoising steps Args: video_linear_start_weight (`float`, *optional*, defaults to 1.0): The weight of the video diffusion at the start of the video. The weight is linearly increased from `video_linear_start_weight` to `video_linear_end_weight` during the video diffusion. video_linear_end_weight (`float`, *optional*, defaults to 0.5): The weight of the video diffusion at the end of the video. The weight is linearly increased from `video_linear_start_weight` to `video_linear_end_weight` during the video diffusion. video_start_step_percentage (`float`, *optional*, defaults to 0.0): The percentage of the total number of inference steps at which the video diffusion starts. The video diffusion is linearly increased from `video_linear_start_weight` to `video_linear_end_weight` between `video_start_step_percentage` and `video_end_step_percentage`. video_end_step_percentage (`float`, *optional*, defaults to 1.0): The percentage of the total number of inference steps at which the video diffusion ends. 
The video diffusion is linearly increased from `video_linear_start_weight` to `video_linear_end_weight` between `video_start_step_percentage` and `video_end_step_percentage`. zero123_linear_start_weight (`float`, *optional*, defaults to 1.0): The weight of the zero123 diffusion at the start of the video. The weight is linearly increased from `zero123_linear_start_weight` to `zero123_linear_end_weight` during the zero123 diffusion. zero123_linear_end_weight (`float`, *optional*, defaults to 1.0): The weight of the zero123 diffusion at the end of the video. The weight is linearly increased from `zero123_linear_start_weight` to `zero123_linear_end_weight` during the zero123 diffusion. zero123_start_step_percentage (`float`, *optional*, defaults to 0.0): The percentage of the total number of inference steps at which the zero123 diffusion starts. The zero123 diffusion is linearly increased from `zero123_linear_start_weight` to `zero123_linear_end_weight` between `zero123_start_step_percentage` and `zero123_end_step_percentage`. zero123_end_step_percentage (`float`, *optional*, defaults to 1.0): The percentage of the total number of inference steps at which the zero123 diffusion ends. The zero123 diffusion is linearly increased from `zero123_linear_start_weight` to `zero123_linear_end_weight` between `zero123_start_step_percentage` and `zero123_end_step_percentage`. Return: A tuple of two tensors, video_schedule (`torch.Tensor`): The schedule of the video diffusion weighting, with shape `[num_inference_steps]`. zero123_schedule (`torch.Tensor`): The schedule of the zero123 diffusion weighting, with shape `[num_inference_steps]`. """ assert ( video_linear_start_weight >= 0.0 and video_linear_start_weight <= 1.0 ), "video_linear_start_weight must be between 0.0 and 1.0" assert ( video_linear_end_weight >= 0.0 and video_linear_end_weight <= 1.0 ), "video_linear_end_weight must be between 0.0 and 1.0" assert ( video_start_step_percentage >= 0.0 and video_start_step_percentage <= 1.0 ), "video_start_step_percentage must be between 0.0 and 1.0" assert ( video_end_step_percentage >= 0.0 and video_end_step_percentage <= 1.0 ), "video_end_step_percentage must be between 0.0 and 1.0" assert ( zero123_linear_start_weight >= 0.0 and zero123_linear_start_weight <= 1.0 ), "zero123_linear_start_weight must be between 0.0 and 1.0" assert ( zero123_linear_end_weight >= 0.0 and zero123_linear_end_weight <= 1.0 ), "zero123_linear_end_weight must be between 0.0 and 1.0" assert ( zero123_start_step_percentage >= 0.0 and zero123_start_step_percentage <= 1.0 ), "zero123_start_step_percentage must be between 0.0 and 1.0" assert ( zero123_end_step_percentage >= 0.0 and zero123_end_step_percentage <= 1.0 ), "zero123_end_step_percentage must be between 0.0 and 1.0" video_schedule = torch.linspace( start=video_linear_start_weight, end=video_linear_end_weight, steps=int((video_end_step_percentage - video_start_step_percentage) * num_inference_steps), ) zero123_schedule = torch.linspace( start=zero123_linear_start_weight, end=zero123_linear_end_weight, steps=int((zero123_end_step_percentage - zero123_start_step_percentage) * num_inference_steps), ) if video_schedule.shape[0] < num_inference_steps: video_schedule = torch.cat( [ video_linear_start_weight * torch.ones([video_start_step_percentage * num_inference_steps]), video_schedule, video_linear_end_weight * torch.ones([num_inference_steps - video_end_step_percentage * num_inference_steps]), ] ) if zero123_schedule.shape[0] < num_inference_steps: zero123_schedule = 
torch.cat( [ zero123_linear_start_weight * torch.ones([zero123_start_step_percentage * num_inference_steps]), zero123_schedule, zero123_linear_end_weight * torch.ones([num_inference_steps - zero123_end_step_percentage * num_inference_steps]), ] ) return (video_schedule, zero123_schedule) def save_videos_grid_zeroscope_nplist(video_frames: List[np.ndarray], path: str, n_rows=6, fps=8, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]): # fourcc = cv2.VideoWriter_fourcc(*"mp4v") f = len(video_frames) h, w, c = video_frames[0].shape #images = [(image).astype("uint8") for image in video_frames] os.makedirs(os.path.dirname(path), exist_ok=True) imageio.imwrite(path, video_frames, fps=fps) def prepare_pipelines( ZERO123_MODEL_ID: str = "bennyguo/zero123-xl-diffusers", VIDEO_MODEL_ID: str = "cerspense/zeroscope_v2_576w", VIDEO_XL_MODEL_ID: str = "cerspense/zeroscope_v2_XL" ): zero123_unet = UNet2DConditionModel.from_pretrained(ZERO123_MODEL_ID, subfolder="unet")
zero123_cam_proj = CLIPCameraProjection.from_pretrained(ZERO123_MODEL_ID, subfolder="clip_camera_projection")
0
2023-11-27 22:48:17+00:00
16k
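A compact sketch of the piecewise-linear fusion schedule from the record, with sizes cast to int (torch.ones requires integer sizes, and the record multiplies percentages by step counts, which can yield floats):

import torch

def linear_schedule(start_w, end_w, start_pct, end_pct, steps):
    # Constant start_w before start_pct, linear ramp to end_w, constant after end_pct.
    ramp = torch.linspace(start_w, end_w, int((end_pct - start_pct) * steps))
    head = start_w * torch.ones(int(start_pct * steps))
    tail = end_w * torch.ones(steps - head.numel() - ramp.numel())
    return torch.cat([head, ramp, tail])

print(linear_schedule(1.0, 0.5, 0.2, 0.8, 10))
# tensor([1.0000, 1.0000, 1.0000, 0.9000, 0.8000, 0.7000, 0.6000, 0.5000, 0.5000, 0.5000])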
TISUnion/PrimeBackup
prime_backup/debug_entry.py
[ { "identifier": "logger", "path": "prime_backup/logger.py", "snippet": "def __create_logger() -> logging.Logger:\ndef get() -> logging.Logger:" }, { "identifier": "CreateBackupAction", "path": "prime_backup/action/create_backup_action.py", "snippet": "class CreateBackupAction(CreateBacku...
import time
from pathlib import Path
from prime_backup import logger
from prime_backup.action.create_backup_action import CreateBackupAction
from prime_backup.action.delete_backup_action import DeleteBackupAction
from prime_backup.action.export_backup_action import ExportBackupToDirectoryAction, ExportBackupToTarAction, ExportBackupToZipAction
from prime_backup.action.import_backup_action import ImportBackupAction
from prime_backup.action.list_backup_action import ListBackupAction
from prime_backup.action.migrate_compress_method_action import MigrateCompressMethodAction
from prime_backup.compressors import CompressMethod
from prime_backup.db.access import DbAccess
from prime_backup.types.operator import Operator
from prime_backup.types.tar_format import TarFormat
13,019
def main(): DbAccess.init() backup_id = 1 logger.get().info('debug entry start') def create(n: int = 1): nonlocal backup_id for i in range(n): t = time.time() bka = CreateBackupAction(Operator.player('Steve'), '测试彩色测试') backup_id = bka.run().id print('cost', round(time.time() - t, 2), 's') def create_if_1st(): nonlocal backup_id t = time.time() if backup_id == 1: bka = CreateBackupAction(Operator.player('Steve'), 'test2') backup_id = bka.run().id print('cost', round(time.time() - t, 2), 's') def export(bid=None): if bid is None: bid = backup_id t = time.time()
def main(): DbAccess.init() backup_id = 1 logger.get().info('debug entry start') def create(n: int = 1): nonlocal backup_id for i in range(n): t = time.time() bka = CreateBackupAction(Operator.player('Steve'), '测试彩色测试') backup_id = bka.run().id print('cost', round(time.time() - t, 2), 's') def create_if_1st(): nonlocal backup_id t = time.time() if backup_id == 1: bka = CreateBackupAction(Operator.player('Steve'), 'test2') backup_id = bka.run().id print('cost', round(time.time() - t, 2), 's') def export(bid=None): if bid is None: bid = backup_id t = time.time()
_ = [ExportBackupToDirectoryAction, ExportBackupToTarAction, ExportBackupToZipAction, TarFormat]
3
2023-11-28 19:03:36+00:00
16k
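The debug entry above repeats a `t = time.time(); ...; print('cost', round(time.time() - t, 2), 's')` pattern around every action; a hypothetical helper (not part of the repo) that captures the same measurement as a context manager:

import time
from contextlib import contextmanager

@contextmanager
def timed(label='cost'):
    # Print elapsed wall-clock seconds in the same format as the record.
    t = time.time()
    try:
        yield
    finally:
        print(label, round(time.time() - t, 2), 's')

with timed():
    sum(range(1_000_000))  # stand-in for e.g. CreateBackupAction(...).run()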
metatube-community/metatube-plex-plugins
MetaTube.bundle/Contents/Libraries/Shared/urllib3/poolmanager.py
[ { "identifier": "HTTPHeaderDict", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/_collections.py", "snippet": "class HTTPHeaderDict(MutableMapping):\n \"\"\"\n :param headers:\n An iterable of field-value pairs. Must not contain multiple field names\n when compared case-i...
import collections import functools import logging from ._collections import HTTPHeaderDict, RecentlyUsedContainer from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, port_by_scheme from .exceptions import ( LocationValueError, MaxRetryError, ProxySchemeUnknown, ProxySchemeUnsupported, URLSchemeUnknown, ) from .packages import six from .packages.six.moves.urllib.parse import urljoin from .request import RequestMethods from .util.proxy import connection_requires_http_tunnel from .util.retry import Retry from .util.url import parse_url
13,339
from __future__ import absolute_import __all__ = ["PoolManager", "ProxyManager", "proxy_from_url"] log = logging.getLogger(__name__) SSL_KEYWORDS = ( "key_file", "cert_file", "cert_reqs", "ca_certs", "ssl_version", "ca_cert_dir", "ssl_context", "key_password", "server_hostname", ) # All known keyword arguments that could be provided to the pool manager, its # pools, or the underlying connections. This is used to construct a pool key. _key_fields = ( "key_scheme", # str "key_host", # str "key_port", # int "key_timeout", # int or float or Timeout "key_retries", # int or Retry "key_strict", # bool "key_block", # bool "key_source_address", # str "key_key_file", # str "key_key_password", # str "key_cert_file", # str "key_cert_reqs", # str "key_ca_certs", # str "key_ssl_version", # str "key_ca_cert_dir", # str "key_ssl_context", # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext "key_maxsize", # int "key_headers", # dict "key__proxy", # parsed proxy url "key__proxy_headers", # dict "key__proxy_config", # class "key_socket_options", # list of (level (int), optname (int), value (int or str)) tuples "key__socks_options", # dict "key_assert_hostname", # bool or string "key_assert_fingerprint", # str "key_server_hostname", # str ) #: The namedtuple class used to construct keys for the connection pool. #: All custom key schemes should include the fields in this key at a minimum. PoolKey = collections.namedtuple("PoolKey", _key_fields) _proxy_config_fields = ("ssl_context", "use_forwarding_for_https") ProxyConfig = collections.namedtuple("ProxyConfig", _proxy_config_fields) def _default_key_normalizer(key_class, request_context): """ Create a pool key out of a request context dictionary. According to RFC 3986, both the scheme and host are case-insensitive. Therefore, this function normalizes both before constructing the pool key for an HTTPS request. If you wish to change this behaviour, provide alternate callables to ``key_fn_by_scheme``. :param key_class: The class to use when constructing the key. This should be a namedtuple with the ``scheme`` and ``host`` keys at a minimum. :type key_class: namedtuple :param request_context: A dictionary-like object that contain the context for a request. :type request_context: dict :return: A namedtuple that can be used as a connection pool key. :rtype: PoolKey """ # Since we mutate the dictionary, make a copy first context = request_context.copy() context["scheme"] = context["scheme"].lower() context["host"] = context["host"].lower() # These are both dictionaries and need to be transformed into frozensets for key in ("headers", "_proxy_headers", "_socks_options"): if key in context and context[key] is not None: context[key] = frozenset(context[key].items()) # The socket_options key may be a list and needs to be transformed into a # tuple. socket_opts = context.get("socket_options") if socket_opts is not None: context["socket_options"] = tuple(socket_opts) # Map the kwargs to the names in the namedtuple - this is necessary since # namedtuples can't have fields starting with '_'. for key in list(context.keys()): context["key_" + key] = context.pop(key) # Default to ``None`` for keys missing from the context for field in key_class._fields: if field not in context: context[field] = None return key_class(**context) #: A dictionary that maps a scheme to a callable that creates a pool key. #: This can be used to alter the way pool keys are constructed, if desired. 
#: Each PoolManager makes a copy of this dictionary so they can be configured #: globally here, or individually on the instance. key_fn_by_scheme = { "http": functools.partial(_default_key_normalizer, PoolKey), "https": functools.partial(_default_key_normalizer, PoolKey), }
from __future__ import absolute_import __all__ = ["PoolManager", "ProxyManager", "proxy_from_url"] log = logging.getLogger(__name__) SSL_KEYWORDS = ( "key_file", "cert_file", "cert_reqs", "ca_certs", "ssl_version", "ca_cert_dir", "ssl_context", "key_password", "server_hostname", ) # All known keyword arguments that could be provided to the pool manager, its # pools, or the underlying connections. This is used to construct a pool key. _key_fields = ( "key_scheme", # str "key_host", # str "key_port", # int "key_timeout", # int or float or Timeout "key_retries", # int or Retry "key_strict", # bool "key_block", # bool "key_source_address", # str "key_key_file", # str "key_key_password", # str "key_cert_file", # str "key_cert_reqs", # str "key_ca_certs", # str "key_ssl_version", # str "key_ca_cert_dir", # str "key_ssl_context", # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext "key_maxsize", # int "key_headers", # dict "key__proxy", # parsed proxy url "key__proxy_headers", # dict "key__proxy_config", # class "key_socket_options", # list of (level (int), optname (int), value (int or str)) tuples "key__socks_options", # dict "key_assert_hostname", # bool or string "key_assert_fingerprint", # str "key_server_hostname", # str ) #: The namedtuple class used to construct keys for the connection pool. #: All custom key schemes should include the fields in this key at a minimum. PoolKey = collections.namedtuple("PoolKey", _key_fields) _proxy_config_fields = ("ssl_context", "use_forwarding_for_https") ProxyConfig = collections.namedtuple("ProxyConfig", _proxy_config_fields) def _default_key_normalizer(key_class, request_context): """ Create a pool key out of a request context dictionary. According to RFC 3986, both the scheme and host are case-insensitive. Therefore, this function normalizes both before constructing the pool key for an HTTPS request. If you wish to change this behaviour, provide alternate callables to ``key_fn_by_scheme``. :param key_class: The class to use when constructing the key. This should be a namedtuple with the ``scheme`` and ``host`` keys at a minimum. :type key_class: namedtuple :param request_context: A dictionary-like object that contain the context for a request. :type request_context: dict :return: A namedtuple that can be used as a connection pool key. :rtype: PoolKey """ # Since we mutate the dictionary, make a copy first context = request_context.copy() context["scheme"] = context["scheme"].lower() context["host"] = context["host"].lower() # These are both dictionaries and need to be transformed into frozensets for key in ("headers", "_proxy_headers", "_socks_options"): if key in context and context[key] is not None: context[key] = frozenset(context[key].items()) # The socket_options key may be a list and needs to be transformed into a # tuple. socket_opts = context.get("socket_options") if socket_opts is not None: context["socket_options"] = tuple(socket_opts) # Map the kwargs to the names in the namedtuple - this is necessary since # namedtuples can't have fields starting with '_'. for key in list(context.keys()): context["key_" + key] = context.pop(key) # Default to ``None`` for keys missing from the context for field in key_class._fields: if field not in context: context[field] = None return key_class(**context) #: A dictionary that maps a scheme to a callable that creates a pool key. #: This can be used to alter the way pool keys are constructed, if desired. 
#: Each PoolManager makes a copy of this dictionary so they can be configured #: globally here, or individually on the instance. key_fn_by_scheme = { "http": functools.partial(_default_key_normalizer, PoolKey), "https": functools.partial(_default_key_normalizer, PoolKey), }
pool_classes_by_scheme = {"http": HTTPConnectionPool, "https": HTTPSConnectionPool}
2
2023-11-27 07:01:39+00:00
16k
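_default_key_normalizer above exists so that equivalent request contexts map to the same connection pool: scheme and host are case-folded (per RFC 3986) and unhashable dicts become frozensets so the namedtuple key is usable in the RecentlyUsedContainer. A standalone illustration with a hypothetical three-field key (the real PoolKey has many more fields):

import collections

MiniPoolKey = collections.namedtuple("MiniPoolKey", ["key_scheme", "key_host", "key_headers"])

def normalize(ctx):
    # Same steps as _default_key_normalizer, reduced to three fields.
    ctx = dict(ctx)
    ctx["scheme"] = ctx["scheme"].lower()
    ctx["host"] = ctx["host"].lower()
    ctx["headers"] = frozenset(ctx["headers"].items())  # dict -> hashable
    return MiniPoolKey(**{"key_" + k: v for k, v in ctx.items()})

a = normalize({"scheme": "HTTPS", "host": "Example.COM", "headers": {"x": "1"}})
b = normalize({"scheme": "https", "host": "example.com", "headers": {"x": "1"}})
assert a == b and hash(a) == hash(b)  # both requests reuse one pool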
IanYeung/MGLD-VSR
scripts/vsr_val_ddpm_text_T_vqganfin_old.py
[ { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n ra...
import argparse, os, sys, glob import PIL import torch import torch.nn as nn import torch.nn.functional as F import numpy as np import torchvision import time import math import copy from omegaconf import OmegaConf from PIL import Image from tqdm import tqdm, trange from itertools import islice from einops import rearrange, repeat from torchvision.utils import make_grid from torch import autocast from contextlib import nullcontext from pytorch_lightning import seed_everything from ldm.util import instantiate_from_config from ldm.models.diffusion.ddim import DDIMSampler from ldm.models.diffusion.plms import PLMSSampler from basicsr.archs.arch_util import resize_flow from scripts.util_flow import forward_backward_consistency_check from scripts.wavelet_color_fix import wavelet_reconstruction, adaptive_instance_normalization
12,910
def space_timesteps(num_timesteps, section_counts): """ Create a list of timesteps to use from an original diffusion process, given the number of timesteps we want to take from equally-sized portions of the original process. For example, if there's 300 timesteps and the section counts are [10,15,20] then the first 100 timesteps are strided to be 10 timesteps, the second 100 are strided to be 15 timesteps, and the final 100 are strided to be 20. If the stride is a string starting with "ddim", then the fixed striding from the DDIM paper is used, and only one section is allowed. :param num_timesteps: the number of diffusion steps in the original process to divide up. :param section_counts: either a list of numbers, or a string containing comma-separated numbers, indicating the step count per section. As a special case, use "ddimN" where N is a number of steps to use the striding from the DDIM paper. :return: a set of diffusion steps from the original process to use. """ if isinstance(section_counts, str): if section_counts.startswith("ddim"): desired_count = int(section_counts[len("ddim"):]) for i in range(1, num_timesteps): if len(range(0, num_timesteps, i)) == desired_count: return set(range(0, num_timesteps, i)) raise ValueError( f"cannot create exactly {num_timesteps} steps with an integer stride" ) section_counts = [int(x) for x in section_counts.split(",")] #[250,] size_per = num_timesteps // len(section_counts) extra = num_timesteps % len(section_counts) start_idx = 0 all_steps = [] for i, section_count in enumerate(section_counts): size = size_per + (1 if i < extra else 0) if size < section_count: raise ValueError( f"cannot divide section of {size} steps into {section_count}" ) if section_count <= 1: frac_stride = 1 else: frac_stride = (size - 1) / (section_count - 1) cur_idx = 0.0 taken_steps = [] for _ in range(section_count): taken_steps.append(start_idx + round(cur_idx)) cur_idx += frac_stride all_steps += taken_steps start_idx += size return set(all_steps) def chunk(it, size): it = iter(it) return iter(lambda: tuple(islice(it, size)), ()) def load_model_from_config(config, ckpt, verbose=False): print(f"Loading model from {ckpt}") pl_sd = torch.load(ckpt, map_location="cpu") if "global_step" in pl_sd: print(f"Global Step: {pl_sd['global_step']}") sd = pl_sd["state_dict"]
def space_timesteps(num_timesteps, section_counts): """ Create a list of timesteps to use from an original diffusion process, given the number of timesteps we want to take from equally-sized portions of the original process. For example, if there's 300 timesteps and the section counts are [10,15,20] then the first 100 timesteps are strided to be 10 timesteps, the second 100 are strided to be 15 timesteps, and the final 100 are strided to be 20. If the stride is a string starting with "ddim", then the fixed striding from the DDIM paper is used, and only one section is allowed. :param num_timesteps: the number of diffusion steps in the original process to divide up. :param section_counts: either a list of numbers, or a string containing comma-separated numbers, indicating the step count per section. As a special case, use "ddimN" where N is a number of steps to use the striding from the DDIM paper. :return: a set of diffusion steps from the original process to use. """ if isinstance(section_counts, str): if section_counts.startswith("ddim"): desired_count = int(section_counts[len("ddim"):]) for i in range(1, num_timesteps): if len(range(0, num_timesteps, i)) == desired_count: return set(range(0, num_timesteps, i)) raise ValueError( f"cannot create exactly {num_timesteps} steps with an integer stride" ) section_counts = [int(x) for x in section_counts.split(",")] #[250,] size_per = num_timesteps // len(section_counts) extra = num_timesteps % len(section_counts) start_idx = 0 all_steps = [] for i, section_count in enumerate(section_counts): size = size_per + (1 if i < extra else 0) if size < section_count: raise ValueError( f"cannot divide section of {size} steps into {section_count}" ) if section_count <= 1: frac_stride = 1 else: frac_stride = (size - 1) / (section_count - 1) cur_idx = 0.0 taken_steps = [] for _ in range(section_count): taken_steps.append(start_idx + round(cur_idx)) cur_idx += frac_stride all_steps += taken_steps start_idx += size return set(all_steps) def chunk(it, size): it = iter(it) return iter(lambda: tuple(islice(it, size)), ()) def load_model_from_config(config, ckpt, verbose=False): print(f"Loading model from {ckpt}") pl_sd = torch.load(ckpt, map_location="cpu") if "global_step" in pl_sd: print(f"Global Step: {pl_sd['global_step']}") sd = pl_sd["state_dict"]
model = instantiate_from_config(config.model)
0
2023-11-30 01:50:29+00:00
16k
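Worked examples of the striding that the space_timesteps docstring above describes, assuming the record's definition is in scope (Python 3 round() uses banker's rounding, so 4.5 rounds to 4):

# One section of 5 from 10 steps: frac_stride = (10-1)/(5-1) = 2.25, and the
# cumulative indices 0, 2.25, 4.5, 6.75, 9.0 round to {0, 2, 4, 7, 9}.
assert space_timesteps(10, [5]) == {0, 2, 4, 7, 9}
# "ddimN" instead searches for an integer stride giving exactly N steps.
assert space_timesteps(10, "ddim5") == {0, 2, 4, 6, 8}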
Institute4FutureHealth/CHA
tasks/types.py
[ { "identifier": "ActivityAnalysis", "path": "tasks/affect/activity_analysis.py", "snippet": "class ActivityAnalysis(Affect):\n \"\"\"\n **Description:**\n\n This tasks performs average, sum, or trend analysis on the provided raw activity affect data for specific patient.\n \"\"\"\n\n ...
from typing import Dict from typing import Type from tasks.affect import ActivityAnalysis from tasks.affect import ActivityGet from tasks.affect import SleepAnalysis from tasks.affect import SleepGet from tasks.ask_user import AskUser from tasks.google_translator import GoogleTranslate from tasks.playwright import Click from tasks.playwright import CurrentWebPage from tasks.playwright import ExtractHyperlinks from tasks.playwright import ExtractText from tasks.playwright import GetElements from tasks.playwright import Navigate from tasks.playwright import NavigateBack from tasks.read_from_datapipe import ReadDataPipe from tasks.serpapi import SerpAPI from tasks.task import BaseTask from tasks.task_types import TaskType from tasks.test_file import TestFile
14,046
TASK_TO_CLASS: Dict[TaskType, Type[BaseTask]] = { TaskType.SERPAPI: SerpAPI, TaskType.CLICK: Click, TaskType.GET_CURRENT_PAGE: CurrentWebPage, TaskType.EXTRACT_HYPERLINKS: ExtractHyperlinks, TaskType.EXTRACT_TEXT: ExtractText, TaskType.GET_ELEMENTS: GetElements, TaskType.NAVIGATE_BACK: NavigateBack, TaskType.NAVIGATE: Navigate, TaskType.AFFECT_SLEEP_GET: SleepGet, TaskType.AFFECT_ACTIVITY_GET: ActivityGet, TaskType.AFFECT_SLEEP_ANALYSIS: SleepAnalysis, TaskType.AFFECT_ACTIVITY_ANALYSIS: ActivityAnalysis, TaskType.GOOGLE_TRANSLATE: GoogleTranslate, TaskType.ASK_USER: AskUser, TaskType.TEST_FILE: TestFile,
TASK_TO_CLASS: Dict[TaskType, Type[BaseTask]] = { TaskType.SERPAPI: SerpAPI, TaskType.CLICK: Click, TaskType.GET_CURRENT_PAGE: CurrentWebPage, TaskType.EXTRACT_HYPERLINKS: ExtractHyperlinks, TaskType.EXTRACT_TEXT: ExtractText, TaskType.GET_ELEMENTS: GetElements, TaskType.NAVIGATE_BACK: NavigateBack, TaskType.NAVIGATE: Navigate, TaskType.AFFECT_SLEEP_GET: SleepGet, TaskType.AFFECT_ACTIVITY_GET: ActivityGet, TaskType.AFFECT_SLEEP_ANALYSIS: SleepAnalysis, TaskType.AFFECT_ACTIVITY_ANALYSIS: ActivityAnalysis, TaskType.GOOGLE_TRANSLATE: GoogleTranslate, TaskType.ASK_USER: AskUser, TaskType.TEST_FILE: TestFile,
TaskType.READ_FROM_DATAPIPE: ReadDataPipe,
13
2023-12-02 05:10:44+00:00
16k
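TASK_TO_CLASS above (plus the READ_FROM_DATAPIPE entry from the record's next_line) is a plain registry: task construction is a single dict lookup. A generic, self-contained version of the pattern with hypothetical stand-in classes:

from typing import Dict, Type

class BaseTask:           # stand-in for tasks.task.BaseTask
    pass

class SerpAPI(BaseTask):  # stand-in for the real SerpAPI task
    pass

REGISTRY: Dict[str, Type[BaseTask]] = {"serpapi": SerpAPI}

def build_task(name: str) -> BaseTask:
    # An unknown task type surfaces naturally as a KeyError.
    return REGISTRY[name]()

assert isinstance(build_task("serpapi"), SerpAPI)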
Czm369/MixPL
projects/Detic_new/detic/centernet_rpn_head.py
[ { "identifier": "CenterNetUpdateHead", "path": "mmdet/models/dense_heads/centernet_update_head.py", "snippet": "class CenterNetUpdateHead(AnchorFreeHead):\n \"\"\"CenterNetUpdateHead is an improved version of CenterNet in CenterNet2.\n Paper link `<https://arxiv.org/abs/2103.07461>`_.\n\n Args:...
import copy import torch import torch.nn as nn from typing import Dict, List, Optional, Sequence, Tuple from mmcv.cnn import Scale from mmengine import ConfigDict from mmengine.structures import InstanceData from torch import Tensor from mmdet.models.dense_heads import CenterNetUpdateHead from mmdet.models.utils import unpack_gt_instances from mmdet.registry import MODELS from mmdet.structures import SampleList from mmdet.structures.bbox import bbox2distance from mmdet.utils import (ConfigType, InstanceList, OptConfigType, OptInstanceList, reduce_mean) from .iou_loss import IOULoss
10,823
# F.relu(bbox_pred) with bbox_pred.clamp(min=0) bbox_pred = bbox_pred.clamp(min=0) return cls_score, bbox_pred # score aligned, box larger def loss_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None ) -> Dict[str, Tensor]: """Calculate the loss based on the features extracted by the detection head. Args: cls_scores (list[Tensor]): Box scores for each scale level, each is a 4D-tensor, the channel number is num_classes. bbox_preds (list[Tensor]): Box energies / deltas for each scale level, each is a 4D-tensor, the channel number is 4. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict[str, Tensor]: A dictionary of loss components. """ num_imgs = cls_scores[0].size(0) assert len(cls_scores) == len(bbox_preds) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] all_level_points = self.prior_generator.grid_priors( featmap_sizes, dtype=bbox_preds[0].dtype, device=bbox_preds[0].device) # 1 flatten outputs flatten_cls_scores = [ cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) for cls_score in cls_scores ] flatten_bbox_preds = [ bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) for bbox_pred in bbox_preds ] flatten_cls_scores = torch.cat(flatten_cls_scores) flatten_bbox_preds = torch.cat(flatten_bbox_preds) # repeat points to align with bbox_preds flatten_points = torch.cat( [points.repeat(num_imgs, 1) for points in all_level_points]) assert (torch.isfinite(flatten_bbox_preds).all().item()) # 2 calc reg and cls branch targets cls_targets, bbox_targets = self.get_targets(all_level_points, batch_gt_instances) # 3 pos index for cls branch featmap_sizes = flatten_points.new_tensor(featmap_sizes) if self.more_pos: pos_inds, cls_labels = self.add_cls_pos_inds( flatten_points, flatten_bbox_preds, featmap_sizes, batch_gt_instances) else: pos_inds = self._get_label_inds(batch_gt_instances, batch_img_metas, featmap_sizes) # 4 calc cls loss if pos_inds is None: # num_gts=0 num_pos_cls = bbox_preds[0].new_tensor(0, dtype=torch.float) else: num_pos_cls = bbox_preds[0].new_tensor( len(pos_inds), dtype=torch.float) num_pos_cls = max(reduce_mean(num_pos_cls), 1.0) cat_agn_cls_targets = cls_targets.max(dim=1)[0] # M cls_pos_loss, cls_neg_loss = self.loss_cls( flatten_cls_scores.squeeze(1), cat_agn_cls_targets, pos_inds, num_pos_cls) # 5 calc reg loss pos_bbox_inds = torch.nonzero( bbox_targets.max(dim=1)[0] >= 0).squeeze(1) pos_bbox_preds = flatten_bbox_preds[pos_bbox_inds] pos_bbox_targets = bbox_targets[pos_bbox_inds] bbox_weight_map = cls_targets.max(dim=1)[0] bbox_weight_map = bbox_weight_map[pos_bbox_inds] bbox_weight_map = bbox_weight_map if self.soft_weight_on_reg \ else torch.ones_like(bbox_weight_map) num_pos_bbox = max(reduce_mean(bbox_weight_map.sum()), 1.0) if len(pos_bbox_inds) > 0: bbox_loss = self.loss_bbox( pos_bbox_preds, pos_bbox_targets, bbox_weight_map, reduction='sum') / num_pos_bbox else: bbox_loss = flatten_bbox_preds.sum() * 0 return dict( loss_bbox=bbox_loss, loss_cls_pos=cls_pos_loss, loss_cls_neg=cls_neg_loss) def loss_and_predict( self, x: Tuple[Tensor],
# Copyright (c) OpenMMLab. All rights reserved. # from .heatmap_focal_loss import binary_heatmap_focal_loss_jit INF = 1000000000 RangeType = Sequence[Tuple[int, int]] @MODELS.register_module() class CenterNetRPNHead(CenterNetUpdateHead): """CenterNetUpdateHead is an improved version of CenterNet in CenterNet2. Paper link `<https://arxiv.org/abs/2103.07461>`_. Args: num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channel in the input feature map. regress_ranges (Sequence[Tuple[int, int]]): Regress range of multiple level points. hm_min_radius (int): Heatmap target minimum radius of cls branch. Defaults to 4. hm_min_overlap (float): Heatmap target minimum overlap of cls branch. Defaults to 0.8. more_pos_thresh (float): The filtering threshold when the cls branch adds more positive samples. Defaults to 0.2. more_pos_topk (int): The maximum number of additional positive samples added to each gt. Defaults to 9. soft_weight_on_reg (bool): Whether to use the soft target of the cls branch as the soft weight of the bbox branch. Defaults to False. loss_cls (:obj:`ConfigDict` or dict): Config of cls loss. Defaults to dict(type='GaussianFocalLoss', loss_weight=1.0) loss_bbox (:obj:`ConfigDict` or dict): Config of bbox loss. Defaults to dict(type='GIoULoss', loss_weight=2.0). norm_cfg (:obj:`ConfigDict` or dict, optional): dictionary to construct and config norm layer. Defaults to ``norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)``. train_cfg (:obj:`ConfigDict` or dict, optional): Training config. Unused in CenterNet. Reserved for compatibility with SingleStageDetector. test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of CenterNet. """ def __init__(self, num_classes: int, in_channels: int, regress_ranges: RangeType = ((0, 80), (64, 160), (128, 320), (256, 640), (512, INF)), hm_min_radius: int = 4, hm_min_overlap: float = 0.8, more_pos: bool = False, more_pos_thresh: float = 0.2, more_pos_topk: int = 9, soft_weight_on_reg: bool = False, not_clamp_box: bool = False, loss_cls: ConfigType = dict( type='HeatmapFocalLoss', alpha=0.25, beta=4.0, gamma=2.0, pos_weight=1.0, neg_weight=1.0, sigmoid_clamp=1e-4, ignore_high_fp=-1.0, loss_weight=1.0, ), loss_bbox: ConfigType = dict( type='GIoULoss', loss_weight=2.0), norm_cfg: OptConfigType = dict( type='GN', num_groups=32, requires_grad=True), train_cfg: OptConfigType = None, test_cfg: OptConfigType = None, **kwargs) -> None: super().__init__( num_classes=num_classes, in_channels=in_channels, # loss_bbox=loss_bbox, loss_cls=loss_cls, norm_cfg=norm_cfg, train_cfg=train_cfg, test_cfg=test_cfg, **kwargs) self.soft_weight_on_reg = soft_weight_on_reg self.hm_min_radius = hm_min_radius self.more_pos_thresh = more_pos_thresh self.more_pos_topk = more_pos_topk self.more_pos = more_pos self.not_clamp_box = not_clamp_box self.delta = (1 - hm_min_overlap) / (1 + hm_min_overlap) self.loss_bbox = IOULoss('giou') # GaussianFocalLoss must be sigmoid mode self.use_sigmoid_cls = True self.cls_out_channels = num_classes self.regress_ranges = regress_ranges self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides]) def _init_layers(self) -> None: """Initialize layers of the head.""" self._init_reg_convs() self._init_predictor() def forward_single(self, x: Tensor, scale: Scale, stride: int) -> Tuple[Tensor, Tensor]: """Forward features of a single scale level. Args: x (Tensor): FPN feature maps of the specified stride. 
scale (:obj:`mmcv.cnn.Scale`): Learnable scale module to resize the bbox prediction. stride (int): The corresponding stride for feature maps. Returns: tuple: scores for each class, bbox predictions of input feature maps. """ for m in self.reg_convs: x = m(x) cls_score = self.conv_cls(x) bbox_pred = self.conv_reg(x) # scale the bbox_pred of different level # float to avoid overflow when enabling FP16 bbox_pred = scale(bbox_pred).float() # bbox_pred needed for gradient computation has been modified # by F.relu(bbox_pred) when run with PyTorch 1.10. So replace # F.relu(bbox_pred) with bbox_pred.clamp(min=0) bbox_pred = bbox_pred.clamp(min=0) return cls_score, bbox_pred # score aligned, box larger def loss_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None ) -> Dict[str, Tensor]: """Calculate the loss based on the features extracted by the detection head. Args: cls_scores (list[Tensor]): Box scores for each scale level, each is a 4D-tensor, the channel number is num_classes. bbox_preds (list[Tensor]): Box energies / deltas for each scale level, each is a 4D-tensor, the channel number is 4. batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict[str, Tensor]: A dictionary of loss components. """ num_imgs = cls_scores[0].size(0) assert len(cls_scores) == len(bbox_preds) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] all_level_points = self.prior_generator.grid_priors( featmap_sizes, dtype=bbox_preds[0].dtype, device=bbox_preds[0].device) # 1 flatten outputs flatten_cls_scores = [ cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) for cls_score in cls_scores ] flatten_bbox_preds = [ bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) for bbox_pred in bbox_preds ] flatten_cls_scores = torch.cat(flatten_cls_scores) flatten_bbox_preds = torch.cat(flatten_bbox_preds) # repeat points to align with bbox_preds flatten_points = torch.cat( [points.repeat(num_imgs, 1) for points in all_level_points]) assert (torch.isfinite(flatten_bbox_preds).all().item()) # 2 calc reg and cls branch targets cls_targets, bbox_targets = self.get_targets(all_level_points, batch_gt_instances) # 3 pos index for cls branch featmap_sizes = flatten_points.new_tensor(featmap_sizes) if self.more_pos: pos_inds, cls_labels = self.add_cls_pos_inds( flatten_points, flatten_bbox_preds, featmap_sizes, batch_gt_instances) else: pos_inds = self._get_label_inds(batch_gt_instances, batch_img_metas, featmap_sizes) # 4 calc cls loss if pos_inds is None: # num_gts=0 num_pos_cls = bbox_preds[0].new_tensor(0, dtype=torch.float) else: num_pos_cls = bbox_preds[0].new_tensor( len(pos_inds), dtype=torch.float) num_pos_cls = max(reduce_mean(num_pos_cls), 1.0) cat_agn_cls_targets = cls_targets.max(dim=1)[0] # M cls_pos_loss, cls_neg_loss = self.loss_cls( flatten_cls_scores.squeeze(1), cat_agn_cls_targets, pos_inds, num_pos_cls) # 5 calc reg loss pos_bbox_inds = torch.nonzero( bbox_targets.max(dim=1)[0] >= 0).squeeze(1) pos_bbox_preds = flatten_bbox_preds[pos_bbox_inds] pos_bbox_targets = bbox_targets[pos_bbox_inds] 
bbox_weight_map = cls_targets.max(dim=1)[0] bbox_weight_map = bbox_weight_map[pos_bbox_inds] bbox_weight_map = bbox_weight_map if self.soft_weight_on_reg \ else torch.ones_like(bbox_weight_map) num_pos_bbox = max(reduce_mean(bbox_weight_map.sum()), 1.0) if len(pos_bbox_inds) > 0: bbox_loss = self.loss_bbox( pos_bbox_preds, pos_bbox_targets, bbox_weight_map, reduction='sum') / num_pos_bbox else: bbox_loss = flatten_bbox_preds.sum() * 0 return dict( loss_bbox=bbox_loss, loss_cls_pos=cls_pos_loss, loss_cls_neg=cls_neg_loss) def loss_and_predict( self, x: Tuple[Tensor],
batch_data_samples: SampleList,
3
2023-11-30 08:58:00+00:00
16k
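forward_single above multiplies bbox_pred by a learnable per-level Scale before the relu-free clamp. For readers without mmcv installed, a functionally equivalent sketch (mmcv's actual implementation may differ in detail):

import torch
import torch.nn as nn

class Scale(nn.Module):
    # One learnable scalar per FPN level, as constructed in __init__ above.
    def __init__(self, scale: float = 1.0):
        super().__init__()
        self.scale = nn.Parameter(torch.tensor(scale, dtype=torch.float))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x * self.scale

bbox_pred = torch.randn(1, 4, 8, 8)
out = Scale(1.0)(bbox_pred).float().clamp(min=0)  # mirrors the record's clamp
assert out.min().item() >= 0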
SEU-ProactiveSecurity-Group/MalPurifier
examples/md_nn_test.py
[ { "identifier": "Dataset", "path": "core/defense/dataset.py", "snippet": "class Dataset(torch.utils.data.Dataset):\n def __init__(self, seed=0, device='cuda', feature_ext_args=None):\n \"\"\"\n 为机器学习模型学习构建数据集。\n \n :param seed: 随机种子\n :param device: 设备类型,'cuda' 或 'c...
import os.path as path import argparse import time import numpy from core.defense import Dataset from core.defense import MalwareDetectionDNN from tools.utils import save_args, get_group_args, to_tensor, dump_pickle, read_pickle
11,388
# Use __future__ imports to keep behavior consistent across Python 2 and Python 3 from __future__ import absolute_import from __future__ import division from __future__ import print_function # Import required libraries # Import custom modules # Initialize the argparse object for parsing command-line arguments cmd_md = argparse.ArgumentParser(description='arguments for learning malware detector') # Define command-line arguments related to feature extraction feature_argparse = cmd_md.add_argument_group(title='feature') feature_argparse.add_argument('--proc_number', type=int, default=2, help='The number of threads for feature extraction.') # Number of threads for feature extraction feature_argparse.add_argument('--number_of_smali_files', type=int, default=1000000, help='The maximum number of smali files to represent each app') # Maximum number of smali files used to represent each app feature_argparse.add_argument('--max_vocab_size', type=int, default=10000, help='The maximum vocabulary size') # Maximum vocabulary size feature_argparse.add_argument('--update', action='store_true', help='Whether to update existing features.') # Whether to update existing features # Define command-line arguments related to the detector detector_argparse = cmd_md.add_argument_group(title='detector') detector_argparse.add_argument('--cuda', action='store_true', default=False, help='whether to use CUDA (GPU) or CPU.') # Whether to use CUDA to enable the GPU detector_argparse.add_argument('--seed', type=int, default=0, help='random seed.') # Random seed detector_argparse.add_argument('--dense_hidden_units', type=lambda s: [int(u) for u in s.split(',')], default='200,200', help='delimited list input, e.g., "200,200"') # List of dense hidden units detector_argparse.add_argument('--dropout', type=float, default=0.6, help='dropout rate') # Dropout rate detector_argparse.add_argument('--alpha_', type=float, default=0.2, help='slope coefficient of leaky-relu or elu') # Slope coefficient of leaky-relu or elu detector_argparse.add_argument('--smooth', action='store_true', default=False, help='use smooth activation elu (rather than leaky-relu) in the GAT layer.') # Use the smooth activation elu in the GAT layer detector_argparse.add_argument('--batch_size', type=int, default=128, help='mini-batch size') # Mini-batch size detector_argparse.add_argument('--epochs', type=int, default=50, help='number of epochs to train.') # Number of training epochs detector_argparse.add_argument('--lr', type=float, default=0.001, help='initial learning rate.') # Initial learning rate detector_argparse.add_argument('--weight_decay', type=float, default=0e-4, help='coefficient of weight decay') # Weight decay coefficient # Define command-line arguments related to the dataset dataset_argparse = cmd_md.add_argument_group(title='data_producer') detector_argparse.add_argument('--cache', action='store_true', default=False, help='use cached data or not.') # Whether to use cached data # Define command-line arguments related to the mode mode_argparse = cmd_md.add_argument_group(title='mode') mode_argparse.add_argument('--mode', type=str, default='train', choices=['train', 'test'], required=False, help='learn a model or test it.') # Mode: train a model or test it mode_argparse.add_argument('--model_name', type=str, default='xxxxxxxx-xxxxxx', required=False, help='suffix date of a tested model name.') # Date suffix of the tested model name # Define the main function def _main(): args = cmd_md.parse_args() # Create the dataset from the arguments
# Use __future__ imports to keep behavior consistent across Python 2 and Python 3 from __future__ import absolute_import from __future__ import division from __future__ import print_function # Import required libraries # Import custom modules # Initialize the argparse object for parsing command-line arguments cmd_md = argparse.ArgumentParser(description='arguments for learning malware detector') # Define command-line arguments related to feature extraction feature_argparse = cmd_md.add_argument_group(title='feature') feature_argparse.add_argument('--proc_number', type=int, default=2, help='The number of threads for feature extraction.') # Number of threads for feature extraction feature_argparse.add_argument('--number_of_smali_files', type=int, default=1000000, help='The maximum number of smali files to represent each app') # Maximum number of smali files used to represent each app feature_argparse.add_argument('--max_vocab_size', type=int, default=10000, help='The maximum vocabulary size') # Maximum vocabulary size feature_argparse.add_argument('--update', action='store_true', help='Whether to update existing features.') # Whether to update existing features # Define command-line arguments related to the detector detector_argparse = cmd_md.add_argument_group(title='detector') detector_argparse.add_argument('--cuda', action='store_true', default=False, help='whether to use CUDA (GPU) or CPU.') # Whether to use CUDA to enable the GPU detector_argparse.add_argument('--seed', type=int, default=0, help='random seed.') # Random seed detector_argparse.add_argument('--dense_hidden_units', type=lambda s: [int(u) for u in s.split(',')], default='200,200', help='delimited list input, e.g., "200,200"') # List of dense hidden units detector_argparse.add_argument('--dropout', type=float, default=0.6, help='dropout rate') # Dropout rate detector_argparse.add_argument('--alpha_', type=float, default=0.2, help='slope coefficient of leaky-relu or elu') # Slope coefficient of leaky-relu or elu detector_argparse.add_argument('--smooth', action='store_true', default=False, help='use smooth activation elu (rather than leaky-relu) in the GAT layer.') # Use the smooth activation elu in the GAT layer detector_argparse.add_argument('--batch_size', type=int, default=128, help='mini-batch size') # Mini-batch size detector_argparse.add_argument('--epochs', type=int, default=50, help='number of epochs to train.') # Number of training epochs detector_argparse.add_argument('--lr', type=float, default=0.001, help='initial learning rate.') # Initial learning rate detector_argparse.add_argument('--weight_decay', type=float, default=0e-4, help='coefficient of weight decay') # Weight decay coefficient # Define command-line arguments related to the dataset dataset_argparse = cmd_md.add_argument_group(title='data_producer') detector_argparse.add_argument('--cache', action='store_true', default=False, help='use cached data or not.') # Whether to use cached data # Define command-line arguments related to the mode mode_argparse = cmd_md.add_argument_group(title='mode') mode_argparse.add_argument('--mode', type=str, default='train', choices=['train', 'test'], required=False, help='learn a model or test it.') # Mode: train a model or test it mode_argparse.add_argument('--model_name', type=str, default='xxxxxxxx-xxxxxx', required=False, help='suffix date of a tested model name.') # Date suffix of the tested model name # Define the main function def _main(): args = cmd_md.parse_args() # Create the dataset from the arguments
dataset = Dataset(feature_ext_args=get_group_args(args, cmd_md, 'feature'))
0
2023-11-27 02:00:23+00:00
16k
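A quick smoke test of the CLI defined above, parsing a made-up argv list instead of sys.argv; assumes cmd_md from the record is in scope, and every value below is illustrative:

args = cmd_md.parse_args([
    "--cuda",
    "--batch_size", "64",
    "--epochs", "10",
    "--dense_hidden_units", "100,100",
    "--mode", "train",
])
assert args.cuda is True                      # store_true flag
assert args.dense_hidden_units == [100, 100]  # the lambda type splits on ","
assert args.model_name == "xxxxxxxx-xxxxxx"   # default placeholder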
Vali-98/XTTS-RVC-UI
rvc.py
[ { "identifier": "SynthesizerTrnMs256NSFsid", "path": "infer_pack/models.py", "snippet": "class SynthesizerTrnMs256NSFsid(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_he...
from multiprocessing import cpu_count from pathlib import Path from fairseq import checkpoint_utils from scipy.io import wavfile from infer_pack.models import ( SynthesizerTrnMs256NSFsid, SynthesizerTrnMs256NSFsid_nono, SynthesizerTrnMs768NSFsid, SynthesizerTrnMs768NSFsid_nono, ) from vc_infer_pipeline import VC import torch import librosa import numpy as np
11,030
class Config: def __init__(self, device, is_half): self.device = device self.is_half = is_half self.n_cpu = 0 self.gpu_name = None self.gpu_mem = None self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config() def device_config(self) -> tuple: if torch.cuda.is_available(): i_device = int(self.device.split(":")[-1]) self.gpu_name = torch.cuda.get_device_name(i_device) if ( ("16" in self.gpu_name and "V100" not in self.gpu_name.upper()) or "P40" in self.gpu_name.upper() or "1060" in self.gpu_name or "1070" in self.gpu_name or "1080" in self.gpu_name ): print("16 series/10 series P40 forced single precision") self.is_half = False else: self.gpu_name = None self.gpu_mem = int( torch.cuda.get_device_properties(i_device).total_memory / 1024 / 1024 / 1024 + 0.4 ) if self.gpu_mem <= 2: print('Not enough VRAM to load models (Probably)') self.device = 'cpu' elif torch.backends.mps.is_available(): print("No supported N-card found, use MPS for inference") self.device = "mps" else: print("No supported N-card found, use CPU for inference") self.device = "cpu" if self.n_cpu == 0: self.n_cpu = cpu_count() if self.is_half: # 6G memory config x_pad = 3 x_query = 10 x_center = 60 x_max = 65 else: # 5G memory config x_pad = 1 x_query = 6 x_center = 38 x_max = 41 if self.gpu_mem != None and self.gpu_mem <= 4: x_pad = 1 x_query = 5 x_center = 30 x_max = 32 return x_pad, x_query, x_center, x_max def load_hubert(device, is_half, model_path): models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task([model_path], suffix='', ) hubert = models[0] hubert = hubert.to(device) if is_half: hubert = hubert.half() else: hubert = hubert.float() hubert.eval() return hubert def get_vc(device, is_half, config, model_path): cpt = torch.load(model_path, map_location='cpu') if "config" not in cpt or "weight" not in cpt: raise ValueError(f'Incorrect format for {model_path}. Use a voice model trained using RVC v2 instead.') tgt_sr = cpt["config"][-1] cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] if_f0 = cpt.get("f0", 1) version = cpt.get("version", "v1") if version == "v1": if if_f0 == 1:
class Config: def __init__(self, device, is_half): self.device = device self.is_half = is_half self.n_cpu = 0 self.gpu_name = None self.gpu_mem = None self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config() def device_config(self) -> tuple: if torch.cuda.is_available(): i_device = int(self.device.split(":")[-1]) self.gpu_name = torch.cuda.get_device_name(i_device) if ( ("16" in self.gpu_name and "V100" not in self.gpu_name.upper()) or "P40" in self.gpu_name.upper() or "1060" in self.gpu_name or "1070" in self.gpu_name or "1080" in self.gpu_name ): print("16 series/10 series P40 forced single precision") self.is_half = False else: self.gpu_name = None self.gpu_mem = int( torch.cuda.get_device_properties(i_device).total_memory / 1024 / 1024 / 1024 + 0.4 ) if self.gpu_mem <= 2: print('Not enough VRAM to load models (Probably)') self.device = 'cpu' elif torch.backends.mps.is_available(): print("No supported N-card found, use MPS for inference") self.device = "mps" else: print("No supported N-card found, use CPU for inference") self.device = "cpu" if self.n_cpu == 0: self.n_cpu = cpu_count() if self.is_half: # 6G memory config x_pad = 3 x_query = 10 x_center = 60 x_max = 65 else: # 5G memory config x_pad = 1 x_query = 6 x_center = 38 x_max = 41 if self.gpu_mem != None and self.gpu_mem <= 4: x_pad = 1 x_query = 5 x_center = 30 x_max = 32 return x_pad, x_query, x_center, x_max def load_hubert(device, is_half, model_path): models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task([model_path], suffix='', ) hubert = models[0] hubert = hubert.to(device) if is_half: hubert = hubert.half() else: hubert = hubert.float() hubert.eval() return hubert def get_vc(device, is_half, config, model_path): cpt = torch.load(model_path, map_location='cpu') if "config" not in cpt or "weight" not in cpt: raise ValueError(f'Incorrect format for {model_path}. Use a voice model trained using RVC v2 instead.') tgt_sr = cpt["config"][-1] cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] if_f0 = cpt.get("f0", 1) version = cpt.get("version", "v1") if version == "v1": if if_f0 == 1:
net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=is_half)
0
2023-11-30 08:47:28+00:00
16k
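The record's next_line instantiates SynthesizerTrnMs256NSFsid for version == "v1" and if_f0 == 1; presumably get_vc continues with a four-way branch over the imported classes. One way to write that branch as a table (a sketch of the continuation, not the repo's actual code; assumes the four classes plus version and if_f0 from the record are in scope):

_SYNTH_BY_CFG = {
    ("v1", 1): SynthesizerTrnMs256NSFsid,
    ("v1", 0): SynthesizerTrnMs256NSFsid_nono,
    ("v2", 1): SynthesizerTrnMs768NSFsid,
    ("v2", 0): SynthesizerTrnMs768NSFsid_nono,
}
# The f0 variants carry a pitch (NSF) branch; the _nono variants are f0-free.
net_g_cls = _SYNTH_BY_CFG[(version, if_f0)]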
ubc-vision/nf-soft-mining
examples/utils.py
[ { "identifier": "OccGridEstimator", "path": "nerfacc/estimators/occ_grid.py", "snippet": "class OccGridEstimator(AbstractEstimator):\n \"\"\"Occupancy grid transmittance estimator for spatial skipping.\n\n References: \"Instant Neural Graphics Primitives.\"\n\n Args:\n roi_aabb: The axis...
import random import numpy as np import torch from typing import Optional, Sequence from typing import Literal from typing_extensions import Literal from datasets.utils import Rays, namedtuple_map from torch.utils.data._utils.collate import collate, default_collate_fn_map from nerfacc.estimators.occ_grid import OccGridEstimator from nerfacc.estimators.prop_net import PropNetEstimator from nerfacc.grid import ray_aabb_intersect, traverse_grids from nerfacc.volrend import ( accumulate_along_rays_, render_weight_from_density, rendering, )
13,479
@torch.no_grad() def render_image_with_occgrid_test( max_samples: int, # scene radiance_field: torch.nn.Module, estimator: OccGridEstimator, rays: Rays, # rendering options near_plane: float = 0.0, far_plane: float = 1e10, render_step_size: float = 1e-3, render_bkgd: Optional[torch.Tensor] = None, cone_angle: float = 0.0, alpha_thre: float = 0.0, early_stop_eps: float = 1e-4, # only useful for dnerf timestamps: Optional[torch.Tensor] = None, ): """Render the pixels of an image.""" rays_shape = rays.origins.shape if len(rays_shape) == 3: height, width, _ = rays_shape num_rays = height * width rays = namedtuple_map( lambda r: r.reshape([num_rays] + list(r.shape[2:])), rays ) else: num_rays, _ = rays_shape def rgb_sigma_fn(t_starts, t_ends, ray_indices): t_origins = rays.origins[ray_indices] t_dirs = rays.viewdirs[ray_indices] positions = ( t_origins + t_dirs * (t_starts[:, None] + t_ends[:, None]) / 2.0 ) if timestamps is not None: # dnerf t = ( timestamps[ray_indices] if radiance_field.training else timestamps.expand_as(positions[:, :1]) ) rgbs, sigmas = radiance_field(positions, t, t_dirs) else: rgbs, sigmas = radiance_field(positions, t_dirs) return rgbs, sigmas.squeeze(-1) device = rays.origins.device opacity = torch.zeros(num_rays, 1, device=device) depth = torch.zeros(num_rays, 1, device=device) rgb = torch.zeros(num_rays, 3, device=device) ray_mask = torch.ones(num_rays, device=device).bool() # 1 for synthetic scenes, 4 for real scenes min_samples = 1 if cone_angle == 0 else 4 iter_samples = total_samples = 0 rays_o = rays.origins rays_d = rays.viewdirs near_planes = torch.full_like(rays_o[..., 0], fill_value=near_plane) far_planes = torch.full_like(rays_o[..., 0], fill_value=far_plane) t_mins, t_maxs, hits = ray_aabb_intersect(rays_o, rays_d, estimator.aabbs) n_grids = estimator.binaries.size(0) if n_grids > 1: t_sorted, t_indices = torch.sort(torch.cat([t_mins, t_maxs], -1), -1) else: t_sorted = torch.cat([t_mins, t_maxs], -1) t_indices = torch.arange( 0, n_grids * 2, device=t_mins.device, dtype=torch.int64 ).expand(num_rays, n_grids * 2) opc_thre = 1 - early_stop_eps while iter_samples < max_samples: n_alive = ray_mask.sum().item() if n_alive == 0: break # the number of samples to add on each ray n_samples = max(min(num_rays // n_alive, 64), min_samples) iter_samples += n_samples # ray marching (intervals, samples, termination_planes) = traverse_grids( # rays rays_o, # [n_rays, 3] rays_d, # [n_rays, 3] # grids estimator.binaries, # [m, resx, resy, resz] estimator.aabbs, # [m, 6] # options near_planes, # [n_rays] far_planes, # [n_rays] render_step_size, cone_angle, n_samples, True, ray_mask, # pre-compute intersections t_sorted, # [n_rays, m*2] t_indices, # [n_rays, m*2] hits, # [n_rays, m] ) t_starts = intervals.vals[intervals.is_left] t_ends = intervals.vals[intervals.is_right] ray_indices = samples.ray_indices[samples.is_valid] packed_info = samples.packed_info # get rgb and sigma from radiance field rgbs, sigmas = rgb_sigma_fn(t_starts, t_ends, ray_indices) # volume rendering using native cuda scan
""" Copyright (c) 2022 Ruilong Li, UC Berkeley. """ try: except ImportError: NERF_SYNTHETIC_SCENES = [ "chair", "drums", "ficus", "hotdog", "lego", "materials", "mic", "ship", ] MIPNERF360_UNBOUNDED_SCENES = [ "garden", "bicycle", "bonsai", "counter", "kitchen", "room", "stump", ] LLFF_NDC_SCENES = [ "fern", "flower", "fortress", "horns", "leaves", "orchids", "room_llff", "trex", ] def set_random_seed(seed): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) def render_image_with_occgrid( # scene radiance_field: torch.nn.Module, estimator: OccGridEstimator, rays: Rays, # rendering options near_plane: float = 0.0, far_plane: float = 1e10, render_step_size: float = 1e-3, render_bkgd: Optional[torch.Tensor] = None, cone_angle: float = 0.0, alpha_thre: float = 0.0, # test options test_chunk_size: int = 8192, # only useful for dnerf timestamps: Optional[torch.Tensor] = None, ): """Render the pixels of an image.""" rays_shape = rays.origins.shape if len(rays_shape) == 3: height, width, _ = rays_shape num_rays = height * width rays = namedtuple_map( lambda r: r.reshape([num_rays] + list(r.shape[2:])), rays ) else: num_rays, _ = rays_shape def sigma_fn(t_starts, t_ends, ray_indices): t_origins = chunk_rays.origins[ray_indices] t_dirs = chunk_rays.viewdirs[ray_indices] positions = t_origins + t_dirs * (t_starts + t_ends)[:, None] / 2.0 if timestamps is not None: # dnerf t = ( timestamps[ray_indices] if radiance_field.training else timestamps.expand_as(positions[:, :1]) ) sigmas = radiance_field.query_density(positions, t) else: sigmas = radiance_field.query_density(positions) return sigmas.squeeze(-1) def rgb_sigma_fn(t_starts, t_ends, ray_indices): t_origins = chunk_rays.origins[ray_indices] t_dirs = chunk_rays.viewdirs[ray_indices] positions = t_origins + t_dirs * (t_starts + t_ends)[:, None] / 2.0 if timestamps is not None: # dnerf t = ( timestamps[ray_indices] if radiance_field.training else timestamps.expand_as(positions[:, :1]) ) rgbs, sigmas = radiance_field(positions, t, t_dirs) else: rgbs, sigmas = radiance_field(positions, t_dirs) return rgbs, sigmas.squeeze(-1) results = [] chunk = ( torch.iinfo(torch.int32).max if radiance_field.training else test_chunk_size ) for i in range(0, num_rays, chunk): chunk_rays = namedtuple_map(lambda r: r[i : i + chunk], rays) ray_indices, t_starts, t_ends = estimator.sampling( chunk_rays.origins, chunk_rays.viewdirs, sigma_fn=sigma_fn, near_plane=near_plane, far_plane=far_plane, render_step_size=render_step_size, stratified=radiance_field.training, cone_angle=cone_angle, alpha_thre=alpha_thre, ) rgb, opacity, depth, extras = rendering( t_starts, t_ends, ray_indices, n_rays=chunk_rays.origins.shape[0], rgb_sigma_fn=rgb_sigma_fn, render_bkgd=render_bkgd, ) chunk_results = [rgb, opacity, depth, len(t_starts)] results.append(chunk_results) colors, opacities, depths, n_rendering_samples = [ torch.cat(r, dim=0) if isinstance(r[0], torch.Tensor) else r for r in zip(*results) ] return ( colors.view((*rays_shape[:-1], -1)), opacities.view((*rays_shape[:-1], -1)), depths.view((*rays_shape[:-1], -1)), sum(n_rendering_samples), ) def render_image_with_propnet( # scene radiance_field: torch.nn.Module, proposal_networks: Sequence[torch.nn.Module], estimator: PropNetEstimator, rays: Rays, # rendering options num_samples: int, num_samples_per_prop: Sequence[int], near_plane: Optional[float] = None, far_plane: Optional[float] = None, sampling_type: Literal["uniform", "lindisp"] = "lindisp", opaque_bkgd: bool = True, render_bkgd: Optional[torch.Tensor] = None, # 
train options proposal_requires_grad: bool = False, # test options test_chunk_size: int = 8192, ): """Render the pixels of an image.""" rays_shape = rays.origins.shape if len(rays_shape) == 3: height, width, _ = rays_shape num_rays = height * width rays = namedtuple_map( lambda r: r.reshape([num_rays] + list(r.shape[2:])), rays ) else: num_rays, _ = rays_shape def prop_sigma_fn(t_starts, t_ends, proposal_network): t_origins = chunk_rays.origins[..., None, :] t_dirs = chunk_rays.viewdirs[..., None, :].detach() positions = t_origins + t_dirs * (t_starts + t_ends)[..., None] / 2.0 sigmas = proposal_network(positions) if opaque_bkgd: sigmas[..., -1, :] = torch.inf return sigmas.squeeze(-1) def rgb_sigma_fn(t_starts, t_ends, ray_indices): t_origins = chunk_rays.origins[..., None, :] t_dirs = chunk_rays.viewdirs[..., None, :].repeat_interleave( t_starts.shape[-1], dim=-2 ) positions = t_origins + t_dirs * (t_starts + t_ends)[..., None] / 2.0 rgb, sigmas = radiance_field(positions, t_dirs) if opaque_bkgd: sigmas[..., -1, :] = torch.inf return rgb, sigmas.squeeze(-1) results = [] chunk = ( torch.iinfo(torch.int32).max if radiance_field.training else test_chunk_size ) for i in range(0, num_rays, chunk): chunk_rays = namedtuple_map(lambda r: r[i : i + chunk], rays) t_starts, t_ends = estimator.sampling( prop_sigma_fns=[ lambda *args: prop_sigma_fn(*args, p) for p in proposal_networks ], prop_samples=num_samples_per_prop, num_samples=num_samples, n_rays=chunk_rays.origins.shape[0], near_plane=near_plane, far_plane=far_plane, sampling_type=sampling_type, stratified=radiance_field.training, requires_grad=proposal_requires_grad, ) rgb, opacity, depth, extra = rendering( t_starts, t_ends, ray_indices=None, n_rays=None, rgb_sigma_fn=rgb_sigma_fn, render_bkgd=render_bkgd, ) ray_indices = torch.arange(0, t_starts.shape[0], device=t_starts.device).repeat_interleave(t_starts.shape[1]).flatten() unique_indices, inverse = torch.unique(ray_indices, sorted=True, return_inverse=True) binnums = torch.bincount(inverse) start_positions = torch.cat([torch.tensor([0], dtype=torch.int, device=ray_indices.device), torch.cumsum(binnums, 0)[:-1]]) ray_a = torch.stack([unique_indices.int(), start_positions.int(), binnums], dim=1) chunk_results = [rgb, opacity, depth, extra, ray_a] results.append(chunk_results) colors, opacities, depths, extras, rays_a = collate( results, collate_fn_map={ **default_collate_fn_map, torch.Tensor: lambda x, **_: torch.cat(x, 0), }, ) distkwarg = {"ws": extras['weights'].flatten(), "deltas": (t_ends - t_starts).flatten(), "ts": t_starts.flatten(), "rays_a": rays_a} return ( colors.view((*rays_shape[:-1], -1)), opacities.view((*rays_shape[:-1], -1)), depths.view((*rays_shape[:-1], -1)), extras, distkwarg ) @torch.no_grad() def render_image_with_occgrid_test( max_samples: int, # scene radiance_field: torch.nn.Module, estimator: OccGridEstimator, rays: Rays, # rendering options near_plane: float = 0.0, far_plane: float = 1e10, render_step_size: float = 1e-3, render_bkgd: Optional[torch.Tensor] = None, cone_angle: float = 0.0, alpha_thre: float = 0.0, early_stop_eps: float = 1e-4, # only useful for dnerf timestamps: Optional[torch.Tensor] = None, ): """Render the pixels of an image.""" rays_shape = rays.origins.shape if len(rays_shape) == 3: height, width, _ = rays_shape num_rays = height * width rays = namedtuple_map( lambda r: r.reshape([num_rays] + list(r.shape[2:])), rays ) else: num_rays, _ = rays_shape def rgb_sigma_fn(t_starts, t_ends, ray_indices): t_origins = rays.origins[ray_indices] 
t_dirs = rays.viewdirs[ray_indices] positions = ( t_origins + t_dirs * (t_starts[:, None] + t_ends[:, None]) / 2.0 ) if timestamps is not None: # dnerf t = ( timestamps[ray_indices] if radiance_field.training else timestamps.expand_as(positions[:, :1]) ) rgbs, sigmas = radiance_field(positions, t, t_dirs) else: rgbs, sigmas = radiance_field(positions, t_dirs) return rgbs, sigmas.squeeze(-1) device = rays.origins.device opacity = torch.zeros(num_rays, 1, device=device) depth = torch.zeros(num_rays, 1, device=device) rgb = torch.zeros(num_rays, 3, device=device) ray_mask = torch.ones(num_rays, device=device).bool() # 1 for synthetic scenes, 4 for real scenes min_samples = 1 if cone_angle == 0 else 4 iter_samples = total_samples = 0 rays_o = rays.origins rays_d = rays.viewdirs near_planes = torch.full_like(rays_o[..., 0], fill_value=near_plane) far_planes = torch.full_like(rays_o[..., 0], fill_value=far_plane) t_mins, t_maxs, hits = ray_aabb_intersect(rays_o, rays_d, estimator.aabbs) n_grids = estimator.binaries.size(0) if n_grids > 1: t_sorted, t_indices = torch.sort(torch.cat([t_mins, t_maxs], -1), -1) else: t_sorted = torch.cat([t_mins, t_maxs], -1) t_indices = torch.arange( 0, n_grids * 2, device=t_mins.device, dtype=torch.int64 ).expand(num_rays, n_grids * 2) opc_thre = 1 - early_stop_eps while iter_samples < max_samples: n_alive = ray_mask.sum().item() if n_alive == 0: break # the number of samples to add on each ray n_samples = max(min(num_rays // n_alive, 64), min_samples) iter_samples += n_samples # ray marching (intervals, samples, termination_planes) = traverse_grids( # rays rays_o, # [n_rays, 3] rays_d, # [n_rays, 3] # grids estimator.binaries, # [m, resx, resy, resz] estimator.aabbs, # [m, 6] # options near_planes, # [n_rays] far_planes, # [n_rays] render_step_size, cone_angle, n_samples, True, ray_mask, # pre-compute intersections t_sorted, # [n_rays, m*2] t_indices, # [n_rays, m*2] hits, # [n_rays, m] ) t_starts = intervals.vals[intervals.is_left] t_ends = intervals.vals[intervals.is_right] ray_indices = samples.ray_indices[samples.is_valid] packed_info = samples.packed_info # get rgb and sigma from radiance field rgbs, sigmas = rgb_sigma_fn(t_starts, t_ends, ray_indices) # volume rendering using native cuda scan
weights, _, alphas = render_weight_from_density(
5
2023-11-27 22:12:55+00:00
16k
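Both render functions above stream rays with `for i in range(0, num_rays, chunk)`, using an effectively unbounded chunk during training and test_chunk_size at eval to bound peak memory. The pattern in isolation, with hypothetical names:

import torch

def map_in_chunks(fn, x: torch.Tensor, chunk: int) -> torch.Tensor:
    # Apply fn to fixed-size row slices and concatenate, like the
    # chunk_rays loop in render_image_with_occgrid.
    outs = [fn(x[i:i + chunk]) for i in range(0, x.shape[0], chunk)]
    return torch.cat(outs, dim=0)

rays = torch.randn(10_000, 3)
colors = map_in_chunks(lambda r: r * 0.5, rays, chunk=4096)
assert colors.shape == rays.shape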
facebookresearch/SOC-matching
main.py
[ { "identifier": "get_folder_name", "path": "SOC_matching/utils.py", "snippet": "def get_folder_name(cfg):\n folder_name = (\n cfg.method.algorithm\n + \"_\"\n + cfg.method.setting\n + \"_\"\n + str(cfg.method.lmbd)\n + \"_\"\n + str(cfg.method.T)\n ...
import torch import sys import logging import os import time import json import hydra import traceback from tqdm.notebook import tqdm from omegaconf import DictConfig from SOC_matching.utils import ( get_folder_name, get_file_name, control_objective, save_results, compute_EMA, normalization_constant, ) from SOC_matching.method import ( SOC_Solver, ) from SOC_matching.experiment_settings.settings import define_variables
11,786
logging.getLogger("lightning.pytorch").setLevel(logging.getLevelName("INFO")) print(cfg) print("Found {} CUDA devices.".format(torch.cuda.device_count())) for i in range(torch.cuda.device_count()): props = torch.cuda.get_device_properties(i) print( "{} \t Memory: {:.2f}GB".format( props.name, props.total_memory / (1024**3) ) ) keys = [ "SLURM_NODELIST", "SLURM_JOB_ID", "SLURM_NTASKS", "SLURM_JOB_NAME", "SLURM_PROCID", "SLURM_LOCALID", "SLURM_NODEID", ] log.info(json.dumps({k: os.environ.get(k, None) for k in keys}, indent=4)) cmd_str = " \\\n".join([f"python {sys.argv[0]}"] + ["\t" + x for x in sys.argv[1:]]) with open("cmd.sh", "w") as fout: print("#!/bin/bash\n", file=fout) print(cmd_str, file=fout) log.info(f"CWD: {os.getcwd()}") if cfg.method.use_gpu: cfg.method.device = "cuda:" + str(cfg.method.device_number) else: cfg.method.device = "cpu" torch.manual_seed(cfg.method.seed) algorithm = cfg.method.algorithm folder_name = ( cfg.method.algorithm + "_" + cfg.method.setting + "_" + str(cfg.method.lmbd) + "_" + str(cfg.method.T) + "_" + str(cfg.method.num_steps) + "_" + str(cfg.method.use_warm_start) + "_" + str(cfg.method.seed) + "_" + str(cfg.optim.batch_size) + "_" + str(cfg.optim.M_lr) + "_" + str(cfg.optim.nabla_V_lr) ) ts = torch.linspace(0, cfg.method.T, cfg.method.num_steps + 1).to(cfg.method.device) folder_name = get_folder_name(cfg) file_name = get_file_name(folder_name, num_iterations=cfg.method.num_iterations) EMA_loss = 0 EMA_norm_sqd_diff = 0 EMA_coeff = 0.01 EMA_weight_mean_coeff = 0.002 x0, sigma, optimal_sde, neural_sde, u_warm_start = define_variables(cfg, ts) if optimal_sde is not None: ground_truth_control = optimal_sde.u else: ground_truth_control = None state0 = x0.repeat(cfg.optim.batch_size, 1) ########### Compute normalization constant and control L2 error for initial control ############ print( f"Estimating normalization constant and control L2 error for initial control..." ) ( normalization_const, normalization_const_std_error, norm_sqd_diff_mean, ) = normalization_constant( neural_sde, state0, ts, cfg, n_batches_normalization=512, ground_truth_control=ground_truth_control, ) print( f"Normalization_constant (mean and std. error): {normalization_const:5.8E} {normalization_const_std_error:5.8E}" ) if ground_truth_control is not None: print( f"Control L2 error for initial control: {norm_sqd_diff_mean / normalization_const}" ) ########### Compute control loss for optimal control ############ if optimal_sde is not None: ( optimal_control_objective_mean, optimal_control_objective_std_error, ) = control_objective( optimal_sde, x0, ts, cfg.method.lmbd, cfg.optim.batch_size, total_n_samples=cfg.method.n_samples_control, verbose=False, ) print( f"Optimal control loss mean: {optimal_control_objective_mean:5.10f}, Optimal control loss std. error: {optimal_control_objective_std_error:5.10f}" )
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory. log = logging.getLogger(__name__) @hydra.main(version_base=None, config_path="configs", config_name="soc") def main(cfg: DictConfig): logging.getLogger("lightning.pytorch").setLevel(logging.getLevelName("INFO")) print(cfg) print("Found {} CUDA devices.".format(torch.cuda.device_count())) for i in range(torch.cuda.device_count()): props = torch.cuda.get_device_properties(i) print( "{} \t Memory: {:.2f}GB".format( props.name, props.total_memory / (1024**3) ) ) keys = [ "SLURM_NODELIST", "SLURM_JOB_ID", "SLURM_NTASKS", "SLURM_JOB_NAME", "SLURM_PROCID", "SLURM_LOCALID", "SLURM_NODEID", ] log.info(json.dumps({k: os.environ.get(k, None) for k in keys}, indent=4)) cmd_str = " \\\n".join([f"python {sys.argv[0]}"] + ["\t" + x for x in sys.argv[1:]]) with open("cmd.sh", "w") as fout: print("#!/bin/bash\n", file=fout) print(cmd_str, file=fout) log.info(f"CWD: {os.getcwd()}") if cfg.method.use_gpu: cfg.method.device = "cuda:" + str(cfg.method.device_number) else: cfg.method.device = "cpu" torch.manual_seed(cfg.method.seed) algorithm = cfg.method.algorithm folder_name = ( cfg.method.algorithm + "_" + cfg.method.setting + "_" + str(cfg.method.lmbd) + "_" + str(cfg.method.T) + "_" + str(cfg.method.num_steps) + "_" + str(cfg.method.use_warm_start) + "_" + str(cfg.method.seed) + "_" + str(cfg.optim.batch_size) + "_" + str(cfg.optim.M_lr) + "_" + str(cfg.optim.nabla_V_lr) ) ts = torch.linspace(0, cfg.method.T, cfg.method.num_steps + 1).to(cfg.method.device) folder_name = get_folder_name(cfg) file_name = get_file_name(folder_name, num_iterations=cfg.method.num_iterations) EMA_loss = 0 EMA_norm_sqd_diff = 0 EMA_coeff = 0.01 EMA_weight_mean_coeff = 0.002 x0, sigma, optimal_sde, neural_sde, u_warm_start = define_variables(cfg, ts) if optimal_sde is not None: ground_truth_control = optimal_sde.u else: ground_truth_control = None state0 = x0.repeat(cfg.optim.batch_size, 1) ########### Compute normalization constant and control L2 error for initial control ############ print( f"Estimating normalization constant and control L2 error for initial control..." ) ( normalization_const, normalization_const_std_error, norm_sqd_diff_mean, ) = normalization_constant( neural_sde, state0, ts, cfg, n_batches_normalization=512, ground_truth_control=ground_truth_control, ) print( f"Normalization_constant (mean and std. error): {normalization_const:5.8E} {normalization_const_std_error:5.8E}" ) if ground_truth_control is not None: print( f"Control L2 error for initial control: {norm_sqd_diff_mean / normalization_const}" ) ########### Compute control loss for optimal control ############ if optimal_sde is not None: ( optimal_control_objective_mean, optimal_control_objective_std_error, ) = control_objective( optimal_sde, x0, ts, cfg.method.lmbd, cfg.optim.batch_size, total_n_samples=cfg.method.n_samples_control, verbose=False, ) print( f"Optimal control loss mean: {optimal_control_objective_mean:5.10f}, Optimal control loss std. error: {optimal_control_objective_std_error:5.10f}" )
soc_solver = SOC_Solver(
6
2023-12-04 20:26:18+00:00
16k
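The training script in the record above initializes EMA_loss, EMA_norm_sqd_diff and EMA_coeff and smooths its metrics through the imported compute_EMA helper. A minimal sketch of that bookkeeping, assuming a standard exponential moving average (the real helper lives in SOC_matching/utils.py and its exact signature and bias handling are assumptions here):

def compute_ema(new_value, ema, coeff, itr):
    # Assumed form: return the raw value on the first iteration so the
    # running average starts unbiased, then blend with coefficient `coeff`.
    if itr == 0:
        return new_value
    return coeff * new_value + (1.0 - coeff) * ema

ema_loss = 0.0
for itr, loss in enumerate([2.0, 1.5, 1.2]):
    ema_loss = compute_ema(loss, ema_loss, coeff=0.01, itr=itr)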
yiwenlu66/learning-qp
src/modules/qp_unrolled_network.py
[ { "identifier": "QPSolver", "path": "src/modules/qp_solver.py", "snippet": "class QPSolver(nn.Module):\n \"\"\"\n Solve QP problem:\n minimize (1/2)x'Px + q'x\n subject to Hx + b >= 0,\n where x in R^n, b in R^m.\n \"\"\"\n def __init__(self, device, n, m,\n P=None, P...
import torch import numpy as np import scipy import functools import os from torch import nn from ..modules.qp_solver import QPSolver from ..modules.warm_starter import WarmStarter from ..utils.torch_utils import make_psd, interpolate_state_dicts from ..utils.mpc_utils import mpc2qp, scenario_robust_mpc, tube_robust_mpc from ..utils.osqp_utils import osqp_oracle from ..utils.np_batch_op import np_batch_op from concurrent.futures import ThreadPoolExecutor
11,241
if not self.strict_affine_layer: self.qb_affine_layer = nn.Linear(input_size, self.n_q_param + self.n_b_param, bias=not self.symmetric) else: self.qb_affine_layer = StrictAffineLayer(input_size, self.n_qp, self.m_qp, self.obs_has_half_ref) if self.n_mlp_output > 0: self.mlp = mlp_builder(input_size, self.n_mlp_output) else: self.mlp = None # TODO: add preconditioner self.warm_starter = WarmStarter(device, n_qp, m_qp, fixed_P=shared_PH, fixed_H=shared_PH) if use_warm_starter else None self.warm_starter_delayed = WarmStarter(device, n_qp, m_qp, fixed_P=shared_PH, fixed_H=shared_PH) if use_warm_starter else None self.train_warm_starter = train_warm_starter self.ws_loss_coef = ws_loss_coef self.ws_update_rate = ws_update_rate self.ws_loss_shaper = ws_loss_shaper # P, H are fixed when the model is in test mode, and they are constant across all states (i.e., shared_PH == True) self.fixed_PH = is_test and shared_PH # Includes losses generated by the model itself (independent of interaction with env), e.g., warm starting & preconditioning self.autonomous_losses = {} self.mpc_baseline = mpc_baseline self.use_osqp_for_mpc = use_osqp_for_mpc self.imitate_mpc = imitate_mpc # Whether to consider residual loss during training - this can encourage feasibility of the learned QP problem self.use_residual_loss = use_residual_loss # Whether to force the problem to be feasible self.force_feasible = force_feasible self.feasible_lambda = feasible_lambda self.solver = None self.info = {} # Reserved for storing the controllers for each simulation instance when robust MPC is enabled self.robust_controllers = [] # Store info returned by env self.env_info = {} # When running batch testing, mask envs already done, to speed up computation (implemented for robust mpc); initialized at inference time since batch size is not known during initialization self.is_active = None def initialize_solver(self): # If the problem is forced to be feasible, the dimension of the solution is increased by 1 (introduce slack variable) n_qp_actual = self.n_qp + 1 if self.force_feasible else self.n_qp m_qp_actual = self.m_qp + 1 if self.force_feasible else self.m_qp # is_warm_starter_trainable is always False, since the warm starter is trained via another inference independent of the solver # When self.fixed_PH == True, the solver is initialized with fixed P, H matrices; otherwise, P, H are not passed to the solver during initialization time, but computed during the forward pass instead if not self.fixed_PH: self.solver = QPSolver(self.device, n_qp_actual, m_qp_actual, warm_starter=self.warm_starter_delayed, is_warm_starter_trainable=False, symmetric_constraint=self.symmetric, buffered=self.force_feasible) else: # Should be called after loading state dict Pinv, H = self.get_PH() self.solver = QPSolver(self.device, n_qp_actual, m_qp_actual, Pinv=Pinv.squeeze(0), H=H.squeeze(0), warm_starter=self.warm_starter_delayed, is_warm_starter_trainable=False, symmetric_constraint=self.symmetric, buffered=self.force_feasible) def compute_warm_starter_loss(self, q, b, Pinv, H, solver_Xs): qd, bd, Pinvd, Hd = map(lambda t: t.detach() if t is not None else None, [q, b, Pinv, H]) X0 = self.warm_starter(qd, bd, Pinvd, Hd) gt = solver_Xs[:, -1, :].detach() return self.ws_loss_coef * self.ws_loss_shaper(((gt - X0) ** 2).sum(dim=-1).mean()) def parallel_controller_creation(self, controller_creator, xref_np, bs): """ Create robust MPC controller in parallel """ # Helper function for parallel execution def task_creator(index): return controller_creator(self.mpc_baseline, xref_np[index, :]) with ThreadPoolExecutor() as executor: # Executing the tasks in parallel results = executor.map(task_creator, range(bs)) # Collecting the results self.robust_controllers.extend(results) def run_mpc_baseline(self, x, use_osqp_oracle=False): robust_method = self.mpc_baseline.get("robust_method", None) x0, xref = self.mpc_baseline["obs_to_state_and_ref"](x) bs = x.shape[0] # Conversions between torch and np t = lambda a: torch.tensor(a, device=x.device, dtype=torch.float) f = lambda t: t.detach().cpu().numpy() f_sparse = lambda t: scipy.sparse.csc_matrix(t.cpu().numpy()) if robust_method is None: # Run vanilla MPC without robustness eps = 1e-3 n, m, P, q, H, b = mpc2qp( self.mpc_baseline["n_mpc"], self.mpc_baseline["m_mpc"], self.mpc_baseline["N"], t(self.mpc_baseline["A"]), t(self.mpc_baseline["B"]), t(self.mpc_baseline["Q"]), t(self.mpc_baseline["R"]), self.mpc_baseline["x_min"] + eps, self.mpc_baseline["x_max"] - eps, self.mpc_baseline["u_min"], self.mpc_baseline["u_max"], x0, xref, normalize=self.mpc_baseline.get("normalize", False), Qf=self.mpc_baseline.get("terminal_coef", 0.) * t(np.eye(self.mpc_baseline["n_mpc"])) if self.mpc_baseline.get("Qf", None) is None else t(self.mpc_baseline["Qf"]), ) if not use_osqp_oracle: solver = QPSolver(x.device, n, m, P=P, H=H) Xs, primal_sols = solver(q, b, iters=100) sol = primal_sols[:, -1, :] else:
class StrictAffineLayer(nn.Module): """ Layer mapping from obs to (q, b) in the strict affine form. """ def __init__(self, input_size, n, m, obs_has_half_ref): super().__init__() self.obs_has_half_ref = obs_has_half_ref self.input_size = input_size self.q_layer = nn.Linear(self.input_size, n, bias=False) if not self.obs_has_half_ref: self.b_layer = nn.Linear(self.input_size // 2, m, bias=True) else: self.b_layer = nn.Linear(self.input_size, m, bias=True) def forward(self, x): if not self.obs_has_half_ref: x0 = x[:, :self.input_size // 2] else: x0 = x q = self.q_layer(x) b = self.b_layer(x0) return torch.cat([q, b], dim=1) class QPUnrolledNetwork(nn.Module): """ Learn a QP problem from the input using an MLP, then solve the QP using a fixed number of unrolled PDHG iterations. Form of QP: minimize (1/2)x'Px + q'x subject to Hx + b >= 0, where x in R^n, b in R^m. """ def __init__( self, device, input_size, n_qp, m_qp, qp_iter, mlp_builder, shared_PH=False, affine_qb=False, strict_affine_layer=False, obs_has_half_ref=False, symmetric=False, no_b=False, use_warm_starter=False, train_warm_starter=False, ws_loss_coef=1., ws_update_rate=0.01, ws_loss_shaper=lambda x: x ** (1 / 2), mpc_baseline=None, use_osqp_for_mpc=False, imitate_mpc=False, use_residual_loss=False, force_feasible=False, feasible_lambda=10, is_test=False, ): """mlp_builder is a function mapping (input_size, output_size) to a nn.Sequential object. If shared_PH == True, P and H are parameters independent of input, and q and b are functions of input; otherwise, (P, H, q, b) are all functions of input. If affine_qb == True, then q and b are restricted to be affine functions of input. If strict_affine_layer == True (only effective when affine_qb=True), then: 1. q is linear w.r.t. (x0, xref) (no bias) 2. b is affine w.r.t. x0 (no dependence on xref) If obs_has_half_ref == True, the policy knows that the observation is in the form (x0, xref), with each taking up half of the dimension of the observation. If symmetric == True (only effective when affine_qb=True), then: 1. The bias terms are disabled in the modeling of q and b, i.e., q = Wq * x, b = Wb * x. 2. The constraint is assumed to be -1 <= Hx + b <= 1, instead of Hx + b >= 0. If no_b == True in addition to symmetric == True, then b is skipped altogether, i.e., the constraint is assumed to be -1 <= Hx <= 1. If mpc_baseline != None and imitate_mpc == False, then the forward function directly returns the solution of the MPC problem, instead of solving the learned QP problem. Can be used for benchmarking MPC. If mpc_baseline != None and imitate_mpc == True, then the forward function returns the solution of the learned QP problem, but a loss term is computed using the MPC problem. Can be used for supervised imitation learning. If force_feasible == True, solve the following problem instead of the original QP problem: minimize_{x,y} (1/2)x'Px + q'x + lambda * y^2 s.t. Hx + b + y * 1 >= 0, y >= 0, where x in R^n, y in R. In this case, the solution returned will be of dimension (n + 1). """ super().__init__() self.shared_PH = shared_PH self.affine_qb = affine_qb self.strict_affine_layer = strict_affine_layer self.obs_has_half_ref = obs_has_half_ref self.device = device self.input_size = input_size # QP dimensions: these are the number of variables and constraints WITHOUT considering the slack variable self.n_qp = n_qp self.m_qp = m_qp self.qp_iter = qp_iter self.symmetric = symmetric self.no_b = no_b self.n_P_param = n_qp * (n_qp + 1) // 2 self.n_q_param = n_qp self.n_H_param = m_qp * n_qp self.n_b_param = m_qp if not self.no_b else 0 self.n_mlp_output = 0 if not self.shared_PH: self.n_mlp_output += (self.n_P_param + self.n_H_param) self.P_params = None self.H_params = None else: self.P_params = nn.Parameter(torch.randn((self.n_P_param,), device=device)) self.H_params = nn.Parameter(torch.randn((self.n_H_param,), device=device)) if not self.affine_qb: self.n_mlp_output += (self.n_q_param + self.n_b_param) self.qb_affine_layer = None else: if not self.strict_affine_layer: self.qb_affine_layer = nn.Linear(input_size, self.n_q_param + self.n_b_param, bias=not self.symmetric) else: self.qb_affine_layer = StrictAffineLayer(input_size, self.n_qp, self.m_qp, self.obs_has_half_ref) if self.n_mlp_output > 0: self.mlp = mlp_builder(input_size, self.n_mlp_output) else: self.mlp = None # TODO: add preconditioner self.warm_starter = WarmStarter(device, n_qp, m_qp, fixed_P=shared_PH, fixed_H=shared_PH) if use_warm_starter else None self.warm_starter_delayed = WarmStarter(device, n_qp, m_qp, fixed_P=shared_PH, fixed_H=shared_PH) if use_warm_starter else None self.train_warm_starter = train_warm_starter self.ws_loss_coef = ws_loss_coef self.ws_update_rate = ws_update_rate self.ws_loss_shaper = ws_loss_shaper # P, H are fixed when the model is in test mode, and they are constant across all states (i.e., shared_PH == True) self.fixed_PH = is_test and shared_PH # Includes losses generated by the model itself (independent of interaction with env), e.g., warm starting & preconditioning self.autonomous_losses = {} self.mpc_baseline = mpc_baseline self.use_osqp_for_mpc = use_osqp_for_mpc self.imitate_mpc = imitate_mpc # Whether to consider residual loss during training - this can encourage feasibility of the learned QP problem self.use_residual_loss = use_residual_loss # Whether to force the problem to be feasible self.force_feasible = force_feasible self.feasible_lambda = feasible_lambda self.solver = None self.info = {} # Reserved for storing the controllers for each simulation instance when robust MPC is enabled self.robust_controllers = [] # Store info returned by env self.env_info = {} # When running batch testing, mask envs already done, to speed up computation (implemented for robust mpc); initialized at inference time since batch size is not known during initialization self.is_active = None def initialize_solver(self): # If the problem is forced to be feasible, the dimension of the solution is increased by 1 (introduce slack variable) n_qp_actual = self.n_qp + 1 if self.force_feasible else self.n_qp m_qp_actual = self.m_qp + 1 if self.force_feasible else self.m_qp # is_warm_starter_trainable is always False, since the warm starter is trained via another inference independent of the solver # When self.fixed_PH == True, the solver is initialized with fixed P, H matrices; otherwise, P, H are not passed to the solver during initialization time, but computed during the forward pass instead if not self.fixed_PH: self.solver = QPSolver(self.device, n_qp_actual, m_qp_actual, warm_starter=self.warm_starter_delayed, is_warm_starter_trainable=False, symmetric_constraint=self.symmetric, buffered=self.force_feasible) else: # Should be called after loading state dict Pinv, H = self.get_PH() self.solver = QPSolver(self.device, n_qp_actual, m_qp_actual, Pinv=Pinv.squeeze(0), H=H.squeeze(0), warm_starter=self.warm_starter_delayed, is_warm_starter_trainable=False, symmetric_constraint=self.symmetric, buffered=self.force_feasible) def compute_warm_starter_loss(self, q, b, Pinv, H, solver_Xs): qd, bd, Pinvd, Hd = map(lambda t: t.detach() if t is not None else None, [q, b, Pinv, H]) X0 = self.warm_starter(qd, bd, Pinvd, Hd) gt = solver_Xs[:, -1, :].detach() return self.ws_loss_coef * self.ws_loss_shaper(((gt - X0) ** 2).sum(dim=-1).mean()) def parallel_controller_creation(self, controller_creator, xref_np, bs): """ Create robust MPC controller in parallel """ # Helper function for parallel execution def task_creator(index): return controller_creator(self.mpc_baseline, xref_np[index, :]) with ThreadPoolExecutor() as executor: # Executing the tasks in parallel results = executor.map(task_creator, range(bs)) # Collecting the results self.robust_controllers.extend(results) def run_mpc_baseline(self, x, use_osqp_oracle=False): robust_method = self.mpc_baseline.get("robust_method", None) x0, xref = self.mpc_baseline["obs_to_state_and_ref"](x) bs = x.shape[0] # Conversions between torch and np t = lambda a: torch.tensor(a, device=x.device, dtype=torch.float) f = lambda t: t.detach().cpu().numpy() f_sparse = lambda t: scipy.sparse.csc_matrix(t.cpu().numpy()) if robust_method is None: # Run vanilla MPC without robustness eps = 1e-3 n, m, P, q, H, b = mpc2qp( self.mpc_baseline["n_mpc"], self.mpc_baseline["m_mpc"], self.mpc_baseline["N"], t(self.mpc_baseline["A"]), t(self.mpc_baseline["B"]), t(self.mpc_baseline["Q"]), t(self.mpc_baseline["R"]), self.mpc_baseline["x_min"] + eps, self.mpc_baseline["x_max"] - eps, self.mpc_baseline["u_min"], self.mpc_baseline["u_max"], x0, xref, normalize=self.mpc_baseline.get("normalize", False), Qf=self.mpc_baseline.get("terminal_coef", 0.) * t(np.eye(self.mpc_baseline["n_mpc"])) if self.mpc_baseline.get("Qf", None) is None else t(self.mpc_baseline["Qf"]), ) if not use_osqp_oracle: solver = QPSolver(x.device, n, m, P=P, H=H) Xs, primal_sols = solver(q, b, iters=100) sol = primal_sols[:, -1, :] else:
osqp_oracle_with_iter_count = functools.partial(osqp_oracle, return_iter_count=True)
7
2023-11-28 05:56:22+00:00
16k
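The QPUnrolledNetwork docstring in the record above spells out the force_feasible reformulation: minimize_{x,y} (1/2)x'Px + q'x + lambda*y^2 subject to Hx + b + y*1 >= 0 and y >= 0. A small numpy sketch of that augmentation, with the slack appended as the last coordinate (this block layout is an assumption for illustration; the class's own dimension bookkeeping may order things differently):

import numpy as np

def force_feasible_extension(P, q, H, b, lam):
    # Append one slack variable y: the objective gains lam * y**2,
    # every constraint gains +y, and a final row enforces y >= 0.
    n, m = P.shape[0], H.shape[0]
    P_t = np.zeros((n + 1, n + 1))
    P_t[:n, :n] = P
    P_t[n, n] = 2.0 * lam          # (1/2) z'Pz then contributes lam * y^2
    q_t = np.concatenate([q, [0.0]])
    H_t = np.zeros((m + 1, n + 1))
    H_t[:m, :n] = H
    H_t[:m, n] = 1.0               # Hx + b + y * 1 >= 0
    H_t[m, n] = 1.0                # y >= 0
    b_t = np.concatenate([b, [0.0]])
    return P_t, q_t, H_t, b_t

Because y can always be made large enough to satisfy every row, the augmented QP is feasible for any (H, b) the network proposes, which is the point of the flag.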
Fraunhofer-SCAI/llamol
sample.py
[ { "identifier": "Transformer", "path": "model.py", "snippet": "class Transformer(nn.Module):\n last_loss: Optional[torch.Tensor]\n\n def __init__(self, params: ModelArgs, context_params: ContextArgs):\n super().__init__()\n self.params = params\n self.context_params = context_...
import os import sys import time import pandas as pd import torch import numpy as np import re import logging import argparse import rdkit.rdBase as rkrb import rdkit.RDLogger as rkl from contextlib import nullcontext from tqdm.auto import tqdm from model import Transformer from plot_utils import ( check_metrics, plot_1D_condition, plot_2D_condition, plot_3D_condition, plot_unconditional, ) from tokenizer import SmilesTokenizer from typing import Dict, List, Tuple, Union from rdkit import Chem from rdkit import DataStructs from rdkit.Chem.Fingerprints import FingerprintMols
13,254
i * gens_per_step : (i + 1) * gens_per_step ] for c in context_cols.keys() } context_dict = {"context": cd, "fragment": None} if context_smi is not None: logger.debug( f"context_smiles: {context_smi}", ) # NOTE: Remove beginning [CLS] and end token [SEP] incorporate_selfie = self.tokenizer.encode(context_smi)[ 1:-1 ] context_tensor = torch.tensor( [incorporate_selfie] * gens_per_step, dtype=torch.long, device=self.device, ) context_dict["fragment"] = context_tensor context_cols = list(context_cols.keys()) else: context_dict = self.get_context( context_cols, context_smi, num_examples=gens_per_step ) # for k in range(num_samples): y = self.model.generate( self.tokenizer, context=context_dict["context"], fragments=context_dict["fragment"], start_smiles=start_smiles, num_gen=gens_per_step, temperature=temperature, top_k=top_k, max_length=max_new_tokens, device=self.device, cache_kv=use_kv_cache, ) new_context = {k: [] for k in context_dict["context"]} for i, sample in enumerate(y): # print(sample) mol = Chem.MolFromSmiles(sample) if mol is not None: out_smiles.append(sample) for k in new_context: new_context[k].append( context_dict["context"][k][i].unsqueeze(-1) ) for k in new_context: new_context[k] = torch.concat(new_context[k], dim=0) if context is None: context = new_context else: for k in context: context[k] = torch.concat( [context[k], new_context[k]], dim=0 ) pbar.update(1) logger.info( f"Number valid generated: {len(out_smiles) / num_samples * 100} %" ) logger.info("---------------") if return_context: return (out_smiles, context) else: return out_smiles @torch.no_grad() def generate_with_evaluation( self, context_cols: Union[List[str], None] = None, context_smi: Union[str, None] = None, start_smiles: Union[str, None] = None, num_samples: int = 50, max_new_tokens: int = 256, temperature: float = 1.0, top_k: Union[int, None] = None, cmp_context_dict: Union[Dict[str, torch.Tensor], None] = None, total_gen_steps: int = 1, use_kv_cache: bool = False, ): out_smiles, new_context = self.generate( context_cols=context_cols, context_smi=context_smi, start_smiles=start_smiles, num_samples=num_samples, max_new_tokens=max_new_tokens, temperature=temperature, top_k=top_k, return_context=True, total_gen_steps=total_gen_steps, use_kv_cache=use_kv_cache, ) out_dir = os.path.dirname(self.load_path) if context_cols is not None: if len(context_cols) == 1: plot_1D_condition( context_cols, os.path.join(out_dir, "plots"), new_context, out_smiles, temperature, cmp_context_dict, context_scaler=None, ) elif len(context_cols) == 2:
# from tqdm.notebook import tqdm logger = logging.getLogger(__name__) class Sampler: def __init__( self, load_path: str, device: str = "cpu", seed: int = 1337, dtype: str = "float16", compile: bool = True, quantize: bool = False, ) -> None: self.load_path = load_path self.device = device self.dtype = dtype self.compile = compile self.quantize = quantize self.seed = seed self._init_model() def _init_model(self): np.random.seed(self.seed) torch.cuda.manual_seed(self.seed) torch.backends.cuda.matmul.allow_tf32 = True # allow tf32 on matmul torch.backends.cudnn.allow_tf32 = True # allow tf32 on cudnn self.device_type = ( "cuda" if "cuda" in self.device else "cpu" ) # for later use in torch.autocast ptdtype = { "float32": torch.float32, "bfloat16": torch.bfloat16, "float16": torch.float16, }[self.dtype] self.ptdtype = ptdtype self.ctx = self._autocast() # init from a model saved in a specific directory # ckpt_path = os.path.join(out_dir, "ckpt_full_dim=256.pt") self.model = Transformer.load(self.load_path, device=self.device) self.model.eval() if self.quantize: raise NotImplementedError("Not properly implemented for CPU / GPU") self.model = torch.ao.quantization.quantize_dynamic( self.model, # the original model {torch.nn.Linear}, # a set of layers to dynamically quantize dtype=torch.qint8, ) if self.compile: logger.info("Compiling the model...") self.model = torch.compile(self.model) # requires PyTorch 2.0 (optional) self.model = self.model.to(self.device) # load the tokenizer self.tokenizer = SmilesTokenizer() def get_context( self, context_col: List[str], context_smi: str, num_examples: int = 50, ): """ Returns a dictionary in the form of { "fragment": torch.tensor, "context": { "logp": torch.tensor, "sascore": torch.tensor, "mol_weight": torch.tensor } } When context_smi is set to a string, then the "fragment" field is populated. All of the properties listed in the context_col list are set as the keys, and the values are set to a reasonable range for each property. num_examples indicates how many values are sampled for each property. """ output_dict = {"context": {}, "fragment": None} if context_smi is not None: logger.debug( f"context_smiles: {context_smi}", ) # NOTE: Remove beginning [CLS] and end token [SEP] incorporate_selfie = self.tokenizer.encode(context_smi)[1:-1] context = torch.tensor( [incorporate_selfie] * num_examples, dtype=torch.long, device=self.device, ) output_dict["fragment"] = context if context_col is None: return output_dict if "logp" in context_col: # context = 0.5 * torch.randint( # -8, 14, (num_examples,), device=self.device, dtype=torch.float # ) # context = 0.5 * torch.randint( # -6, 6, (num_examples, 1), device=device, dtype=torch.float # ) context = torch.tensor( np.random.choice([-2, 0, 2], (num_examples,)), device=self.device, dtype=self.ptdtype, ) # context = 2.0 * torch.ones( # (num_examples,1), device=device, dtype=torch.float # ) # context = -2.0*torch.ones((num_examples,2),device=device,dtype=torch.float) # context, _ = torch.sort(context, 0) output_dict["context"]["logp"] = context if "energy" in context_col: context = 0.1 * torch.randint( -15, 15, (num_examples,), device=self.device, dtype=torch.float ) # context = -2.0*torch.ones((num_examples,2),device=device,dtype=torch.float) context, _ = torch.sort(context, 0) output_dict["context"]["energy"] = context if "sascore" in context_col: # context = 0.5 * torch.randint( # 2, 20, (num_examples, ), device=self.device, dtype=torch.float # ) context = torch.tensor( np.random.choice([2, 3, 4], (num_examples,)), device=self.device, dtype=torch.float, ) # context = 0.5 * torch.randint( # 4, 8, (num_examples, 1), device=device, dtype=torch.float # ) # context = 2.0*torch.ones((num_examples,1),device=device,dtype=torch.float) # context, _ = torch.sort(context, 0) output_dict["context"]["sascore"] = context if "mol_weight" in context_col: # context = 0.5 * torch.randint( # 2, 20, (num_examples,), device=self.device, dtype=torch.float # ) context = torch.tensor( np.random.choice([2.0, 3.0, 4.0], (num_examples,)), device=self.device, dtype=torch.float, ) # context = 0.5 * torch.randint( # 2, 20, (num_examples, 1), device=device, dtype=torch.float # ) # context = 2.5*torch.ones((num_examples,1),device=device,dtype=torch.float) # context, _ = torch.sort(context, 0) output_dict["context"]["mol_weight"] = context return output_dict def _autocast(self): if "cuda" in self.device: if self.dtype == "bfloat16" and torch.cuda.is_bf16_supported(): return torch.cuda.amp.autocast(dtype=torch.bfloat16) elif self.dtype == "float16": return torch.cuda.amp.autocast(dtype=torch.float16) else: return torch.cuda.amp.autocast(dtype=torch.float32) else: # cpu return nullcontext() @torch.no_grad() def generate( self, context_cols: Union[List[str], None, Dict[str, torch.Tensor]] = None, context_smi: Union[str, None] = None, start_smiles: Union[str, None] = None, num_samples: int = 50, max_new_tokens: int = 256, temperature: float = 1.0, top_k: Union[int, None] = None, return_context: bool = False, total_gen_steps: int = 1, use_kv_cache: bool = False, ) -> Union[List[str], Tuple[List[str], List[float]]]: """ Generates a list of SMILES. With the default options it would generate them unconditionally. Params: - context_cols : When given a list, the context is randomly sampled from the get_context method; when given a dictionary, the context values are taken from the dictionary instead. - context_smi : Further conditioning by the usage of a molecular fragment. - start_smiles : Can be used to start the SMILES with a specific string, the model then generates the next tokens including that start sequence. - num_samples : Controls how many SMILES in total will be generated by the model. - max_new_tokens : Controls the maximum length of each SMILES (in tokens) that is generated. - temperature: Controls the randomness of the model. A temperature = 1.0 means it is the trained distribution. A temperature < 1 is more deterministic and temperature > 1 is more random. - top_k : Clamps the probability distribution to the top k tokens. The next token is then sampled from these. - return_context : Whether the context that was given to the model should be returned. - total_gen_steps : Into how many sub-steps the generation should be split. Useful when generating 10k+ SMILES and wanting to chunk these into, for example, 10 * 1k generations with total_gen_steps = 10. - use_kv_cache: Runs the generation using kv-caching. It is faster, but takes more memory. """ with self.ctx: gens_per_step = num_samples // total_gen_steps logger.debug(f"Gens per Step: {gens_per_step}") context = None # {"context": None, "fragment" : None} out_smiles = [] with tqdm(total=total_gen_steps, desc="Batch") as pbar: for i in range(total_gen_steps): if isinstance(context_cols, dict): # TODO: Test if same length cd = { c: context_cols[c][ i * gens_per_step : (i + 1) * gens_per_step ] for c in context_cols.keys() } context_dict = {"context": cd, "fragment": None} if context_smi is not None: logger.debug( f"context_smiles: {context_smi}", ) # NOTE: Remove beginning [CLS] and end token [SEP] incorporate_selfie = self.tokenizer.encode(context_smi)[ 1:-1 ] context_tensor = torch.tensor( [incorporate_selfie] * gens_per_step, dtype=torch.long, device=self.device, ) context_dict["fragment"] = context_tensor context_cols = list(context_cols.keys()) else: context_dict = self.get_context( context_cols, context_smi, num_examples=gens_per_step ) # for k in range(num_samples): y = self.model.generate( self.tokenizer, context=context_dict["context"], fragments=context_dict["fragment"], start_smiles=start_smiles, num_gen=gens_per_step, temperature=temperature, top_k=top_k, max_length=max_new_tokens, device=self.device, cache_kv=use_kv_cache, ) new_context = {k: [] for k in context_dict["context"]} for i, sample in enumerate(y): # print(sample) mol = Chem.MolFromSmiles(sample) if mol is not None: out_smiles.append(sample) for k in new_context: new_context[k].append( context_dict["context"][k][i].unsqueeze(-1) ) for k in new_context: new_context[k] = torch.concat(new_context[k], dim=0) if context is None: context = new_context else: for k in context: context[k] = torch.concat( [context[k], new_context[k]], dim=0 ) pbar.update(1) logger.info( f"Number valid generated: {len(out_smiles) / num_samples * 100} %" ) logger.info("---------------") if return_context: return (out_smiles, context) else: return out_smiles @torch.no_grad() def generate_with_evaluation( self, context_cols: Union[List[str], None] = None, context_smi: Union[str, None] = None, start_smiles: Union[str, None] = None, num_samples: int = 50, max_new_tokens: int = 256, temperature: float = 1.0, top_k: Union[int, None] = None, cmp_context_dict: Union[Dict[str, torch.Tensor], None] = None, total_gen_steps: int = 1, use_kv_cache: bool = False, ): out_smiles, new_context = self.generate( context_cols=context_cols, context_smi=context_smi, start_smiles=start_smiles, num_samples=num_samples, max_new_tokens=max_new_tokens, temperature=temperature, top_k=top_k, return_context=True, total_gen_steps=total_gen_steps, use_kv_cache=use_kv_cache, ) out_dir = os.path.dirname(self.load_path) if context_cols is not None: if len(context_cols) == 1: plot_1D_condition( context_cols, os.path.join(out_dir, "plots"), new_context, out_smiles, temperature, cmp_context_dict, context_scaler=None, ) elif len(context_cols) == 2:
plot_2D_condition(
3
2023-11-28 09:50:31+00:00
16k
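The sampler in the record above counts a generation as valid exactly when RDKit's Chem.MolFromSmiles returns a molecule, and logs the resulting percentage. Isolated, that validity check looks roughly like this (the helper name and return shape are illustrative):

from rdkit import Chem

def validity_rate(smiles_list):
    # RDKit returns None for unparseable SMILES; keep only the rest.
    valid = [s for s in smiles_list if Chem.MolFromSmiles(s) is not None]
    return valid, 100.0 * len(valid) / max(len(smiles_list), 1)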
lampmerchant/tashrouter
tashrouter/router/router.py
[ { "identifier": "RoutingTable", "path": "tashrouter/router/routing_table.py", "snippet": "class RoutingTable:\n '''A Router's routing table.'''\n \n STATE_GOOD = 1\n STATE_SUS = 2\n STATE_BAD = 3\n STATE_WORST = 4\n \n def __init__(self, router):\n self._router = router\n self._entry_by_ne...
import logging from .routing_table import RoutingTable from .zone_information_table import ZoneInformationTable from ..datagram import Datagram from ..service.echo import EchoService from ..service.name_information import NameInformationService from ..service.routing_table_aging import RoutingTableAgingService from ..service.rtmp.responding import RtmpRespondingService from ..service.rtmp.sending import RtmpSendingService from ..service.zip.responding import ZipRespondingService from ..service.zip.sending import ZipSendingService
11,972
'''The heart of this whole affair.''' class Router: '''A router, a device which sends Datagrams to Ports and runs Services.''' def __init__(self, short_str, ports): self._short_str = short_str self.ports = ports self._services = ( (EchoService.ECHO_SAS, EchoService()),
'''The heart of this whole affair.''' class Router: '''A router, a device which sends Datagrams to Ports and runs Services.''' def __init__(self, short_str, ports): self._short_str = short_str self.ports = ports self._services = ( (EchoService.ECHO_SAS, EchoService()),
(NameInformationService.NBP_SAS, NameInformationService()),
4
2023-12-02 15:17:07+00:00
16k
jags111/ComfyUI_Jags_Audiotools
libs/dance_diffusion/dd/inference.py
[ { "identifier": "SchedulerType", "path": "libs/diffusion_library/scheduler.py", "snippet": "class SchedulerType(str, enum.Enum):\n V_DDPM = 'V_DDPM'\n V_SPLICED_DDPM_COSINE = 'V_SPLICED_DDPM_COSINE'\n V_LOG = 'V_LOG'\n V_CRASH = 'V_CRASH'\n \n K_KARRAS = 'K_KARRAS'\n K_EXPONENTIAL =...
import torch from tqdm.auto import trange from diffusion.utils import t_to_alpha_sigma from k_diffusion.external import VDenoiser from typing import Tuple, Callable from libs.diffusion_library.scheduler import SchedulerType from libs.diffusion_library.sampler import SamplerType from libs.dance_diffusion.base.model import ModelWrapperBase from libs.dance_diffusion.base.inference import InferenceBase from libs.util.util import tensor_slerp_2D, PosteriorSampling from typing import Tuple, Callable from libs.diffusion_library.scheduler import SchedulerType from libs.diffusion_library.sampler import SamplerType from libs.dance_diffusion.base.model import ModelWrapperBase from libs.dance_diffusion.base.inference import InferenceBase from libs.util.util import tensor_slerp_2D, PosteriorSampling
11,160
callback, **sampler_args ).float() def generate_interpolation( self, callback: Callable = None, batch_size: int = None, # seed: int = None, interpolation_positions: list[float] = None, audio_source: torch.Tensor = None, audio_target: torch.Tensor = None, expansion_map: list[int] = None, noise_level: float = None, steps: int = None, scheduler: SchedulerType = None, scheduler_args = None, sampler: SamplerType = None, sampler_args = None, **kwargs ) -> torch.Tensor: if SamplerType.is_v_sampler(sampler): step_list = scheduler.get_step_list(steps, self.device_accelerator.type, **scheduler_args) step_list = step_list[step_list < noise_level] step_list[-1] += 1e-7 #HACK avoid division by 0 in reverse sampling model = self.model.model else: scheduler_args.update(sigma_max = scheduler_args.get('sigma_max', 1.0) * noise_level) step_list = scheduler.get_step_list(steps, self.device_accelerator.type, **scheduler_args) step_list = step_list[:-1] #HACK avoid division by 0 in reverse sampling model = VDenoiser(self.model.model) if self.optimize_memory_use and batch_size < 2: x_0_source = audio_source x_0_target = audio_target with self.offload_context(self.model.model): x_T_source = sampler.sample( model, x_0_source, step_list.flip(0), callback, **sampler_args ) with self.offload_context(self.model.model): x_T_target = sampler.sample( model, x_0_target, step_list.flip(0), callback, **sampler_args ) x_T = torch.cat([x_T_source, x_T_target], dim=0) else: x_0 = torch.cat([audio_source, audio_target], dim=0) with self.offload_context(self.model.model): x_T = sampler.sample( model, x_0, step_list.flip(0), callback, **sampler_args ) if SamplerType.is_v_sampler(sampler): #HACK reset schedule after hack step_list[-1] = 0.0 else: step_list = torch.cat([step_list, step_list.new_zeros([1])]) x_Int = torch.empty([batch_size, 2, self.model.chunk_size], device=self.device_accelerator) for pos in range(len(interpolation_positions)): x_Int[pos] = tensor_slerp_2D(x_T[0], x_T[1], interpolation_positions[pos]) with self.offload_context(self.model.model): return sampler.sample( model, x_Int, step_list, callback, **sampler_args ).float() def generate_inpainting( self, callback: Callable = None, batch_size: int = None, seed: int = None, audio_source: torch.Tensor = None, expansion_map: list[int] = None, mask: torch.Tensor = None, steps: int = None, scheduler: SchedulerType = None, scheduler_args = None, sampler: SamplerType = None, sampler_args = None, inpainting_args = None, **kwargs ) -> torch.Tensor: self.generator.manual_seed(seed) method = inpainting_args.get('method') if(method == 'repaint'): raise Exception("Repaint currently not supported due to changed requirements") elif(method == 'posterior_guidance'): step_list = scheduler.get_step_list(steps, self.device_accelerator.type, **scheduler_args) if SamplerType.is_v_sampler(sampler): raise Exception('V-Sampler currently not supported for posterior guidance. Please choose a K-Sampler.') else: x_T = audio_source + step_list[0] * torch.randn([batch_size, 2, self.model.chunk_size], generator=self.generator, device=self.device_accelerator)
class DDInference(InferenceBase): def __init__( self, device_accelerator: torch.device = None, device_offload: torch.device = None, optimize_memory_use: bool = False, use_autocast: bool = True, model: ModelWrapperBase = None ): super().__init__(device_accelerator, device_offload, optimize_memory_use, use_autocast, model) def generate( self, callback: Callable = None, batch_size: int = None, seed: int = None, steps: int = None, scheduler: SchedulerType = None, scheduler_args: dict = None, sampler: SamplerType = None, sampler_args: dict = None, **kwargs ): self.generator.manual_seed(seed) step_list = scheduler.get_step_list(steps, self.device_accelerator.type, **scheduler_args)#step_list = step_list[:-1] if sampler in [SamplerType.V_PRK, SamplerType.V_PLMS, SamplerType.V_PIE, SamplerType.V_PLMS2, SamplerType.V_IPLMS] else step_list if SamplerType.is_v_sampler(sampler): x_T = torch.randn([batch_size, 2, self.model.chunk_size], generator=self.generator, device=self.device_accelerator) model = self.model.model else: x_T = step_list[0] * torch.randn([batch_size, 2, self.model.chunk_size], generator=self.generator, device=self.device_accelerator) model = VDenoiser(self.model.model) with self.offload_context(self.model.model): return sampler.sample( model, x_T, step_list, callback, **sampler_args ).float() def generate_variation( self, callback: Callable = None, batch_size: int = None, seed: int = None, audio_source: torch.Tensor = None, expansion_map: list[int] = None, noise_level: float = None, steps: int = None, scheduler: SchedulerType = None, scheduler_args = None, sampler: SamplerType = None, sampler_args = None, **kwargs ) -> torch.Tensor: self.generator.manual_seed(seed) audio_source = self.expand(audio_source, expansion_map) if SamplerType.is_v_sampler(sampler): step_list = scheduler.get_step_list(steps, self.device_accelerator.type, **scheduler_args) step_list = step_list[step_list < noise_level] alpha_T, sigma_T = t_to_alpha_sigma(step_list[0]) x_T = alpha_T * audio_source + sigma_T * torch.randn(audio_source.shape, device=audio_source.device, generator=self.generator) model = self.model.model else: scheduler_args.update(sigma_max = scheduler_args.get('sigma_max', 1.0) * noise_level) step_list = scheduler.get_step_list(steps, self.device_accelerator.type, **scheduler_args) x_T = audio_source + step_list[0] * torch.randn(audio_source.shape, device=audio_source.device, generator=self.generator) model = VDenoiser(self.model.model) with self.offload_context(self.model.model): return sampler.sample( model, x_T, step_list, callback, **sampler_args ).float() def generate_interpolation( self, callback: Callable = None, batch_size: int = None, # seed: int = None, interpolation_positions: list[float] = None, audio_source: torch.Tensor = None, audio_target: torch.Tensor = None, expansion_map: list[int] = None, noise_level: float = None, steps: int = None, scheduler: SchedulerType = None, scheduler_args = None, sampler: SamplerType = None, sampler_args = None, **kwargs ) -> torch.Tensor: if SamplerType.is_v_sampler(sampler): step_list = scheduler.get_step_list(steps, self.device_accelerator.type, **scheduler_args) step_list = step_list[step_list < noise_level] step_list[-1] += 1e-7 #HACK avoid division by 0 in reverse sampling model = self.model.model else: scheduler_args.update(sigma_max = scheduler_args.get('sigma_max', 1.0) * noise_level) step_list = scheduler.get_step_list(steps, self.device_accelerator.type, **scheduler_args) step_list = step_list[:-1] #HACK avoid division by 0 in reverse sampling model = VDenoiser(self.model.model) if self.optimize_memory_use and batch_size < 2: x_0_source = audio_source x_0_target = audio_target with self.offload_context(self.model.model): x_T_source = sampler.sample( model, x_0_source, step_list.flip(0), callback, **sampler_args ) with self.offload_context(self.model.model): x_T_target = sampler.sample( model, x_0_target, step_list.flip(0), callback, **sampler_args ) x_T = torch.cat([x_T_source, x_T_target], dim=0) else: x_0 = torch.cat([audio_source, audio_target], dim=0) with self.offload_context(self.model.model): x_T = sampler.sample( model, x_0, step_list.flip(0), callback, **sampler_args ) if SamplerType.is_v_sampler(sampler): #HACK reset schedule after hack step_list[-1] = 0.0 else: step_list = torch.cat([step_list, step_list.new_zeros([1])]) x_Int = torch.empty([batch_size, 2, self.model.chunk_size], device=self.device_accelerator) for pos in range(len(interpolation_positions)): x_Int[pos] = tensor_slerp_2D(x_T[0], x_T[1], interpolation_positions[pos]) with self.offload_context(self.model.model): return sampler.sample( model, x_Int, step_list, callback, **sampler_args ).float() def generate_inpainting( self, callback: Callable = None, batch_size: int = None, seed: int = None, audio_source: torch.Tensor = None, expansion_map: list[int] = None, mask: torch.Tensor = None, steps: int = None, scheduler: SchedulerType = None, scheduler_args = None, sampler: SamplerType = None, sampler_args = None, inpainting_args = None, **kwargs ) -> torch.Tensor: self.generator.manual_seed(seed) method = inpainting_args.get('method') if(method == 'repaint'): raise Exception("Repaint currently not supported due to changed requirements") elif(method == 'posterior_guidance'): step_list = scheduler.get_step_list(steps, self.device_accelerator.type, **scheduler_args) if SamplerType.is_v_sampler(sampler): raise Exception('V-Sampler currently not supported for posterior guidance. Please choose a K-Sampler.') else: x_T = audio_source + step_list[0] * torch.randn([batch_size, 2, self.model.chunk_size], generator=self.generator, device=self.device_accelerator)
model = PosteriorSampling(
11
2023-11-28 09:09:59+00:00
16k
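generate_interpolation in the record above noises both endpoints, blends them with tensor_slerp_2D at each interpolation position, then samples back down. As an assumed stand-in (the real tensor_slerp_2D lives in libs/util/util.py and may normalize per-channel instead), a generic spherical interpolation in torch looks like:

import torch

def slerp(a, b, t, eps=1e-7):
    # Interpolate along the great circle between a and b, treated as flat vectors.
    a_f, b_f = a.flatten(), b.flatten()
    cos_omega = torch.dot(a_f / a_f.norm(), b_f / b_f.norm())
    omega = torch.arccos(torch.clamp(cos_omega, -1.0 + eps, 1.0 - eps))
    so = torch.sin(omega)
    return (torch.sin((1.0 - t) * omega) / so) * a + (torch.sin(t * omega) / so) * b

Slerp rather than linear interpolation keeps the blended latents at a norm the diffusion model expects, which is why it is the usual choice for interpolating noise tensors.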
Matrixeigs/UncertaintyManagementInteroperablePowerTransportationSystems
TestCaseDistributionSystems/uc_mmgs_fully_recourse_problem.py
[ { "identifier": "case33", "path": "TestCaseDistributionSystems/test_cases/case33.py", "snippet": "def case33():\n \"\"\"Power flow data for 33 bus, 6 generator case.\n Please see L{caseformat} for details on the case file format.\n\n Based on data from ...\n\n Alsac, O. & Stott, B., I{\"Opti...
from TestCaseDistributionSystems.test_cases import case33 from TestCasesMicrogrids.test_cases.cases_unit_commitment import micro_grid from TestCasesTransportationSystems.test_cases import case3, TIME, LOCATION from scipy import zeros, shape, ones, diag, concatenate, eye from scipy.sparse import csr_matrix as sparse from scipy.sparse import hstack, vstack, lil_matrix from numpy import flatnonzero as find from numpy import array, tile, arange, random from pypower.idx_brch import F_BUS, T_BUS, BR_R, BR_X, RATE_A from pypower.idx_bus import PD, VMAX, VMIN, QD, BUS_I from pypower.idx_gen import GEN_BUS, PMAX, PMIN, QMAX, QMIN from pypower.ext2int import ext2int from Solvers.mixed_integer_quadratic_constrained_cplex import mixed_integer_quadratic_constrained_programming as miqcp from Solvers.mixed_integer_programming_gurobi import mixed_integer_linear_programming as milp from copy import deepcopy from TestCaseDistributionSystems.data_format.idx_MG_PV import PBIC_AC2DC, PG, PESS_DC, PBIC_DC2AC, PUG, PESS_CH, \ PMESS, EESS, NX_MG, QBIC, QUG, QG, PPV from TestCaseDistributionSystems.database_management_pv import DataBaseManagement from StochasticOptimization.scenario_reduction import ScenarioReduction
12,828
self.name = "Unit commitment with tess" def main(self, power_networks, micro_grids, profile, pv_profile, mess, traffic_networks, ns=100): """ Main entrance for network reconfiguration problems :param case: electric network information :param profile: load profile within the distribution networks :param micrgrids: dictionary for microgrids :param tess: dictionary for tess :return: network reconfiguration, distribution network status, and microgrid status """ T = len(profile) # Time spans self.T = T nmg = len(micro_grids) # Number of microgrids self.nmg = nmg nmes = len(mess) # Number of mobile energy storage systems self.nmes = nmes nb_tra = traffic_networks["bus"].shape[0] # Number of buses in the transportation networks self.nb_tra = nb_tra assert nb_tra == nmg, "The microgrids within the transportation networks are not synchronized!" # 1) Formulate the first stage optimization problem model_first_stage = self.first_stage_problem_formualtion(pns=power_networks, mgs=micro_grids, mess=mess, tns=traffic_networks) # (sol_first_stage, obj, success) = milp(model_first_stage["c"], Aeq=model_first_stage["Aeq"], # beq=model_first_stage["beq"], # A=model_first_stage["A"], b=model_first_stage["b"], # vtypes=model_first_stage["vtypes"], # xmax=model_first_stage["ub"], xmin=model_first_stage["lb"]) # sol_first_stage = self.first_stage_solution_validation(sol=sol_first_stage) # 2) Formulate the second stage optimization problem # Formulate the second stage scenarios (ds_second_stage, mgs_second_stage, weight) = self.scenario_generation_reduction(profile=profile, micro_grids=micro_grids, ns=ns, pns=power_networks, pv_profile=pv_profile, ns_reduced=round(0.98 * ns)) ns -= round(0.98 * ns) model_second_stage = {} for i in range(ns): model_second_stage[i] = self.second_stage_problem_formualtion(pns=power_networks, mgs=mgs_second_stage[i], mess=mess, tns=traffic_networks, profile=ds_second_stage[i, :], index=i, weight=weight[i]) # 3) Merge the first-stage problem and second stage problem lb = model_first_stage["lb"] ub = model_first_stage["ub"] vtypes = model_first_stage["vtypes"] c = model_first_stage["c"] Qc = dict() if model_first_stage["Aeq"] is not None: neq = model_first_stage["Aeq"].shape[0] else: neq = 0 if model_first_stage["A"] is not None: nineq = model_first_stage["A"].shape[0] else: nineq = 0 nv_first_stage = self.nv_first_stage nv_second_stage = self.nv_second_stage q = zeros(nv_first_stage) nv_index = zeros(ns + 1).astype(int) neq_index = zeros(ns + 1).astype(int) nineq_index = zeros(ns + 1).astype(int) neq_index[0] = neq nineq_index[0] = nineq nv_index[0] = nv_first_stage beq = model_first_stage["beq"] for i in range(ns): if model_second_stage[i]["Aeq"] is not None: neq_index[i + 1] = neq_index[i] + model_second_stage[i]["Aeq"].shape[0] else: neq_index[i + 1] = neq_index[i] if model_second_stage[i]["Ts"] is not None: nineq_index[i + 1] = nineq_index[i] + model_second_stage[i]["Ts"].shape[0] else: nineq_index[i + 1] = nineq_index[i] nv_index[i + 1] = nv_index[i] + nv_second_stage c = concatenate([c, model_second_stage[i]["c"]]) q = concatenate([q, model_second_stage[i]["q"]]) lb = concatenate([lb, model_second_stage[i]["lb"]]) ub = concatenate([ub, model_second_stage[i]["ub"]]) vtypes += model_second_stage[i]["vtypes"] beq = concatenate([beq, model_second_stage[i]["beq"]]) Aeq_full = lil_matrix((neq_index[-1], nv_index[-1])) Aeq_full[0:neq_index[0], 0:nv_index[0]] = model_first_stage["Aeq"] rc = zeros(0) for i in range(ns): Aeq_full[neq_index[i]:neq_index[i + 1], nv_index[i]:nv_index[i + 1]] 
= model_second_stage[i]["Aeq"] Qc.update(model_second_stage[i]["Qc"]) rc = concatenate([rc, model_second_stage[i]["rc"]]) A_full = lil_matrix((nineq_index[-1], nv_index[-1])) b = model_first_stage["b"] A_full[0:int(nineq_index[0]), 0:int(nv_index[0])] = model_first_stage["A"] for i in range(ns): A_full[nineq_index[i]:nineq_index[i + 1], 0:nv_index[0]] = model_second_stage[i]["Ts"] A_full[nineq_index[i]:nineq_index[i + 1], nv_index[i]:nv_index[i + 1]] = model_second_stage[i]["Ws"] b = concatenate([b, model_second_stage[i]["hs"]]) # 3) Obtain the results for first-stage and second stage optimization problems # 3.1) Obtain the integrated solution (sol, obj, success) = miqcp(c, q, Aeq=Aeq_full, beq=beq, A=A_full, b=b, Qc=Qc, rc=rc, xmin=lb, xmax=ub, vtypes=vtypes) # 3.2) decouple the solution into multiple subsystems sol_first_stage = sol[0:nv_second_stage] sol_second_stage = {} for i in range(ns): sol_second_stage[i] = sol[int(nv_index[i]):int(nv_index[i + 1])] # 4) Verify the first-stage and second stage optization problem # 4.1) First-stage solution sol_first_stage = self.first_stage_solution_validation(sol=sol_first_stage) # 4.2) Second-stage solution sol_second_stage_checked = {}
""" Stochastic optimal power flow with multiple microgrids and mobile energy storage systems @author: Zhao Tianyang @e-mail: zhaoty@ntu.edu.sg @date: 10 Jan 2019 Major updates: 1) Update code style using PEP 8 -- Style Guide for Python Code 2) Store data in database 3) Scenario generation and reduction 4) Automatic results analysis Nomenclature: nV: number of variables mg: microgrid ds: distribution systems me: mobile energy storage systems ch: charging dc: discharging ele: electricity tra: traffic i,j,k: index t: time index T: time periods tns:traffic networks pns:power networks """ class StochasticDynamicOptimalPowerFlowTess(): def __init__(self): self.name = "Unit commitment with tess" def main(self, power_networks, micro_grids, profile, pv_profile, mess, traffic_networks, ns=100): """ Main entrance for network reconfiguration problems :param case: electric network information :param profile: load profile within the distribution networks :param micrgrids: dictionary for microgrids :param tess: dictionary for tess :return: network reconfiguration, distribution network status, and microgrid status """ T = len(profile) # Time spans self.T = T nmg = len(micro_grids) # Number of microgrids self.nmg = nmg nmes = len(mess) # Number of mobile energy storage systems self.nmes = nmes nb_tra = traffic_networks["bus"].shape[0] # Number of buses in the transportation networks self.nb_tra = nb_tra assert nb_tra == nmg, "The microgrids within the transportation networks are not synchronized!" # 1) Formulate the first stage optimization problem model_first_stage = self.first_stage_problem_formualtion(pns=power_networks, mgs=micro_grids, mess=mess, tns=traffic_networks) # (sol_first_stage, obj, success) = milp(model_first_stage["c"], Aeq=model_first_stage["Aeq"], # beq=model_first_stage["beq"], # A=model_first_stage["A"], b=model_first_stage["b"], # vtypes=model_first_stage["vtypes"], # xmax=model_first_stage["ub"], xmin=model_first_stage["lb"]) # sol_first_stage = self.first_stage_solution_validation(sol=sol_first_stage) # 2) Formulate the second stage optimization problem # Formulate the second stage scenarios (ds_second_stage, mgs_second_stage, weight) = self.scenario_generation_reduction(profile=profile, micro_grids=micro_grids, ns=ns, pns=power_networks, pv_profile=pv_profile, ns_reduced=round(0.98 * ns)) ns -= round(0.98 * ns) model_second_stage = {} for i in range(ns): model_second_stage[i] = self.second_stage_problem_formualtion(pns=power_networks, mgs=mgs_second_stage[i], mess=mess, tns=traffic_networks, profile=ds_second_stage[i, :], index=i, weight=weight[i]) # 3) Merge the first-stage problem and second stage problem lb = model_first_stage["lb"] ub = model_first_stage["ub"] vtypes = model_first_stage["vtypes"] c = model_first_stage["c"] Qc = dict() if model_first_stage["Aeq"] is not None: neq = model_first_stage["Aeq"].shape[0] else: neq = 0 if model_first_stage["A"] is not None: nineq = model_first_stage["A"].shape[0] else: nineq = 0 nv_first_stage = self.nv_first_stage nv_second_stage = self.nv_second_stage q = zeros(nv_first_stage) nv_index = zeros(ns + 1).astype(int) neq_index = zeros(ns + 1).astype(int) nineq_index = zeros(ns + 1).astype(int) neq_index[0] = neq nineq_index[0] = nineq nv_index[0] = nv_first_stage beq = model_first_stage["beq"] for i in range(ns): if model_second_stage[i]["Aeq"] is not None: neq_index[i + 1] = neq_index[i] + model_second_stage[i]["Aeq"].shape[0] else: neq_index[i + 1] = neq_index[i] if model_second_stage[i]["Ts"] is not None: nineq_index[i + 1] = 
nineq_index[i] + model_second_stage[i]["Ts"].shape[0] else: nineq_index[i + 1] = nineq_index[i] nv_index[i + 1] = nv_index[i] + nv_second_stage c = concatenate([c, model_second_stage[i]["c"]]) q = concatenate([q, model_second_stage[i]["q"]]) lb = concatenate([lb, model_second_stage[i]["lb"]]) ub = concatenate([ub, model_second_stage[i]["ub"]]) vtypes += model_second_stage[i]["vtypes"] beq = concatenate([beq, model_second_stage[i]["beq"]]) Aeq_full = lil_matrix((neq_index[-1], nv_index[-1])) Aeq_full[0:neq_index[0], 0:nv_index[0]] = model_first_stage["Aeq"] rc = zeros(0) for i in range(ns): Aeq_full[neq_index[i]:neq_index[i + 1], nv_index[i]:nv_index[i + 1]] = model_second_stage[i]["Aeq"] Qc.update(model_second_stage[i]["Qc"]) rc = concatenate([rc, model_second_stage[i]["rc"]]) A_full = lil_matrix((nineq_index[-1], nv_index[-1])) b = model_first_stage["b"] A_full[0:int(nineq_index[0]), 0:int(nv_index[0])] = model_first_stage["A"] for i in range(ns): A_full[nineq_index[i]:nineq_index[i + 1], 0:nv_index[0]] = model_second_stage[i]["Ts"] A_full[nineq_index[i]:nineq_index[i + 1], nv_index[i]:nv_index[i + 1]] = model_second_stage[i]["Ws"] b = concatenate([b, model_second_stage[i]["hs"]]) # 3) Obtain the results for first-stage and second stage optimization problems # 3.1) Obtain the integrated solution (sol, obj, success) = miqcp(c, q, Aeq=Aeq_full, beq=beq, A=A_full, b=b, Qc=Qc, rc=rc, xmin=lb, xmax=ub, vtypes=vtypes) # 3.2) decouple the solution into multiple subsystems sol_first_stage = sol[0:nv_second_stage] sol_second_stage = {} for i in range(ns): sol_second_stage[i] = sol[int(nv_index[i]):int(nv_index[i + 1])] # 4) Verify the first-stage and second stage optization problem # 4.1) First-stage solution sol_first_stage = self.first_stage_solution_validation(sol=sol_first_stage) # 4.2) Second-stage solution sol_second_stage_checked = {}
db_management = DataBaseManagement()
15
2023-11-27 15:57:53+00:00
16k
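main() in the record above stitches the first-stage constraints and the per-scenario coupling (Ts) and recourse (Ws) blocks into one big sparse system before handing it to the MIQCP solver. A toy version of that block layout, with invented dimensions purely for illustration:

import numpy as np
from scipy.sparse import lil_matrix

nv1, nv2, ns = 3, 2, 2                                 # stage-1/stage-2 vars, scenarios
A1 = np.ones((1, nv1))                                 # first-stage constraint rows
Ts = [np.full((1, nv1), s + 1.0) for s in range(ns)]   # couple scenario s to stage one
Ws = [np.ones((1, nv2)) for _ in range(ns)]            # scenario-local recourse rows

A = lil_matrix((1 + ns, nv1 + ns * nv2))
A[0, :nv1] = A1                                        # stage-1 rows touch only stage-1 vars
for s in range(ns):
    A[1 + s, :nv1] = Ts[s]                             # [Ts | 0 ... Ws ... 0] per scenario
    A[1 + s, nv1 + s * nv2: nv1 + (s + 1) * nv2] = Ws[s]

The block-diagonal placement of the Ws blocks is what keeps the constraint matrix sparse as the scenario count grows, which is why lil_matrix is used for assembly here.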
andryyy/ehlocomputer
models/listeners.py
[ { "identifier": "defaults", "path": "config/defaults.py", "snippet": "ACCEPT_LANGUAGES = [\"en\", \"de\"]\nMAX_HISTORIC_REVISIONS = 5\nWEBAUTHN_CHALLENGE_TIMEOUT = 30 # seconds\nPROXY_AUTH_TIMEOUT = 300 # seconds\nTABLE_PAGE_SIZE = 10\nTINYDB = {\n \"storage\": RedisLockMiddleware(JSONStorage),\n ...
import json import os import re import uuid from config import defaults from config import lego from config.database import * from email_validator import validate_email from pydantic import ( AfterValidator, BaseModel, EmailStr, Field, FilePath, HttpUrl, field_validator, model_validator, validator, ) from pydantic.networks import IPv4Address, IPv6Address from typing import Annotated, Any, Literal from . import ( utc_now_as_str, ensure_list, to_unique_sorted_str_list, get_validated_fqdn, flatten, )
12,610
class ListenerCreate(BaseModel): id: Annotated[str, Field(default_factory=lambda: str(uuid.uuid4()))] name: Annotated[str, Field(min_length=1)] configuration: dict = {} historic: list = [] created: Annotated[str, Field(default_factory=utc_now_as_str)] updated: Annotated[str, Field(default_factory=utc_now_as_str)] class ListenerLegoConfig(BaseModel): lego_provider: str acme_terms_agreed: Literal[True, "true"] provider_config: dict acme_server: Annotated[str, AfterValidator(lambda x: str(HttpUrl(x)))] acme_email: EmailStr key_type: Literal["EC256", "EC384", "RSA2048", "RSA4096", "RSA8192"] = "RSA2048" domains: str @model_validator(mode="before") @classmethod def check_lego(cls, data: Any) -> Any:
class ListenerCreate(BaseModel): id: Annotated[str, Field(default_factory=lambda: str(uuid.uuid4()))] name: Annotated[str, Field(min_length=1)] configuration: dict = {} historic: list = [] created: Annotated[str, Field(default_factory=utc_now_as_str)] updated: Annotated[str, Field(default_factory=utc_now_as_str)] class ListenerLegoConfig(BaseModel): lego_provider: str acme_terms_agreed: Literal[True, "true"] provider_config: dict acme_server: Annotated[str, AfterValidator(lambda x: str(HttpUrl(x)))] acme_email: EmailStr key_type: Literal["EC256", "EC384", "RSA2048", "RSA4096", "RSA8192"] = "RSA2048" domains: str @model_validator(mode="before") @classmethod def check_lego(cls, data: Any) -> Any:
if data.get("lego_provider") not in lego.LEGO_DNS_PROVIDERS.keys():
1
2023-12-01 08:36:45+00:00
16k
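check_lego in the record above is a pydantic v2 model_validator with mode="before", so it receives the raw input mapping before any per-field validation runs. A self-contained sketch of the same pattern, with KNOWN_PROVIDERS standing in for the real lego.LEGO_DNS_PROVIDERS lookup and the error message invented for illustration:

from typing import Any
from pydantic import BaseModel, model_validator

KNOWN_PROVIDERS = {"cloudflare", "route53"}  # stand-in for lego.LEGO_DNS_PROVIDERS

class ProviderConfig(BaseModel):
    lego_provider: str
    provider_config: dict = {}

    @model_validator(mode="before")
    @classmethod
    def check_provider(cls, data: Any) -> Any:
        # Runs on the raw dict before field validation, mirroring check_lego.
        if data.get("lego_provider") not in KNOWN_PROVIDERS:
            raise ValueError("unknown lego provider")
        return data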
fzmi/ubdd
models/dino/models/dino/dino.py
[ { "identifier": "box_ops", "path": "models/dino/util/box_ops.py", "snippet": "def box_cxcywh_to_xyxy(x):\ndef box_xyxy_to_cxcywh(x):\ndef box_iou(boxes1, boxes2):\ndef generalized_box_iou(boxes1, boxes2):\ndef box_iou_pairwise(boxes1, boxes2):\ndef generalized_box_iou_pairwise(boxes1, boxes2):\ndef mask...
import copy import math import torch import torch.nn.functional as F from typing import List from torch import nn from torchvision.ops.boxes import nms from models.dino.util import box_ops from models.dino.util.misc import (NestedTensor, nested_tensor_from_tensor_list, accuracy, get_world_size, interpolate, is_dist_avail_and_initialized, inverse_sigmoid) from .backbone import build_backbone from .matcher import build_matcher from .segmentation import (DETRsegm, PostProcessPanoptic, PostProcessSegm, dice_loss) from .deformable_transformer import build_deformable_transformer from .utils import sigmoid_focal_loss, MLP from ..registry import MODULE_BUILD_FUNCS from .dn_components import prepare_for_cdn,dn_post_process
11,429
l_dict.update(self.get_loss(loss, aux_outputs_known, targets, dn_pos_idx, num_boxes*scalar, **kwargs)) l_dict = {k + f'_dn_{idx}': v for k, v in l_dict.items()} losses.update(l_dict) else: l_dict = dict() l_dict['loss_bbox_dn']=torch.as_tensor(0.).to('cuda') l_dict['loss_giou_dn']=torch.as_tensor(0.).to('cuda') l_dict['loss_ce_dn']=torch.as_tensor(0.).to('cuda') l_dict['loss_xy_dn'] = torch.as_tensor(0.).to('cuda') l_dict['loss_hw_dn'] = torch.as_tensor(0.).to('cuda') l_dict['cardinality_error_dn'] = torch.as_tensor(0.).to('cuda') l_dict = {k + f'_{idx}': v for k, v in l_dict.items()} losses.update(l_dict) # interm_outputs loss if 'interm_outputs' in outputs: interm_outputs = outputs['interm_outputs'] indices = self.matcher(interm_outputs, targets) if return_indices: indices_list.append(indices) for loss in self.losses: if loss == 'masks': # Intermediate masks losses are too costly to compute, we ignore them. continue kwargs = {} if loss == 'labels': # Logging is enabled only for the last layer kwargs = {'log': False} l_dict = self.get_loss(loss, interm_outputs, targets, indices, num_boxes, **kwargs) l_dict = {k + f'_interm': v for k, v in l_dict.items()} losses.update(l_dict) # enc output loss if 'enc_outputs' in outputs: for i, enc_outputs in enumerate(outputs['enc_outputs']): indices = self.matcher(enc_outputs, targets) if return_indices: indices_list.append(indices) for loss in self.losses: if loss == 'masks': # Intermediate masks losses are too costly to compute, we ignore them. continue kwargs = {} if loss == 'labels': # Logging is enabled only for the last layer kwargs = {'log': False} l_dict = self.get_loss(loss, enc_outputs, targets, indices, num_boxes, **kwargs) l_dict = {k + f'_enc_{i}': v for k, v in l_dict.items()} losses.update(l_dict) if return_indices: indices_list.append(indices0_copy) return losses, indices_list return losses def prep_for_dn(self,dn_meta): output_known_lbs_bboxes = dn_meta['output_known_lbs_bboxes'] num_dn_groups,pad_size=dn_meta['num_dn_group'],dn_meta['pad_size'] assert pad_size % num_dn_groups==0 single_pad=pad_size//num_dn_groups return output_known_lbs_bboxes,single_pad,num_dn_groups class PostProcess(nn.Module): """ This module converts the model's output into the format expected by the coco api""" def __init__(self, num_select=100, nms_iou_threshold=-1) -> None: super().__init__() self.num_select = num_select self.nms_iou_threshold = nms_iou_threshold @torch.no_grad() def forward(self, outputs, target_sizes, not_to_xyxy=False, test=False): """ Perform the computation Parameters: outputs: raw outputs of the model target_sizes: tensor of dimension [batch_size x 2] containing the size of each images of the batch For evaluation, this must be the original image size (before any data augmentation) For visualization, this should be the image size after data augment, but before padding """ num_select = self.num_select out_logits, out_bbox = outputs['pred_logits'], outputs['pred_boxes'] assert len(out_logits) == len(target_sizes) assert target_sizes.shape[1] == 2 prob = out_logits.sigmoid() topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), num_select, dim=1) scores = topk_values topk_boxes = topk_indexes // out_logits.shape[2] labels = topk_indexes % out_logits.shape[2] if not_to_xyxy: boxes = out_bbox else: boxes = box_ops.box_cxcywh_to_xyxy(out_bbox) if test: assert not not_to_xyxy boxes[:,:,2:] = boxes[:,:,2:] - boxes[:,:,:2] boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1,1,4)) # and from relative [0, 1] to 
absolute [0, height] coordinates img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1) boxes = boxes * scale_fct[:, None, :] if self.nms_iou_threshold > 0: item_indices = [nms(b, s, iou_threshold=self.nms_iou_threshold) for b,s in zip(boxes, scores)] results = [{'scores': s[i], 'labels': l[i], 'boxes': b[i]} for s, l, b, i in zip(scores, labels, boxes, item_indices)] else: results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)] return results
# ------------------------------------------------------------------------ # DINO # Copyright (c) 2022 IDEA. All Rights Reserved. # Licensed under the Apache License, Version 2.0 [see LICENSE for details] # ------------------------------------------------------------------------ # Conditional DETR model and criterion classes. # Copyright (c) 2021 Microsoft. All Rights Reserved. # Licensed under the Apache License, Version 2.0 [see LICENSE for details] # ------------------------------------------------------------------------ # Modified from DETR (https://github.com/facebookresearch/detr) # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. # ------------------------------------------------------------------------ # Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR) # Copyright (c) 2020 SenseTime. All Rights Reserved. # ------------------------------------------------------------------------ class DINO(nn.Module): """ This is the Cross-Attention Detector module that performs object detection """ def __init__(self, backbone, transformer, num_classes, num_queries, aux_loss=False, iter_update=False, query_dim=2, random_refpoints_xy=False, fix_refpoints_hw=-1, num_feature_levels=1, nheads=8, # two stage two_stage_type='no', # ['no', 'standard'] two_stage_add_query_num=0, dec_pred_class_embed_share=True, dec_pred_bbox_embed_share=True, two_stage_class_embed_share=True, two_stage_bbox_embed_share=True, decoder_sa_type = 'sa', num_patterns = 0, dn_number = 100, dn_box_noise_scale = 0.4, dn_label_noise_ratio = 0.5, dn_labelbook_size = 100, ): """ Initializes the model. Parameters: backbone: torch module of the backbone to be used. See backbone.py transformer: torch module of the transformer architecture. See transformer.py num_classes: number of object classes num_queries: number of object queries, ie detection slot. This is the maximal number of objects Conditional DETR can detect in a single image. For COCO, we recommend 100 queries. aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used. 
fix_refpoints_hw: -1(default): learn w and h for each box seperately >0 : given fixed number -2 : learn a shared w and h """ super().__init__() self.num_queries = num_queries self.transformer = transformer self.num_classes = num_classes self.hidden_dim = hidden_dim = transformer.d_model self.num_feature_levels = num_feature_levels self.nheads = nheads self.label_enc = nn.Embedding(dn_labelbook_size + 1, hidden_dim) # setting query dim self.query_dim = query_dim assert query_dim == 4 self.random_refpoints_xy = random_refpoints_xy self.fix_refpoints_hw = fix_refpoints_hw # for dn training self.num_patterns = num_patterns self.dn_number = dn_number self.dn_box_noise_scale = dn_box_noise_scale self.dn_label_noise_ratio = dn_label_noise_ratio self.dn_labelbook_size = dn_labelbook_size # prepare input projection layers if num_feature_levels > 1: num_backbone_outs = len(backbone.num_channels) input_proj_list = [] for _ in range(num_backbone_outs): in_channels = backbone.num_channels[_] input_proj_list.append(nn.Sequential( nn.Conv2d(in_channels, hidden_dim, kernel_size=1), nn.GroupNorm(32, hidden_dim), )) for _ in range(num_feature_levels - num_backbone_outs): input_proj_list.append(nn.Sequential( nn.Conv2d(in_channels, hidden_dim, kernel_size=3, stride=2, padding=1), nn.GroupNorm(32, hidden_dim), )) in_channels = hidden_dim self.input_proj = nn.ModuleList(input_proj_list) else: assert two_stage_type == 'no', "two_stage_type should be no if num_feature_levels=1 !!!" self.input_proj = nn.ModuleList([ nn.Sequential( nn.Conv2d(backbone.num_channels[-1], hidden_dim, kernel_size=1), nn.GroupNorm(32, hidden_dim), )]) self.backbone = backbone self.aux_loss = aux_loss self.box_pred_damping = box_pred_damping = None self.iter_update = iter_update assert iter_update, "Why not iter_update?" 
# prepare pred layers self.dec_pred_class_embed_share = dec_pred_class_embed_share self.dec_pred_bbox_embed_share = dec_pred_bbox_embed_share # prepare class & box embed _class_embed = nn.Linear(hidden_dim, num_classes) _bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3) # init the two embed layers prior_prob = 0.01 bias_value = -math.log((1 - prior_prob) / prior_prob) _class_embed.bias.data = torch.ones(self.num_classes) * bias_value nn.init.constant_(_bbox_embed.layers[-1].weight.data, 0) nn.init.constant_(_bbox_embed.layers[-1].bias.data, 0) if dec_pred_bbox_embed_share: box_embed_layerlist = [_bbox_embed for i in range(transformer.num_decoder_layers)] else: box_embed_layerlist = [copy.deepcopy(_bbox_embed) for i in range(transformer.num_decoder_layers)] if dec_pred_class_embed_share: class_embed_layerlist = [_class_embed for i in range(transformer.num_decoder_layers)] else: class_embed_layerlist = [copy.deepcopy(_class_embed) for i in range(transformer.num_decoder_layers)] self.bbox_embed = nn.ModuleList(box_embed_layerlist) self.class_embed = nn.ModuleList(class_embed_layerlist) self.transformer.decoder.bbox_embed = self.bbox_embed self.transformer.decoder.class_embed = self.class_embed # two stage self.two_stage_type = two_stage_type self.two_stage_add_query_num = two_stage_add_query_num assert two_stage_type in ['no', 'standard'], "unknown param {} of two_stage_type".format(two_stage_type) if two_stage_type != 'no': if two_stage_bbox_embed_share: assert dec_pred_class_embed_share and dec_pred_bbox_embed_share self.transformer.enc_out_bbox_embed = _bbox_embed else: self.transformer.enc_out_bbox_embed = copy.deepcopy(_bbox_embed) if two_stage_class_embed_share: assert dec_pred_class_embed_share and dec_pred_bbox_embed_share self.transformer.enc_out_class_embed = _class_embed else: self.transformer.enc_out_class_embed = copy.deepcopy(_class_embed) self.refpoint_embed = None if self.two_stage_add_query_num > 0: self.init_ref_points(two_stage_add_query_num) self.decoder_sa_type = decoder_sa_type assert decoder_sa_type in ['sa', 'ca_label', 'ca_content'] if decoder_sa_type == 'ca_label': self.label_embedding = nn.Embedding(num_classes, hidden_dim) for layer in self.transformer.decoder.layers: layer.label_embedding = self.label_embedding else: for layer in self.transformer.decoder.layers: layer.label_embedding = None self.label_embedding = None self._reset_parameters() def _reset_parameters(self): # init input_proj for proj in self.input_proj: nn.init.xavier_uniform_(proj[0].weight, gain=1) nn.init.constant_(proj[0].bias, 0) def init_ref_points(self, use_num_queries): self.refpoint_embed = nn.Embedding(use_num_queries, self.query_dim) if self.random_refpoints_xy: self.refpoint_embed.weight.data[:, :2].uniform_(0,1) self.refpoint_embed.weight.data[:, :2] = inverse_sigmoid(self.refpoint_embed.weight.data[:, :2]) self.refpoint_embed.weight.data[:, :2].requires_grad = False if self.fix_refpoints_hw > 0: print("fix_refpoints_hw: {}".format(self.fix_refpoints_hw)) assert self.random_refpoints_xy self.refpoint_embed.weight.data[:, 2:] = self.fix_refpoints_hw self.refpoint_embed.weight.data[:, 2:] = inverse_sigmoid(self.refpoint_embed.weight.data[:, 2:]) self.refpoint_embed.weight.data[:, 2:].requires_grad = False elif int(self.fix_refpoints_hw) == -1: pass elif int(self.fix_refpoints_hw) == -2: print('learn a shared h and w') assert self.random_refpoints_xy self.refpoint_embed = nn.Embedding(use_num_queries, 2) self.refpoint_embed.weight.data[:, :2].uniform_(0,1) self.refpoint_embed.weight.data[:, 
:2] = inverse_sigmoid(self.refpoint_embed.weight.data[:, :2]) self.refpoint_embed.weight.data[:, :2].requires_grad = False self.hw_embed = nn.Embedding(1, 1) else: raise NotImplementedError('Unknown fix_refpoints_hw {}'.format(self.fix_refpoints_hw)) def forward(self, samples: NestedTensor, targets:List=None): """ The forward expects a NestedTensor, which consists of: - samples.tensor: batched images, of shape [batch_size x 3 x H x W] - samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels It returns a dict with the following elements: - "pred_logits": the classification logits (including no-object) for all queries. Shape= [batch_size x num_queries x num_classes] - "pred_boxes": The normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image (disregarding possible padding). See PostProcess for information on how to retrieve the unnormalized bounding box. - "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of dictionnaries containing the two above keys for each decoder layer. """ if isinstance(samples, (list, torch.Tensor)): samples = nested_tensor_from_tensor_list(samples) features, poss = self.backbone(samples) srcs = [] masks = [] for l, feat in enumerate(features): src, mask = feat.decompose() srcs.append(self.input_proj[l](src)) masks.append(mask) assert mask is not None if self.num_feature_levels > len(srcs): _len_srcs = len(srcs) for l in range(_len_srcs, self.num_feature_levels): if l == _len_srcs: src = self.input_proj[l](features[-1].tensors) else: src = self.input_proj[l](srcs[-1]) m = samples.mask mask = F.interpolate(m[None].float(), size=src.shape[-2:]).to(torch.bool)[0] pos_l = self.backbone[1](NestedTensor(src, mask)).to(src.dtype) srcs.append(src) masks.append(mask) poss.append(pos_l) if self.dn_number > 0 or targets is not None: input_query_label, input_query_bbox, attn_mask, dn_meta =\ prepare_for_cdn(dn_args=(targets, self.dn_number, self.dn_label_noise_ratio, self.dn_box_noise_scale), training=self.training,num_queries=self.num_queries,num_classes=self.num_classes, hidden_dim=self.hidden_dim,label_enc=self.label_enc) else: assert targets is None input_query_bbox = input_query_label = attn_mask = dn_meta = None hs, reference, hs_enc, ref_enc, init_box_proposal = self.transformer(srcs, masks, input_query_bbox, poss,input_query_label,attn_mask) # In case num object=0 hs[0] += self.label_enc.weight[0,0]*0.0 # deformable-detr-like anchor update # reference_before_sigmoid = inverse_sigmoid(reference[:-1]) # n_dec, bs, nq, 4 outputs_coord_list = [] for dec_lid, (layer_ref_sig, layer_bbox_embed, layer_hs) in enumerate(zip(reference[:-1], self.bbox_embed, hs)): layer_delta_unsig = layer_bbox_embed(layer_hs) layer_outputs_unsig = layer_delta_unsig + inverse_sigmoid(layer_ref_sig) layer_outputs_unsig = layer_outputs_unsig.sigmoid() outputs_coord_list.append(layer_outputs_unsig) outputs_coord_list = torch.stack(outputs_coord_list) outputs_class = torch.stack([layer_cls_embed(layer_hs) for layer_cls_embed, layer_hs in zip(self.class_embed, hs)]) if self.dn_number > 0 and dn_meta is not None: outputs_class, outputs_coord_list = \ dn_post_process(outputs_class, outputs_coord_list, dn_meta,self.aux_loss,self._set_aux_loss) out = {'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coord_list[-1]} if self.aux_loss: out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord_list) # 
for encoder output if hs_enc is not None: # prepare intermediate outputs interm_coord = ref_enc[-1] interm_class = self.transformer.enc_out_class_embed(hs_enc[-1]) out['interm_outputs'] = {'pred_logits': interm_class, 'pred_boxes': interm_coord} out['interm_outputs_for_matching_pre'] = {'pred_logits': interm_class, 'pred_boxes': init_box_proposal} # prepare enc outputs if hs_enc.shape[0] > 1: enc_outputs_coord = [] enc_outputs_class = [] for layer_id, (layer_box_embed, layer_class_embed, layer_hs_enc, layer_ref_enc) in enumerate(zip(self.enc_bbox_embed, self.enc_class_embed, hs_enc[:-1], ref_enc[:-1])): layer_enc_delta_unsig = layer_box_embed(layer_hs_enc) layer_enc_outputs_coord_unsig = layer_enc_delta_unsig + inverse_sigmoid(layer_ref_enc) layer_enc_outputs_coord = layer_enc_outputs_coord_unsig.sigmoid() layer_enc_outputs_class = layer_class_embed(layer_hs_enc) enc_outputs_coord.append(layer_enc_outputs_coord) enc_outputs_class.append(layer_enc_outputs_class) out['enc_outputs'] = [ {'pred_logits': a, 'pred_boxes': b} for a, b in zip(enc_outputs_class, enc_outputs_coord) ] out['dn_meta'] = dn_meta return out @torch.jit.unused def _set_aux_loss(self, outputs_class, outputs_coord): # this is a workaround to make torchscript happy, as torchscript # doesn't support dictionary with non-homogeneous values, such # as a dict having both a Tensor and a list. return [{'pred_logits': a, 'pred_boxes': b} for a, b in zip(outputs_class[:-1], outputs_coord[:-1])] class SetCriterion(nn.Module): """ This class computes the loss for Conditional DETR. The process happens in two steps: 1) we compute hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair of matched ground-truth / prediction (supervise class and box) """ def __init__(self, num_classes, matcher, weight_dict, focal_alpha, losses): """ Create the criterion. Parameters: num_classes: number of object categories, omitting the special no-object category matcher: module able to compute a matching between targets and proposals weight_dict: dict containing as key the names of the losses and as values their relative weight. losses: list of all the losses to be applied. See get_loss for list of available losses. 
focal_alpha: alpha in Focal Loss """ super().__init__() self.num_classes = num_classes self.matcher = matcher self.weight_dict = weight_dict self.losses = losses self.focal_alpha = focal_alpha def loss_labels(self, outputs, targets, indices, num_boxes, log=True): """Classification loss (Binary focal loss) targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes] """ assert 'pred_logits' in outputs src_logits = outputs['pred_logits'] idx = self._get_src_permutation_idx(indices) target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)]) target_classes = torch.full(src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device) target_classes[idx] = target_classes_o target_classes_onehot = torch.zeros([src_logits.shape[0], src_logits.shape[1], src_logits.shape[2]+1], dtype=src_logits.dtype, layout=src_logits.layout, device=src_logits.device) target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1) target_classes_onehot = target_classes_onehot[:,:,:-1] loss_ce = sigmoid_focal_loss(src_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2) * src_logits.shape[1] losses = {'loss_ce': loss_ce} if log: # TODO this should probably be a separate loss, not hacked in this one here losses['class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0] return losses @torch.no_grad() def loss_cardinality(self, outputs, targets, indices, num_boxes): """ Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients """ pred_logits = outputs['pred_logits'] device = pred_logits.device tgt_lengths = torch.as_tensor([len(v["labels"]) for v in targets], device=device) # Count the number of predictions that are NOT "no-object" (which is the last class) card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1) card_err = F.l1_loss(card_pred.float(), tgt_lengths.float()) losses = {'cardinality_error': card_err} return losses def loss_boxes(self, outputs, targets, indices, num_boxes): """Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4] The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size. """ assert 'pred_boxes' in outputs idx = self._get_src_permutation_idx(indices) src_boxes = outputs['pred_boxes'][idx] target_boxes = torch.cat([t['boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0) loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none') losses = {} losses['loss_bbox'] = loss_bbox.sum() / num_boxes loss_giou = 1 - torch.diag(box_ops.generalized_box_iou( box_ops.box_cxcywh_to_xyxy(src_boxes), box_ops.box_cxcywh_to_xyxy(target_boxes))) losses['loss_giou'] = loss_giou.sum() / num_boxes # calculate the x,y and h,w loss with torch.no_grad(): losses['loss_xy'] = loss_bbox[..., :2].sum() / num_boxes losses['loss_hw'] = loss_bbox[..., 2:].sum() / num_boxes return losses def loss_masks(self, outputs, targets, indices, num_boxes): """Compute the losses related to the masks: the focal loss and the dice loss. 
targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w] """ assert "pred_masks" in outputs src_idx = self._get_src_permutation_idx(indices) tgt_idx = self._get_tgt_permutation_idx(indices) src_masks = outputs["pred_masks"] src_masks = src_masks[src_idx] masks = [t["masks"] for t in targets] # TODO use valid to mask invalid areas due to padding in loss target_masks, valid = nested_tensor_from_tensor_list(masks).decompose() target_masks = target_masks.to(src_masks) target_masks = target_masks[tgt_idx] # upsample predictions to the target size src_masks = interpolate(src_masks[:, None], size=target_masks.shape[-2:], mode="bilinear", align_corners=False) src_masks = src_masks[:, 0].flatten(1) target_masks = target_masks.flatten(1) target_masks = target_masks.view(src_masks.shape) losses = { "loss_mask": sigmoid_focal_loss(src_masks, target_masks, num_boxes), "loss_dice": dice_loss(src_masks, target_masks, num_boxes), } return losses def _get_src_permutation_idx(self, indices): # permute predictions following indices batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)]) src_idx = torch.cat([src for (src, _) in indices]) return batch_idx, src_idx def _get_tgt_permutation_idx(self, indices): # permute targets following indices batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)]) tgt_idx = torch.cat([tgt for (_, tgt) in indices]) return batch_idx, tgt_idx def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs): loss_map = { 'labels': self.loss_labels, 'cardinality': self.loss_cardinality, 'boxes': self.loss_boxes, 'masks': self.loss_masks, } assert loss in loss_map, f'do you really want to compute {loss} loss?' return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs) def forward(self, outputs, targets, return_indices=False): """ This performs the loss computation. Parameters: outputs: dict of tensors, see the output specification of the model for the format targets: list of dicts, such that len(targets) == batch_size. The expected keys in each dict depends on the losses applied, see each loss' doc return_indices: used for vis. if True, the layer0-5 indices will be returned as well. 
""" outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'} device=next(iter(outputs.values())).device indices = self.matcher(outputs_without_aux, targets) if return_indices: indices0_copy = indices indices_list = [] # Compute the average number of target boxes accross all nodes, for normalization purposes num_boxes = sum(len(t["labels"]) for t in targets) num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=device) if is_dist_avail_and_initialized(): torch.distributed.all_reduce(num_boxes) num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item() # Compute all the requested losses losses = {} # prepare for dn loss dn_meta = outputs['dn_meta'] if self.training and dn_meta and 'output_known_lbs_bboxes' in dn_meta: output_known_lbs_bboxes,single_pad, scalar = self.prep_for_dn(dn_meta) dn_pos_idx = [] dn_neg_idx = [] for i in range(len(targets)): if len(targets[i]['labels']) > 0: t = torch.range(0, len(targets[i]['labels']) - 1).long().cuda() t = t.unsqueeze(0).repeat(scalar, 1) tgt_idx = t.flatten() output_idx = (torch.tensor(range(scalar)) * single_pad).long().cuda().unsqueeze(1) + t output_idx = output_idx.flatten() else: output_idx = tgt_idx = torch.tensor([]).long().cuda() dn_pos_idx.append((output_idx, tgt_idx)) dn_neg_idx.append((output_idx + single_pad // 2, tgt_idx)) output_known_lbs_bboxes=dn_meta['output_known_lbs_bboxes'] l_dict = {} for loss in self.losses: kwargs = {} if 'labels' in loss: kwargs = {'log': False} l_dict.update(self.get_loss(loss, output_known_lbs_bboxes, targets, dn_pos_idx, num_boxes*scalar,**kwargs)) l_dict = {k + f'_dn': v for k, v in l_dict.items()} losses.update(l_dict) else: l_dict = dict() l_dict['loss_bbox_dn'] = torch.as_tensor(0.).to('cuda') l_dict['loss_giou_dn'] = torch.as_tensor(0.).to('cuda') l_dict['loss_ce_dn'] = torch.as_tensor(0.).to('cuda') l_dict['loss_xy_dn'] = torch.as_tensor(0.).to('cuda') l_dict['loss_hw_dn'] = torch.as_tensor(0.).to('cuda') l_dict['cardinality_error_dn'] = torch.as_tensor(0.).to('cuda') losses.update(l_dict) for loss in self.losses: losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes)) # In case of auxiliary losses, we repeat this process with the output of each intermediate layer. if 'aux_outputs' in outputs: for idx, aux_outputs in enumerate(outputs['aux_outputs']): indices = self.matcher(aux_outputs, targets) if return_indices: indices_list.append(indices) for loss in self.losses: if loss == 'masks': # Intermediate masks losses are too costly to compute, we ignore them. 
continue kwargs = {} if loss == 'labels': # Logging is enabled only for the last layer kwargs = {'log': False} l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs) l_dict = {k + f'_{idx}': v for k, v in l_dict.items()} losses.update(l_dict) if self.training and dn_meta and 'output_known_lbs_bboxes' in dn_meta: aux_outputs_known = output_known_lbs_bboxes['aux_outputs'][idx] l_dict={} for loss in self.losses: kwargs = {} if 'labels' in loss: kwargs = {'log': False} l_dict.update(self.get_loss(loss, aux_outputs_known, targets, dn_pos_idx, num_boxes*scalar, **kwargs)) l_dict = {k + f'_dn_{idx}': v for k, v in l_dict.items()} losses.update(l_dict) else: l_dict = dict() l_dict['loss_bbox_dn']=torch.as_tensor(0.).to('cuda') l_dict['loss_giou_dn']=torch.as_tensor(0.).to('cuda') l_dict['loss_ce_dn']=torch.as_tensor(0.).to('cuda') l_dict['loss_xy_dn'] = torch.as_tensor(0.).to('cuda') l_dict['loss_hw_dn'] = torch.as_tensor(0.).to('cuda') l_dict['cardinality_error_dn'] = torch.as_tensor(0.).to('cuda') l_dict = {k + f'_{idx}': v for k, v in l_dict.items()} losses.update(l_dict) # interm_outputs loss if 'interm_outputs' in outputs: interm_outputs = outputs['interm_outputs'] indices = self.matcher(interm_outputs, targets) if return_indices: indices_list.append(indices) for loss in self.losses: if loss == 'masks': # Intermediate masks losses are too costly to compute, we ignore them. continue kwargs = {} if loss == 'labels': # Logging is enabled only for the last layer kwargs = {'log': False} l_dict = self.get_loss(loss, interm_outputs, targets, indices, num_boxes, **kwargs) l_dict = {k + f'_interm': v for k, v in l_dict.items()} losses.update(l_dict) # enc output loss if 'enc_outputs' in outputs: for i, enc_outputs in enumerate(outputs['enc_outputs']): indices = self.matcher(enc_outputs, targets) if return_indices: indices_list.append(indices) for loss in self.losses: if loss == 'masks': # Intermediate masks losses are too costly to compute, we ignore them. 
continue kwargs = {} if loss == 'labels': # Logging is enabled only for the last layer kwargs = {'log': False} l_dict = self.get_loss(loss, enc_outputs, targets, indices, num_boxes, **kwargs) l_dict = {k + f'_enc_{i}': v for k, v in l_dict.items()} losses.update(l_dict) if return_indices: indices_list.append(indices0_copy) return losses, indices_list return losses def prep_for_dn(self,dn_meta): output_known_lbs_bboxes = dn_meta['output_known_lbs_bboxes'] num_dn_groups,pad_size=dn_meta['num_dn_group'],dn_meta['pad_size'] assert pad_size % num_dn_groups==0 single_pad=pad_size//num_dn_groups return output_known_lbs_bboxes,single_pad,num_dn_groups class PostProcess(nn.Module): """ This module converts the model's output into the format expected by the coco api""" def __init__(self, num_select=100, nms_iou_threshold=-1) -> None: super().__init__() self.num_select = num_select self.nms_iou_threshold = nms_iou_threshold @torch.no_grad() def forward(self, outputs, target_sizes, not_to_xyxy=False, test=False): """ Perform the computation Parameters: outputs: raw outputs of the model target_sizes: tensor of dimension [batch_size x 2] containing the size of each images of the batch For evaluation, this must be the original image size (before any data augmentation) For visualization, this should be the image size after data augment, but before padding """ num_select = self.num_select out_logits, out_bbox = outputs['pred_logits'], outputs['pred_boxes'] assert len(out_logits) == len(target_sizes) assert target_sizes.shape[1] == 2 prob = out_logits.sigmoid() topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), num_select, dim=1) scores = topk_values topk_boxes = topk_indexes // out_logits.shape[2] labels = topk_indexes % out_logits.shape[2] if not_to_xyxy: boxes = out_bbox else: boxes = box_ops.box_cxcywh_to_xyxy(out_bbox) if test: assert not not_to_xyxy boxes[:,:,2:] = boxes[:,:,2:] - boxes[:,:,:2] boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1,1,4)) # and from relative [0, 1] to absolute [0, height] coordinates img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1) boxes = boxes * scale_fct[:, None, :] if self.nms_iou_threshold > 0: item_indices = [nms(b, s, iou_threshold=self.nms_iou_threshold) for b,s in zip(boxes, scores)] results = [{'scores': s[i], 'labels': l[i], 'boxes': b[i]} for s, l, b, i in zip(scores, labels, boxes, item_indices)] else: results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)] return results
@MODULE_BUILD_FUNCS.registe_with_name(module_name='dino')
17
2023-12-04 00:27:58+00:00
16k
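The `PostProcess` module in this record scores every (query, class) pair, takes a flat top-k over both axes at once, then recovers the query and class indices and rescales boxes to pixel coordinates. A self-contained sketch of that selection step, with toy shapes assumed:

```python
import torch

def box_cxcywh_to_xyxy(x):
    cx, cy, w, h = x.unbind(-1)
    return torch.stack([cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2], dim=-1)

@torch.no_grad()
def postprocess(logits, boxes_cxcywh, target_sizes, num_select=5):
    # flatten (query, class) scores and take top-k over both at once,
    # then recover which query / which class each pick came from
    prob = logits.sigmoid()
    topk_values, topk_indexes = torch.topk(prob.view(logits.shape[0], -1), num_select, dim=1)
    topk_boxes = topk_indexes // logits.shape[2]
    labels = topk_indexes % logits.shape[2]
    boxes = box_cxcywh_to_xyxy(boxes_cxcywh)
    boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))
    # rescale normalized [0, 1] boxes to absolute pixel coordinates
    img_h, img_w = target_sizes.unbind(1)
    scale = torch.stack([img_w, img_h, img_w, img_h], dim=1)
    return topk_values, labels, boxes * scale[:, None, :]

scores, labels, boxes = postprocess(
    torch.randn(2, 10, 3),                      # batch of 2, 10 queries, 3 classes
    torch.rand(2, 10, 4),                       # normalized cxcywh boxes
    torch.tensor([[480., 640.], [480., 640.]]))  # (h, w) per image
print(boxes.shape)  # torch.Size([2, 5, 4])
```

Selecting top-k over the flattened (query, class) grid lets one query contribute several labeled detections, which is why the record divides and takes the modulus by the class count to split the flat index back apart.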
girgle/DouZero_For_New_HLDDZ
test.py
[ { "identifier": "GameHelper", "path": "GameHelper.py", "snippet": "class GameHelper:\n def __init__(self):\n self.ScreenZoomRate = None\n self.counter = QTime()\n self.Pics = {}\n self.PicsCV = {}\n st = time.time()\n self.Handle = win32gui.FindWindow(\"Unity...
import GameHelper as gh import os import sys import time import threading import pyautogui import win32gui import multiprocessing as mp import DetermineColor as DC import cv2 import numpy as np import traceback import BidModel import LandlordModel import FarmerModel from GameHelper import GameHelper from PIL import Image from skimage.metrics import structural_similarity as ssim from collections import defaultdict from douzero.env.move_detector import get_move_type from PyQt5 import QtGui, QtWidgets, QtCore from PyQt5.QtWidgets import QGraphicsView, QGraphicsScene, QGraphicsItem, QGraphicsPixmapItem, QInputDialog, QMessageBox from PyQt5.QtGui import QPixmap, QIcon from PyQt5.QtCore import QTime, QEventLoop from MainWindow import Ui_Form from douzero.env.game import GameEnv from douzero.evaluation.deep_agent import DeepAgent from cnocr import CnOcr
11,932
'''from GameHelper import GameHelper import numpy as np import cv2 from main import MyPyQT_Form as form from collections import defaultdict import GameHelper as gh GeneralBtnPos = (268, 481, 1240, 255) helper = GameHelper() helper.ScreenZoomRate = 1.0 img = cv2.imread("chaojijiabei.png") img = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR) result = helper.LocateOnScreen("chaojijiabei_btn", img=img, region=GeneralBtnPos) print(result)''' # -*- coding: utf-8 -*- # Created by: Raf # Modify by: Vincentzyx ocr = CnOcr(det_model_name='en_PP-OCRv3_det', rec_model_name='en_PP-OCRv3', cand_alphabet="12345678910JQKA") # 所有参数都使用默认值
'''from GameHelper import GameHelper import numpy as np import cv2 from main import MyPyQT_Form as form from collections import defaultdict import GameHelper as gh GeneralBtnPos = (268, 481, 1240, 255) helper = GameHelper() helper.ScreenZoomRate = 1.0 img = cv2.imread("chaojijiabei.png") img = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR) result = helper.LocateOnScreen("chaojijiabei_btn", img=img, region=GeneralBtnPos) print(result)''' # -*- coding: utf-8 -*- # Created by: Raf # Modify by: Vincentzyx ocr = CnOcr(det_model_name='en_PP-OCRv3_det', rec_model_name='en_PP-OCRv3', cand_alphabet="12345678910JQKA") # 所有参数都使用默认值
helper = GameHelper()
0
2023-12-01 04:04:30+00:00
16k
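`GameHelper.LocateOnScreen` in this record resolves on-screen buttons by template matching within a region. A hypothetical stand-in built on `cv2.matchTemplate` with a synthetic screenshot; the helper name, region convention, and threshold are assumptions, not the project's actual implementation:

```python
import cv2
import numpy as np

def locate_template(screen_bgr, template_bgr, region, threshold=0.8):
    """Find template_bgr inside screen_bgr, restricted to region=(x, y, w, h).

    Returns the template's top-left corner in full-screen coordinates, or None.
    """
    x, y, w, h = region
    crop = screen_bgr[y:y + h, x:x + w]
    result = cv2.matchTemplate(crop, template_bgr, cv2.TM_CCOEFF_NORMED)
    _, max_val, _, max_loc = cv2.minMaxLoc(result)
    if max_val < threshold:
        return None
    return (x + max_loc[0], y + max_loc[1])

# synthetic demo: paste a textured "button" into a blank screen and find it again
rng = np.random.default_rng(0)
screen = np.zeros((600, 800, 3), dtype=np.uint8)
button = rng.integers(0, 255, size=(40, 120, 3), dtype=np.uint8)
screen[300:340, 500:620] = button
print(locate_template(screen, button, region=(400, 250, 350, 200)))  # (500, 300)
```

Cropping to a region before matching keeps the search fast and avoids false positives elsewhere on screen, which is presumably why the record passes `region=GeneralBtnPos`.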
yongzhuo/MacroGPT-Pretrain
macro_gpt/ft_gpt/train.pt.add.py
[ { "identifier": "CUDA_VISIBLE_DEVICES", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "CUDA_VISIBLE_DEVICES = \"0\"" }, { "identifier": "USE_TORCH", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "USE_TORCH = \"1\"" }, { "identifier"...
import random import copy import sys import os import bitsandbytes as bnb import torch.nn as nn import transformers import torch from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import CUDA_VISIBLE_DEVICES, USE_TORCH, CPU_NUMS # from config from transformers.models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from peft import (get_peft_model_state_dict, get_peft_model, LoraConfig) from transformers import AutoModelForCausalLM, AutoTokenizer from transformers.modeling_utils import unwrap_model from tensorboardX import SummaryWriter from datasets import load_dataset from macro_gpt.models.llama.modeling_llama import LlamaForCausalLM as LLMForCausalLM from macro_gpt.models.llama.tokenization_llama import LlamaTokenizer as LLMTokenizer from macro_gpt.models.llama.modeling_llama import LlamaConfig as LLMConfig from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import PATH_MODEL_PRETRAIN, DATA_PATH, MODEL_SAVE_DIR, REPO_ID from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import MICRO_BATCH_SIZE, BATCH_SIZE, GRADIENT_ACCUMULATION_STEPS from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import LEARNING_RATE, EPOCHS, SAVE_STEPS, VAL_SET_SIZE, TARGET_MODULES from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import IS_PARALLELIZABLE, MODEL_PARALLEL, USE_CACHE from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import MAX_LENGTH_Q, MAX_LENGTH_A, MAX_LENGTH_QA from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import LORA_DROPOUT, LORA_ALPHA, LORA_R from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import PATH_MODEL_CONFIG, PATH_TOKENIZER_PRETRAIN
13,109
print_rank_0(out) return out def data_collator(batch): def get_position_ids(seq, bos_token_id): seq_length = len(seq) position_ids = torch.arange(seq_length, dtype=torch.long).unsqueeze(0) return position_ids def get_masks(seq, special_ids=IDS_ORG): """ padding-mask """ # mask until ID_SOP attention_mask = torch.ones((1, len(seq), len(seq))) attention_mask.tril_() # ### 如果 padding-right, 也mask掉 # for idx, s in enumerate(seq): # if s in special_ids: # attention_mask[..., idx] = 1 attention_mask = (attention_mask < 0.5).bool() return attention_mask len_max_batch = [len(batch[i].get("input_ids")) + len(batch[i].get("labels")) + 1 for i in range(len(batch))] len_max_batch = min(MAX_LENGTH_QA, max(len_max_batch)) batch_attention_mask = [] batch_position_ids = [] batch_input_ids = [] batch_labels = [] for ba in batch: x, y = ba.get("input_ids"), ba.get("labels") len_padding = len_max_batch - len(x) - len(y) if tokenizer.padding_side and tokenizer.padding_side == "left": labels = [-100] * len_padding + x + y input_ids = [ID_PAD] * (len_padding) + x + y else: labels = x + y + [-100] * len_padding input_ids = x + y + [ID_PAD] * (len_padding) tensor_position_ids = get_position_ids(input_ids, bos_token_id=ID_SOP) tensor_attention_mask = get_masks(input_ids, special_ids=IDS_ORG) tensor_input_ids = torch.tensor(input_ids, dtype=torch.long) tensor_labels = torch.tensor(labels, dtype=torch.long) batch_attention_mask.append(tensor_attention_mask) batch_position_ids.append(tensor_position_ids) batch_input_ids.append(tensor_input_ids) batch_labels.append(tensor_labels) # print_rank_0(batch_attention_mask) batch_attention_mask = torch.stack(batch_attention_mask) batch_position_ids = torch.stack(batch_position_ids) batch_input_ids = torch.stack(batch_input_ids) batch_labels = torch.stack(batch_labels) input_dict = { # "full_attention_mask": copy.deepcopy(batch_attention_mask), # "attention_mask": batch_attention_mask, # "position_ids": batch_position_ids, "input_ids": batch_input_ids, "labels": batch_labels, } # print_rank_0(input_dict) return input_dict def dfs_file(path_dir): """ 递归获取某个目录下的所有文件(所有层, 包括子目录) Args: path_dir[String]:, path of dir, eg. "/home/data" Returns: data[List]: data of input, eg. 
["2020_01_08.txt"] """ path_files = [] for root, dirs, files in os.walk(path_dir): # 分别代表根目录、文件夹、文件 for file in files: # 遍历文件 file_path = os.path.join(root, file) # 获取文件绝对路径 path_files.append(file_path) # 将文件路径添加进列表 files = list(set(path_files)) files.sort() # the same list return files def print_rank_0(*args): """ 只打印 0 号GPU的 """ # if torch.distributed.get_rank() == 0: # 一般用0,当然,可以选任意的rank保存。 # print(*args) print(*args) def local_rank_is_0(): """ 判断是哪台机子的 """ # flag = False # if torch.distributed.get_rank() == 0: # flag = True # return flag return True # import torch.distributed as dist # dist.init_process_group(backend='nccl') # torch.distributed.init_process_group() tokenizer = LLMTokenizer.from_pretrained(PATH_TOKENIZER_PRETRAIN) # tokenizer.pad_token = tokenizer.eos_token # tokenizer.padding_side = "left" # Allow batched inference tokenizer.padding_side = "right" # Allow batched inference # ID_gMASK = 64790 # ID_BOS = 64792 # ID_EOS = 64793 # ID_MASK = 64789 # ID_PAD = 2 ID_EOP = 2 ID_SOP = 1 ID_BOS = 1 ID_EOS = 2 ID_PAD = 0 IDS_ORG = [ID_PAD] # { "<|endoftext|>": 50256, # "### End": 50257, # "### Instruction:": 50258, # "### Response:\n": 50259 # } model = LLMForCausalLM.from_pretrained(PATH_MODEL_CONFIG) model.gradient_checkpointing_enable() model.enable_input_require_grads() model.is_parallelizable = IS_PARALLELIZABLE model.model_parallel = MODEL_PARALLEL
# !/usr/bin/python # -*- coding: utf-8 -*- # @time : 2023/3/5 21:04 # @author : Mo # @function: macro-gpt path_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "../..")) sys.path.append(path_root) os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:3072" os.environ["CUDA_VISIBLE_DEVICES"] = CUDA_VISIBLE_DEVICES os.environ["USE_TORCH"] = USE_TORCH os.environ["OMP_NUM_THREADS"] = CPU_NUMS # export OMP_NUM_THREADS=1 os.environ["OPENBLAS_NUM_THREADS"] = CPU_NUMS # export OPENBLAS_NUM_THREADS=1 os.environ["MKL_NUM_THREADS"] = CPU_NUMS # export MKL_NUM_THREADS=1 os.environ["VECLIB_MAXIMUM_THREADS"] = CPU_NUMS # export VECLIB_MAXIMUM_THREADS=1 os.environ["NUMEXPR_NUM_THREADS"] = CPU_NUMS # export NUMEXPR_NUM_THREADS=1 def save_model_state(model, config=None, model_save_dir="./", model_name="adapter_model.bin"): """ 仅保存 有梯度 的 模型参数(推荐使用) """ if not os.path.exists(model_save_dir): os.makedirs(model_save_dir) # save config if config: config.save_pretrained(model_save_dir) # config.to_dict() # save model path_model = os.path.join(model_save_dir, model_name) # grad_params_dict = {k: v.to("cpu") for k, v in model.named_parameters() # if v.requires_grad == True} grad_params_dict = {k: v.to("cpu") for k, v in model.named_parameters()} torch.save(grad_params_dict, path_model) print_rank_0("******model_save_path is {}******".format(path_model)) def print_rank_0_named_parameters(model, use_print_rank_0_data=False): """ 打印模型训练参数/数据类型信息 """ trainable_params = 0 all_param = 0 for name, param in model.named_parameters(): if use_print_rank_0_data: print_rank_0((name, param.data.dtype, param.requires_grad, param.data)) else: print_rank_0((name, param.data.dtype, param.requires_grad)) num_params = param.numel() # if using DS Zero 3 and the weights are initialized empty if num_params == 0 and hasattr(param, "ds_numel"): num_params = param.ds_numel all_param += num_params if param.requires_grad: trainable_params += num_params print_rank_0(f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}") def prepare_model_for_half_training(model, output_embedding_layer_name="lm_head", use_gradient_checkpointing=True, layer_norm_names=["layer_norm"]): r""" This method wrapps the entire protocol for preparing a model before running a training. 
This includes: 1- Cast the layernorm in fp32 2- making output embedding layer require grads 3- Add the upcasting of the lm head to fp32 Args: model, (`transformers.PreTrainedModel`): The loaded model from `transformers` """ # 不要使用 model.half(), 这样会先截取精度再训练了, 最初data就要保持half for name, param in model.named_parameters(): # freeze base model's layers # cast layer norm in fp32 for stability for 8bit models if param.ndim == 1 and any(layer_norm_name in name for layer_norm_name in layer_norm_names): param.data = param.data.to(torch.float32) elif output_embedding_layer_name in name: # lm_head也需要是tf.float32(最后一层) param.data = param.data.to(torch.float32) else: param.data = param.data.to(torch.half) if use_gradient_checkpointing: # For backward compatibility if hasattr(model, "enable_input_require_grads"): model.enable_input_require_grads() else: def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) # enable gradient checkpointing for memory efficiency model.gradient_checkpointing_enable() return model def generate_prompt(data_point, is_logger=False): # sorry about the formatting disaster gotta move fast # text_1 = f"指令:\n{data_point.get('instruction', '')}\n问:\n{data_point.get('input', '')}\n答:\n" \ # if data_point.get('input', '') else f"指令:\n{data_point.get('instruction', '')}\n答:\n" # text_2 = f"{data_point.get('output', '')}" text_a = data_point.get("a", "") prompt_str_1 = text_a # end with gMASK, <sop> x = tokenizer.encode(prompt_str_1) if len(x) > MAX_LENGTH_QA - 2: x = x[:MAX_LENGTH_QA - 2] if not x: x = [ID_PAD, ID_EOS] if x and x[-1] != ID_EOS: x += [ID_EOS] out = {"input_ids": x, "labels": []} if is_logger: print_rank_0(prompt_str_1) print_rank_0(out) return out def data_collator(batch): def get_position_ids(seq, bos_token_id): seq_length = len(seq) position_ids = torch.arange(seq_length, dtype=torch.long).unsqueeze(0) return position_ids def get_masks(seq, special_ids=IDS_ORG): """ padding-mask """ # mask until ID_SOP attention_mask = torch.ones((1, len(seq), len(seq))) attention_mask.tril_() # ### 如果 padding-right, 也mask掉 # for idx, s in enumerate(seq): # if s in special_ids: # attention_mask[..., idx] = 1 attention_mask = (attention_mask < 0.5).bool() return attention_mask len_max_batch = [len(batch[i].get("input_ids")) + len(batch[i].get("labels")) + 1 for i in range(len(batch))] len_max_batch = min(MAX_LENGTH_QA, max(len_max_batch)) batch_attention_mask = [] batch_position_ids = [] batch_input_ids = [] batch_labels = [] for ba in batch: x, y = ba.get("input_ids"), ba.get("labels") len_padding = len_max_batch - len(x) - len(y) if tokenizer.padding_side and tokenizer.padding_side == "left": labels = [-100] * len_padding + x + y input_ids = [ID_PAD] * (len_padding) + x + y else: labels = x + y + [-100] * len_padding input_ids = x + y + [ID_PAD] * (len_padding) tensor_position_ids = get_position_ids(input_ids, bos_token_id=ID_SOP) tensor_attention_mask = get_masks(input_ids, special_ids=IDS_ORG) tensor_input_ids = torch.tensor(input_ids, dtype=torch.long) tensor_labels = torch.tensor(labels, dtype=torch.long) batch_attention_mask.append(tensor_attention_mask) batch_position_ids.append(tensor_position_ids) batch_input_ids.append(tensor_input_ids) batch_labels.append(tensor_labels) # print_rank_0(batch_attention_mask) batch_attention_mask = torch.stack(batch_attention_mask) batch_position_ids = torch.stack(batch_position_ids) batch_input_ids = torch.stack(batch_input_ids) batch_labels = 
torch.stack(batch_labels) input_dict = { # "full_attention_mask": copy.deepcopy(batch_attention_mask), # "attention_mask": batch_attention_mask, # "position_ids": batch_position_ids, "input_ids": batch_input_ids, "labels": batch_labels, } # print_rank_0(input_dict) return input_dict def dfs_file(path_dir): """ 递归获取某个目录下的所有文件(所有层, 包括子目录) Args: path_dir[String]:, path of dir, eg. "/home/data" Returns: data[List]: data of input, eg. ["2020_01_08.txt"] """ path_files = [] for root, dirs, files in os.walk(path_dir): # 分别代表根目录、文件夹、文件 for file in files: # 遍历文件 file_path = os.path.join(root, file) # 获取文件绝对路径 path_files.append(file_path) # 将文件路径添加进列表 files = list(set(path_files)) files.sort() # the same list return files def print_rank_0(*args): """ 只打印 0 号GPU的 """ # if torch.distributed.get_rank() == 0: # 一般用0,当然,可以选任意的rank保存。 # print(*args) print(*args) def local_rank_is_0(): """ 判断是哪台机子的 """ # flag = False # if torch.distributed.get_rank() == 0: # flag = True # return flag return True # import torch.distributed as dist # dist.init_process_group(backend='nccl') # torch.distributed.init_process_group() tokenizer = LLMTokenizer.from_pretrained(PATH_TOKENIZER_PRETRAIN) # tokenizer.pad_token = tokenizer.eos_token # tokenizer.padding_side = "left" # Allow batched inference tokenizer.padding_side = "right" # Allow batched inference # ID_gMASK = 64790 # ID_BOS = 64792 # ID_EOS = 64793 # ID_MASK = 64789 # ID_PAD = 2 ID_EOP = 2 ID_SOP = 1 ID_BOS = 1 ID_EOS = 2 ID_PAD = 0 IDS_ORG = [ID_PAD] # { "<|endoftext|>": 50256, # "### End": 50257, # "### Instruction:": 50258, # "### Response:\n": 50259 # } model = LLMForCausalLM.from_pretrained(PATH_MODEL_CONFIG) model.gradient_checkpointing_enable() model.enable_input_require_grads() model.is_parallelizable = IS_PARALLELIZABLE model.model_parallel = MODEL_PARALLEL
model.config.use_cache = USE_CACHE
20
2023-11-30 12:39:19+00:00
16k
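The `data_collator` above pads concatenated prompt/continuation ids to the batch maximum and marks padding with label `-100` so the cross-entropy loss skips it, honoring `tokenizer.padding_side`. A compact sketch of that padding logic; the pad id and toy batch are assumptions:

```python
import torch

ID_PAD = 0  # assumed pad id, mirroring the record's constants

def collate(batch, max_len=16, padding_side="right"):
    """Pad x (prompt) + y (continuation) pairs to the longest example in the
    batch; label -100 marks pad positions so cross-entropy ignores them."""
    longest = min(max_len, max(len(b["x"]) + len(b["y"]) for b in batch))
    input_ids, labels = [], []
    for b in batch:
        x, y = b["x"], b["y"]  # assumed already truncated to max_len
        pad = longest - len(x) - len(y)
        if padding_side == "left":
            ids = [ID_PAD] * pad + x + y
            lbl = [-100] * pad + x + y
        else:
            ids = x + y + [ID_PAD] * pad
            lbl = x + y + [-100] * pad
        input_ids.append(torch.tensor(ids, dtype=torch.long))
        labels.append(torch.tensor(lbl, dtype=torch.long))
    return {"input_ids": torch.stack(input_ids), "labels": torch.stack(labels)}

out = collate([{"x": [5, 6], "y": [7]}, {"x": [5], "y": [8, 9, 10]}])
print(out["input_ids"])  # right-padded ids
print(out["labels"])     # pads replaced by -100
```

As in the record, real tokens stay in the loss on both sides of the x/y split, which is the usual choice for pretraining; supervised fine-tuning would additionally mask the prompt.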
owkin/fedeca
fedeca/scripts/dp_logreg.py
[ { "identifier": "TorchDPFedAvgAlgo", "path": "fedeca/algorithms/torch_dp_fed_avg_algo.py", "snippet": "class TorchDPFedAvgAlgo(TorchFedAvgAlgo):\n \"\"\"To be inherited.\n\n Wraps the necessary operation so a torch model can be trained in the Federated\n Averaging strategy using DP.\n \"\"\"...
import sys import numpy as np import pandas as pd import torch import torch.nn as nn from itertools import product from sklearn.metrics import accuracy_score from substrafl.algorithms.pytorch import TorchNewtonRaphsonAlgo from substrafl.model_loading import download_algo_state from substrafl.strategies import FedAvg, NewtonRaphson from torch.optim import SGD from fedeca.algorithms.torch_dp_fed_avg_algo import TorchDPFedAvgAlgo from fedeca.fedeca_core import LogisticRegressionTorch from fedeca.utils import ( Experiment, make_accuracy_function, make_substrafl_torch_dataset_class, ) from fedeca.utils.survival_utils import CoxData, make_categorical
11,471
"""Runs the propensity model training part with DP.""" if __name__ == "__main__": epsilons = [0.1, 1.0, 5.0, 10.0][::-1] deltas = [10 ** (-i) for i in range(1, 3)] START_SEED = 42 NDIM = 10 NUM_ROUNDS = 10 NUM_UPDATES = 100 N_REPETITIONS = 5 BACKEND_TYPE = "subprocess" BATCH_SIZE = 32 na_proportion = 0.0 seeds = np.arange(START_SEED, START_SEED + N_REPETITIONS).tolist() rng = np.random.default_rng(seeds[0]) # Generating data with strong linear relationship simu_coxreg = CoxData( n_samples=300, ndim=NDIM, prop_treated=0.5, propensity="linear", dtype="float32", overlap=100.0, seed=rng, random_censoring=True, censoring_factor=0.3, standardize_features=False, ) X, T, C, treated, _ = simu_coxreg.generate_data() # Will make first columns to be categorical
"""Runs the propensity model training part with DP.""" if __name__ == "__main__": epsilons = [0.1, 1.0, 5.0, 10.0][::-1] deltas = [10 ** (-i) for i in range(1, 3)] START_SEED = 42 NDIM = 10 NUM_ROUNDS = 10 NUM_UPDATES = 100 N_REPETITIONS = 5 BACKEND_TYPE = "subprocess" BATCH_SIZE = 32 na_proportion = 0.0 seeds = np.arange(START_SEED, START_SEED + N_REPETITIONS).tolist() rng = np.random.default_rng(seeds[0]) # Generating data with strong linear relationship simu_coxreg = CoxData( n_samples=300, ndim=NDIM, prop_treated=0.5, propensity="linear", dtype="float32", overlap=100.0, seed=rng, random_censoring=True, censoring_factor=0.3, standardize_features=False, ) X, T, C, treated, _ = simu_coxreg.generate_data() # Will make first columns to be categorical
Xcat, Xcont = make_categorical(X, up_to=0)
6
2023-11-27 18:01:37+00:00
16k
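This script sweeps privacy budgets over repeated seeded runs (`epsilons` x `deltas` x `seeds`). A sketch of that grid with `itertools.product`; the noise calibration shown is the textbook Gaussian mechanism (unit sensitivity, strictly valid for eps < 1), not necessarily what fedeca's DP strategy uses:

```python
from itertools import product
import numpy as np

epsilons = [0.1, 1.0, 5.0, 10.0][::-1]
deltas = [10 ** (-i) for i in range(1, 3)]  # [0.1, 0.01]
seeds = np.arange(42, 42 + 5).tolist()      # START_SEED .. START_SEED + N_REPETITIONS - 1

for eps, delta, seed in product(epsilons, deltas, seeds):
    rng = np.random.default_rng(seed)  # fresh, reproducible generator per repetition
    # classic Gaussian-mechanism sigma for one release with unit L2 sensitivity
    sigma = np.sqrt(2 * np.log(1.25 / delta)) / eps
    print(f"eps={eps:>4} delta={delta:<5} seed={seed} sigma={sigma:.3f}")
```

Fixing the same seed list across every (epsilon, delta) pair keeps the synthetic cohorts identical between configurations, so differences in accuracy are attributable to the privacy budget rather than the data draw.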
aliyun/pai-python-sdk
pai/modelscope/model.py
[ { "identifier": "ImageLabel", "path": "pai/api/image.py", "snippet": "class ImageLabel(object):\n \"\"\"Image Label Class.\"\"\"\n\n # Unofficial Image Label\n UNOFFICIAL_LABEL = \"system.official=false\"\n # Official Image Label\n OFFICIAL_LABEL = \"system.official=true\"\n\n # PAI Im...
import logging from typing import Any, Dict, List, Optional, Union from ..api.image import ImageLabel from ..common.utils import to_semantic_version from ..model import ( DefaultServiceConfig, ModelBase, ResourceConfig, container_serving_spec, ) from ..serializers import SerializerBase from ..session import Session, get_default_session
11,105
# Copyright 2023 Alibaba, Inc. or its affiliates. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. logger = logging.getLogger(__name__) class ModelScopeModel(ModelBase): """A ModelScope ``Model`` that can be deployed in PAI to create a prediction service. A ModelScopeModel instance includes the model artifact path and information on how to create prediction service in PAI. By calling the deploy() method, a prediction service is created in PAI and a :class:`pai.predictor.Predictor` instance is returned that can be used to make prediction to the service. Example:: # Initialize a ModelScopeModel. m: ModelScopeModel = ModelScopeModel( model_data="oss://bucket-name/path/to/model", source_dir="./serving/src/", command="python serving.py", modelscope_version="latest", ) # Deploy the model to create an online prediction service. p: Predictor = m.deploy( service_name="ms_bert_serving", instance_type="ecs.gn6i-c4g1.xlarge", instance_count=1, options={ "metadata.rpc.keepalive": 5000000, "features.eas.aliyun.com/extra-ephemeral-storage":"40Gi", }, ) # Make prediction by sending the data to the online prediction service. result = p.predict("weather is good") """ def __init__( self, model_data: Optional[str] = None, image_uri: Optional[str] = None, modelscope_version: Optional[str] = None, command: Optional[str] = None, source_dir: Optional[str] = None, git_config: Optional[Dict[str, str]] = None, port: Optional[int] = None, environment_variables: Optional[Dict[str, str]] = None, requirements: Optional[List[str]] = None, requirements_path: Optional[str] = None, health_check: Optional[Dict[str, Any]] = None,
# Copyright 2023 Alibaba, Inc. or its affiliates. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. logger = logging.getLogger(__name__) class ModelScopeModel(ModelBase): """A ModelScope ``Model`` that can be deployed in PAI to create a prediction service. A ModelScopeModel instance includes the model artifact path and information on how to create prediction service in PAI. By calling the deploy() method, a prediction service is created in PAI and a :class:`pai.predictor.Predictor` instance is returned that can be used to make prediction to the service. Example:: # Initialize a ModelScopeModel. m: ModelScopeModel = ModelScopeModel( model_data="oss://bucket-name/path/to/model", source_dir="./serving/src/", command="python serving.py", modelscope_version="latest", ) # Deploy the model to create an online prediction service. p: Predictor = m.deploy( service_name="ms_bert_serving", instance_type="ecs.gn6i-c4g1.xlarge", instance_count=1, options={ "metadata.rpc.keepalive": 5000000, "features.eas.aliyun.com/extra-ephemeral-storage":"40Gi", }, ) # Make prediction by sending the data to the online prediction service. result = p.predict("weather is good") """ def __init__( self, model_data: Optional[str] = None, image_uri: Optional[str] = None, modelscope_version: Optional[str] = None, command: Optional[str] = None, source_dir: Optional[str] = None, git_config: Optional[Dict[str, str]] = None, port: Optional[int] = None, environment_variables: Optional[Dict[str, str]] = None, requirements: Optional[List[str]] = None, requirements_path: Optional[str] = None, health_check: Optional[Dict[str, Any]] = None,
session: Optional[Session] = None,
7
2023-12-01 01:40:12+00:00
16k
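`ModelScopeModel.deploy` in this record turns a model artifact plus serving configuration into an online service and returns a `Predictor`. A purely hypothetical skeleton of that interface shape, not the PAI SDK's actual implementation:

```python
from dataclasses import dataclass, field
from typing import Dict, Optional

@dataclass
class Predictor:
    service_name: str

    def predict(self, data):
        # placeholder: a real predictor would serialize `data` and POST it
        # to the deployed service's endpoint
        return {"service": self.service_name, "echo": data}

@dataclass
class Model:
    model_data: Optional[str] = None
    image_uri: Optional[str] = None
    environment_variables: Dict[str, str] = field(default_factory=dict)

    def deploy(self, service_name: str, instance_type: str, instance_count: int = 1) -> Predictor:
        # a real SDK would build the serving spec and call the platform API here
        print(f"deploying {self.model_data or self.image_uri} as {service_name} "
              f"on {instance_count}x {instance_type}")
        return Predictor(service_name)

p = Model(model_data="oss://bucket/path/to/model").deploy("demo_serving", "ecs.gn6i-c4g1.xlarge")
print(p.predict("weather is good"))
```

Returning a `Predictor` from `deploy()` separates lifecycle management from inference, matching the usage example embedded in the record's docstring.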
JunMa11/UHNSeg-Quiz
nnunetv2/inference/predict_from_raw_data.py
[ { "identifier": "default_num_processes", "path": "nnunetv2/configuration.py", "snippet": "ANISO_THRESHOLD = 3 # determines when a sample is considered anisotropic (3 means that the spacing in the low" }, { "identifier": "PreprocessAdapterFromNpy", "path": "nnunetv2/inference/data_iterators....
import inspect import multiprocessing import os import traceback import numpy as np import torch import nnunetv2 import argparse import multiprocessing import argparse import multiprocessing from copy import deepcopy from time import sleep from typing import Tuple, Union, List, Optional from acvl_utils.cropping_and_padding.padding import pad_nd_image from batchgenerators.dataloading.multi_threaded_augmenter import MultiThreadedAugmenter from batchgenerators.utilities.file_and_folder_operations import load_json, join, isfile, maybe_mkdir_p, isdir, subdirs, \ save_json from torch import nn from torch._dynamo import OptimizedModule from torch.nn.parallel import DistributedDataParallel from tqdm import tqdm from nnunetv2.configuration import default_num_processes from nnunetv2.inference.data_iterators import PreprocessAdapterFromNpy, preprocessing_iterator_fromfiles, \ preprocessing_iterator_fromnpy from nnunetv2.inference.export_prediction import export_prediction_from_logits, \ convert_predicted_logits_to_segmentation_with_correct_shape from nnunetv2.inference.sliding_window_prediction import compute_gaussian, \ compute_steps_for_sliding_window from nnunetv2.utilities.file_path_utilities import get_output_folder, check_workers_alive_and_busy from nnunetv2.utilities.find_class_by_name import recursive_find_python_class from nnunetv2.utilities.helpers import empty_cache, dummy_context from nnunetv2.utilities.json_export import recursive_fix_for_json_export from nnunetv2.utilities.label_handling.label_handling import determine_num_input_channels from nnunetv2.utilities.plans_handling.plans_handler import PlansManager, ConfigurationManager from nnunetv2.utilities.utils import create_lists_from_splitted_dataset_folder from nnunetv2.paths import nnUNet_results, nnUNet_raw from nnunetv2.imageio.simpleitk_reader_writer import SimpleITKIO
11,229
# mta = SingleThreadedAugmenter(ppa, None) # else: # mta = MultiThreadedAugmenter(ppa, None, num_processes, 1, None, pin_memory=pin_memory) # return mta def get_data_iterator_from_raw_npy_data(self, image_or_list_of_images: Union[np.ndarray, List[np.ndarray]], segs_from_prev_stage_or_list_of_segs_from_prev_stage: Union[None, np.ndarray, List[ np.ndarray]], properties_or_list_of_properties: Union[dict, List[dict]], truncated_ofname: Union[str, List[str], None], num_processes: int = 3): list_of_images = [image_or_list_of_images] if not isinstance(image_or_list_of_images, list) else \ image_or_list_of_images if isinstance(segs_from_prev_stage_or_list_of_segs_from_prev_stage, np.ndarray): segs_from_prev_stage_or_list_of_segs_from_prev_stage = [ segs_from_prev_stage_or_list_of_segs_from_prev_stage] if isinstance(truncated_ofname, str): truncated_ofname = [truncated_ofname] if isinstance(properties_or_list_of_properties, dict): properties_or_list_of_properties = [properties_or_list_of_properties] num_processes = min(num_processes, len(list_of_images)) pp = preprocessing_iterator_fromnpy( list_of_images, segs_from_prev_stage_or_list_of_segs_from_prev_stage, properties_or_list_of_properties, truncated_ofname, self.plans_manager, self.dataset_json, self.configuration_manager, num_processes, self.device.type == 'cuda', self.verbose_preprocessing ) return pp def predict_from_list_of_npy_arrays(self, image_or_list_of_images: Union[np.ndarray, List[np.ndarray]], segs_from_prev_stage_or_list_of_segs_from_prev_stage: Union[None, np.ndarray, List[ np.ndarray]], properties_or_list_of_properties: Union[dict, List[dict]], truncated_ofname: Union[str, List[str], None], num_processes: int = 3, save_probabilities: bool = False, num_processes_segmentation_export: int = default_num_processes): iterator = self.get_data_iterator_from_raw_npy_data(image_or_list_of_images, segs_from_prev_stage_or_list_of_segs_from_prev_stage, properties_or_list_of_properties, truncated_ofname, num_processes) return self.predict_from_data_iterator(iterator, save_probabilities, num_processes_segmentation_export) def predict_from_data_iterator(self, data_iterator, save_probabilities: bool = False, num_processes_segmentation_export: int = default_num_processes): """ each element returned by data_iterator must be a dict with 'data', 'ofile' and 'data_properties' keys! 
If 'ofile' is None, the result will be returned instead of written to a file """ with multiprocessing.get_context("spawn").Pool(num_processes_segmentation_export) as export_pool: worker_list = [i for i in export_pool._pool] r = [] for preprocessed in data_iterator: data = preprocessed['data'] if isinstance(data, str): delfile = data data = torch.from_numpy(np.load(data)) os.remove(delfile) ofile = preprocessed['ofile'] if ofile is not None: print(f'\nPredicting {os.path.basename(ofile)}:') else: print(f'\nPredicting image of shape {data.shape}:') print(f'perform_everything_on_gpu: {self.perform_everything_on_gpu}') properties = preprocessed['data_properties'] # let's not get into a runaway situation where the GPU predicts so fast that the disk has to be swamped with # npy files proceed = not check_workers_alive_and_busy(export_pool, worker_list, r, allowed_num_queued=2) while not proceed: # print('sleeping') sleep(0.1) proceed = not check_workers_alive_and_busy(export_pool, worker_list, r, allowed_num_queued=2) prediction = self.predict_logits_from_preprocessed_data(data).cpu() if ofile is not None: # this needs to go into background processes # export_prediction_from_logits(prediction, properties, configuration_manager, plans_manager, # dataset_json, ofile, save_probabilities) print('sending off prediction to background worker for resampling and export') r.append( export_pool.starmap_async( export_prediction_from_logits, ((prediction, properties, self.configuration_manager, self.plans_manager, self.dataset_json, ofile, save_probabilities),) ) ) else: # convert_predicted_logits_to_segmentation_with_correct_shape(prediction, plans_manager, # configuration_manager, label_manager, # properties, # save_probabilities) print('sending off prediction to background worker for resampling') r.append( export_pool.starmap_async(
class nnUNetPredictor(object): def __init__(self, tile_step_size: float = 0.5, use_gaussian: bool = True, use_mirroring: bool = True, perform_everything_on_gpu: bool = True, device: torch.device = torch.device('cuda'), verbose: bool = False, verbose_preprocessing: bool = False, allow_tqdm: bool = True): self.verbose = verbose self.verbose_preprocessing = verbose_preprocessing self.allow_tqdm = allow_tqdm self.plans_manager, self.configuration_manager, self.list_of_parameters, self.network, self.dataset_json, \ self.trainer_name, self.allowed_mirroring_axes, self.label_manager = None, None, None, None, None, None, None, None self.tile_step_size = tile_step_size self.use_gaussian = use_gaussian self.use_mirroring = use_mirroring if device.type == 'cuda': # device = torch.device(type='cuda', index=0) # set the desired GPU with CUDA_VISIBLE_DEVICES! # why would I ever want to do that. Stupid dobby. This kills DDP inference... pass if device.type != 'cuda': print(f'perform_everything_on_gpu=True is only supported for cuda devices! Setting this to False') perform_everything_on_gpu = False self.device = device self.perform_everything_on_gpu = perform_everything_on_gpu def initialize_from_trained_model_folder(self, model_training_output_dir: str, use_folds: Union[Tuple[Union[int, str]], None], checkpoint_name: str = 'checkpoint_final.pth'): """ This is used when making predictions with a trained model """ if use_folds is None: use_folds = nnUNetPredictor.auto_detect_available_folds(model_training_output_dir, checkpoint_name) dataset_json = load_json(join(model_training_output_dir, 'dataset.json')) plans = load_json(join(model_training_output_dir, 'plans.json')) plans_manager = PlansManager(plans) if isinstance(use_folds, str): use_folds = [use_folds] parameters = [] for i, f in enumerate(use_folds): f = int(f) if f != 'all' else f checkpoint = torch.load(join(model_training_output_dir, f'fold_{f}', checkpoint_name), map_location=torch.device('cpu')) if i == 0: trainer_name = checkpoint['trainer_name'] configuration_name = checkpoint['init_args']['configuration'] inference_allowed_mirroring_axes = checkpoint['inference_allowed_mirroring_axes'] if \ 'inference_allowed_mirroring_axes' in checkpoint.keys() else None parameters.append(checkpoint['network_weights']) configuration_manager = plans_manager.get_configuration(configuration_name) # restore network num_input_channels = determine_num_input_channels(plans_manager, configuration_manager, dataset_json) trainer_class = recursive_find_python_class(join(nnunetv2.__path__[0], "training", "nnUNetTrainer"), trainer_name, 'nnunetv2.training.nnUNetTrainer') network = trainer_class.build_network_architecture(plans_manager, dataset_json, configuration_manager, num_input_channels, enable_deep_supervision=False) self.plans_manager = plans_manager self.configuration_manager = configuration_manager self.list_of_parameters = parameters self.network = network self.dataset_json = dataset_json self.trainer_name = trainer_name self.allowed_mirroring_axes = inference_allowed_mirroring_axes self.label_manager = plans_manager.get_label_manager(dataset_json) if ('nnUNet_compile' in os.environ.keys()) and (os.environ['nnUNet_compile'].lower() in ('true', '1', 't')) \ and not isinstance(self.network, OptimizedModule): print('compiling network') self.network = torch.compile(self.network) def manual_initialization(self, network: nn.Module, plans_manager: PlansManager, configuration_manager: ConfigurationManager, parameters: Optional[List[dict]], dataset_json: dict, 
trainer_name: str, inference_allowed_mirroring_axes: Optional[Tuple[int, ...]]): """ This is used by the nnUNetTrainer to initialize nnUNetPredictor for the final validation """ self.plans_manager = plans_manager self.configuration_manager = configuration_manager self.list_of_parameters = parameters self.network = network self.dataset_json = dataset_json self.trainer_name = trainer_name self.allowed_mirroring_axes = inference_allowed_mirroring_axes self.label_manager = plans_manager.get_label_manager(dataset_json) allow_compile = True allow_compile = allow_compile and ('nnUNet_compile' in os.environ.keys()) and (os.environ['nnUNet_compile'].lower() in ('true', '1', 't')) allow_compile = allow_compile and not isinstance(self.network, OptimizedModule) if isinstance(self.network, DistributedDataParallel): allow_compile = allow_compile and isinstance(self.network.module, OptimizedModule) if allow_compile: print('compiling network') self.network = torch.compile(self.network) @staticmethod def auto_detect_available_folds(model_training_output_dir, checkpoint_name): print('use_folds is None, attempting to auto detect available folds') fold_folders = subdirs(model_training_output_dir, prefix='fold_', join=False) fold_folders = [i for i in fold_folders if i != 'fold_all'] fold_folders = [i for i in fold_folders if isfile(join(model_training_output_dir, i, checkpoint_name))] use_folds = [int(i.split('_')[-1]) for i in fold_folders] print(f'found the following folds: {use_folds}') return use_folds def _manage_input_and_output_lists(self, list_of_lists_or_source_folder: Union[str, List[List[str]]], output_folder_or_list_of_truncated_output_files: Union[None, str, List[str]], folder_with_segs_from_prev_stage: str = None, overwrite: bool = True, part_id: int = 0, num_parts: int = 1, save_probabilities: bool = False): if isinstance(list_of_lists_or_source_folder, str): list_of_lists_or_source_folder = create_lists_from_splitted_dataset_folder(list_of_lists_or_source_folder, self.dataset_json['file_ending']) print(f'There are {len(list_of_lists_or_source_folder)} cases in the source folder') list_of_lists_or_source_folder = list_of_lists_or_source_folder[part_id::num_parts] caseids = [os.path.basename(i[0])[:-(len(self.dataset_json['file_ending']) + 5)] for i in list_of_lists_or_source_folder] print( f'I am process {part_id} out of {num_parts} (max process ID is {num_parts - 1}, we start counting with 0!)') print(f'There are {len(caseids)} cases that I would like to predict') if isinstance(output_folder_or_list_of_truncated_output_files, str): output_filename_truncated = [join(output_folder_or_list_of_truncated_output_files, i) for i in caseids] else: output_filename_truncated = output_folder_or_list_of_truncated_output_files seg_from_prev_stage_files = [join(folder_with_segs_from_prev_stage, i + self.dataset_json['file_ending']) if folder_with_segs_from_prev_stage is not None else None for i in caseids] # remove already predicted files from the lists if not overwrite and output_filename_truncated is not None: tmp = [isfile(i + self.dataset_json['file_ending']) for i in output_filename_truncated] if save_probabilities: tmp2 = [isfile(i + '.npz') for i in output_filename_truncated] tmp = [i and j for i, j in zip(tmp, tmp2)] not_existing_indices = [i for i, j in enumerate(tmp) if not j] output_filename_truncated = [output_filename_truncated[i] for i in not_existing_indices] list_of_lists_or_source_folder = [list_of_lists_or_source_folder[i] for i in not_existing_indices] seg_from_prev_stage_files =
[seg_from_prev_stage_files[i] for i in not_existing_indices] print(f'overwrite was set to {overwrite}, so I am only working on cases that haven\'t been predicted yet. ' f'That\'s {len(not_existing_indices)} cases.') return list_of_lists_or_source_folder, output_filename_truncated, seg_from_prev_stage_files def predict_from_files(self, list_of_lists_or_source_folder: Union[str, List[List[str]]], output_folder_or_list_of_truncated_output_files: Union[str, None, List[str]], save_probabilities: bool = False, overwrite: bool = True, num_processes_preprocessing: int = default_num_processes, num_processes_segmentation_export: int = default_num_processes, folder_with_segs_from_prev_stage: str = None, num_parts: int = 1, part_id: int = 0): """ This is nnU-Net's default function for making predictions. It works best for batch predictions (predicting many images at once). """ if isinstance(output_folder_or_list_of_truncated_output_files, str): output_folder = output_folder_or_list_of_truncated_output_files elif isinstance(output_folder_or_list_of_truncated_output_files, list): output_folder = os.path.dirname(output_folder_or_list_of_truncated_output_files[0]) else: output_folder = None ######################## # let's store the input arguments so that it's clear what was used to generate the prediction if output_folder is not None: my_init_kwargs = {} for k in inspect.signature(self.predict_from_files).parameters.keys(): my_init_kwargs[k] = locals()[k] my_init_kwargs = deepcopy( my_init_kwargs) # let's not unintentionally change anything in-place. Take this as a recursive_fix_for_json_export(my_init_kwargs) maybe_mkdir_p(output_folder) save_json(my_init_kwargs, join(output_folder, 'predict_from_raw_data_args.json')) # we need these two if we want to do things with the predictions like for example apply postprocessing save_json(self.dataset_json, join(output_folder, 'dataset.json'), sort_keys=False) save_json(self.plans_manager.plans, join(output_folder, 'plans.json'), sort_keys=False) ####################### # check if we need a prediction from the previous stage if self.configuration_manager.previous_stage_name is not None: assert folder_with_segs_from_prev_stage is not None, \ f'The requested configuration is a cascaded network. It requires the segmentations of the previous ' \ f'stage ({self.configuration_manager.previous_stage_name}) as input.
Please provide the folder where' \ f' they are located via folder_with_segs_from_prev_stage' # sort out input and output filenames list_of_lists_or_source_folder, output_filename_truncated, seg_from_prev_stage_files = \ self._manage_input_and_output_lists(list_of_lists_or_source_folder, output_folder_or_list_of_truncated_output_files, folder_with_segs_from_prev_stage, overwrite, part_id, num_parts, save_probabilities) if len(list_of_lists_or_source_folder) == 0: return data_iterator = self._internal_get_data_iterator_from_lists_of_filenames(list_of_lists_or_source_folder, seg_from_prev_stage_files, output_filename_truncated, num_processes_preprocessing) return self.predict_from_data_iterator(data_iterator, save_probabilities, num_processes_segmentation_export) def _internal_get_data_iterator_from_lists_of_filenames(self, input_list_of_lists: List[List[str]], seg_from_prev_stage_files: Union[List[str], None], output_filenames_truncated: Union[List[str], None], num_processes: int): return preprocessing_iterator_fromfiles(input_list_of_lists, seg_from_prev_stage_files, output_filenames_truncated, self.plans_manager, self.dataset_json, self.configuration_manager, num_processes, self.device.type == 'cuda', self.verbose_preprocessing) # preprocessor = self.configuration_manager.preprocessor_class(verbose=self.verbose_preprocessing) # # hijack batchgenerators, yo # # we use the multiprocessing of the batchgenerators dataloader to handle all the background worker stuff. This # # way we don't have to reinvent the wheel here. # num_processes = max(1, min(num_processes, len(input_list_of_lists))) # ppa = PreprocessAdapter(input_list_of_lists, seg_from_prev_stage_files, preprocessor, # output_filenames_truncated, self.plans_manager, self.dataset_json, # self.configuration_manager, num_processes) # if num_processes == 0: # mta = SingleThreadedAugmenter(ppa, None) # else: # mta = MultiThreadedAugmenter(ppa, None, num_processes, 1, None, pin_memory=pin_memory) # return mta def get_data_iterator_from_raw_npy_data(self, image_or_list_of_images: Union[np.ndarray, List[np.ndarray]], segs_from_prev_stage_or_list_of_segs_from_prev_stage: Union[None, np.ndarray, List[ np.ndarray]], properties_or_list_of_properties: Union[dict, List[dict]], truncated_ofname: Union[str, List[str], None], num_processes: int = 3): list_of_images = [image_or_list_of_images] if not isinstance(image_or_list_of_images, list) else \ image_or_list_of_images if isinstance(segs_from_prev_stage_or_list_of_segs_from_prev_stage, np.ndarray): segs_from_prev_stage_or_list_of_segs_from_prev_stage = [ segs_from_prev_stage_or_list_of_segs_from_prev_stage] if isinstance(truncated_ofname, str): truncated_ofname = [truncated_ofname] if isinstance(properties_or_list_of_properties, dict): properties_or_list_of_properties = [properties_or_list_of_properties] num_processes = min(num_processes, len(list_of_images)) pp = preprocessing_iterator_fromnpy( list_of_images, segs_from_prev_stage_or_list_of_segs_from_prev_stage, properties_or_list_of_properties, truncated_ofname, self.plans_manager, self.dataset_json, self.configuration_manager, num_processes, self.device.type == 'cuda', self.verbose_preprocessing ) return pp def predict_from_list_of_npy_arrays(self, image_or_list_of_images: Union[np.ndarray, List[np.ndarray]], segs_from_prev_stage_or_list_of_segs_from_prev_stage: Union[None, np.ndarray, List[ np.ndarray]], properties_or_list_of_properties: Union[dict, List[dict]], truncated_ofname: Union[str, List[str], None], num_processes: int = 3, 
save_probabilities: bool = False, num_processes_segmentation_export: int = default_num_processes): iterator = self.get_data_iterator_from_raw_npy_data(image_or_list_of_images, segs_from_prev_stage_or_list_of_segs_from_prev_stage, properties_or_list_of_properties, truncated_ofname, num_processes) return self.predict_from_data_iterator(iterator, save_probabilities, num_processes_segmentation_export) def predict_from_data_iterator(self, data_iterator, save_probabilities: bool = False, num_processes_segmentation_export: int = default_num_processes): """ each element returned by data_iterator must be a dict with 'data', 'ofile' and 'data_properties' keys! If 'ofile' is None, the result will be returned instead of written to a file """ with multiprocessing.get_context("spawn").Pool(num_processes_segmentation_export) as export_pool: worker_list = [i for i in export_pool._pool] r = [] for preprocessed in data_iterator: data = preprocessed['data'] if isinstance(data, str): delfile = data data = torch.from_numpy(np.load(data)) os.remove(delfile) ofile = preprocessed['ofile'] if ofile is not None: print(f'\nPredicting {os.path.basename(ofile)}:') else: print(f'\nPredicting image of shape {data.shape}:') print(f'perform_everything_on_gpu: {self.perform_everything_on_gpu}') properties = preprocessed['data_properties'] # let's not get into a runaway situation where the GPU predicts so fast that the disk has to be swamped with # npy files proceed = not check_workers_alive_and_busy(export_pool, worker_list, r, allowed_num_queued=2) while not proceed: # print('sleeping') sleep(0.1) proceed = not check_workers_alive_and_busy(export_pool, worker_list, r, allowed_num_queued=2) prediction = self.predict_logits_from_preprocessed_data(data).cpu() if ofile is not None: # this needs to go into background processes # export_prediction_from_logits(prediction, properties, configuration_manager, plans_manager, # dataset_json, ofile, save_probabilities) print('sending off prediction to background worker for resampling and export') r.append( export_pool.starmap_async( export_prediction_from_logits, ((prediction, properties, self.configuration_manager, self.plans_manager, self.dataset_json, ofile, save_probabilities),) ) ) else: # convert_predicted_logits_to_segmentation_with_correct_shape(prediction, plans_manager, # configuration_manager, label_manager, # properties, # save_probabilities) print('sending off prediction to background worker for resampling') r.append( export_pool.starmap_async(
convert_predicted_logits_to_segmentation_with_correct_shape, (
5
2023-12-04 19:43:14+00:00
16k
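The nnUNet record above centers on a producer/consumer pattern worth isolating: the GPU loop hands each finished prediction to a background process pool for resampling and export, but stalls via check_workers_alive_and_busy whenever too many exports are queued, so that fast predictions cannot swamp the disk. Below is a minimal, self-contained sketch of that backpressure idea; the export function, sleep intervals, and queue limit are illustrative stand-ins, not nnU-Net's actual implementation.

import multiprocessing
import time

def export(prediction):
    # Stand-in for the expensive resampling/export step done off the GPU loop.
    time.sleep(0.2)
    return prediction

def too_many_queued(async_results, allowed_num_queued):
    # Simplified analogue of check_workers_alive_and_busy: count exports that
    # have not finished yet and tell the producer to stall if over the limit.
    return sum(not r.ready() for r in async_results) > allowed_num_queued

if __name__ == "__main__":
    with multiprocessing.get_context("spawn").Pool(2) as pool:
        results = []
        for prediction in range(10):  # stand-in for the GPU prediction loop
            while too_many_queued(results, allowed_num_queued=2):
                time.sleep(0.1)  # backpressure: don't swamp the export workers
            results.append(pool.apply_async(export, (prediction,)))
        print([r.get() for r in results])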
Zuricho/chroma_pipeline
chroma/layers/structure/protein_graph.py
[ { "identifier": "Protein", "path": "chroma/data/protein.py", "snippet": "class Protein:\n \"\"\"\n Protein: A utility class for managing proteins within the Chroma ecosystem.\n\n The Protein class offers a suite of methods for loading, saving, transforming, and viewing protein structures\n a...
import json import os import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from typing import Optional, Tuple from chroma.data.protein import Protein from chroma.layers import graph from chroma.layers.basic import FourierFeaturization, PositionalEncoding from chroma.layers.structure import backbone, geometry, transforms
12,246
for i, layer in enumerate(self.edge_layers): key = json.dumps(self.edge_features[i]) tensor = torch.tensor(param_dictionary[key], dtype=torch.float32) tensor = tensor.view(1, 1, -1) self.register_buffer(f"edge_means_{i}", tensor) return def _reference_stats(self, reference_pdb): X, C, _ = Protein.from_PDBID(reference_pdb).to_XCS() stats_dict = self._feature_stats(X, C) return stats_dict def _feature_stats(self, X, C, verbose=False, center=False): mask_i = chain_map_to_mask(C) edge_idx, mask_ij = self.graph_builder(X, C) def _masked_stats(feature, mask, dims, verbose=False): mask = mask.unsqueeze(-1) feature = mask * feature sum_mask = mask.sum() mean = feature.sum(dims, keepdim=True) / sum_mask var = torch.sum(mask * (feature - mean) ** 2, dims) / sum_mask std = torch.sqrt(var) mean = mean.view(-1) std = std.view(-1) if verbose: frac = (100.0 * std ** 2 / (mean ** 2 + std ** 2)).type(torch.int32) print(f"Fraction of raw variance: {frac}") return mean, std # Collect statistics stats_dict = {} # Aggregate node layers for i, layer in enumerate(self.node_layers): node_h = layer(X, edge_idx, C) if center: node_h = node_h - self.__getattr__(f"node_means_{i}") mean, std = _masked_stats(node_h, mask_i, dims=[0, 1]) # Store in dictionary key = json.dumps(self.node_features[i]) stats_dict[key] = mean.tolist() # Aggregate node layers for i, layer in enumerate(self.edge_layers): edge_h = layer(X, edge_idx, C) if center: edge_h = edge_h - self.__getattr__(f"edge_means_{i}") mean, std = _masked_stats(edge_h, mask_ij, dims=[0, 1, 2]) # Store in dictionary key = json.dumps(self.edge_features[i]) stats_dict[key] = mean.tolist() # Round to small number of decimal places stats_dict = {k: [round(f, 3) for f in v] for k, v in stats_dict.items()} return stats_dict class ProteinGraph(nn.Module): """Build a graph topology given a protein backbone. Args: num_neighbors (int): Maximum number of neighbors in the graph. distance_atom_type (int): Atom type for computing residue-residue distances for graph construction. Negative values will specify centroid across atom types. Default is `-1` (centroid). cutoff (float): Cutoff distance for graph construction. If not None, mask any edges further than this cutoff. Default is `None`. mask_interfaces (Boolean): Restrict connections only to within chains, excluding-between chain interactions. Default is `False`. criterion (string, optional): Method used for building graph from distances. Currently supported methods are `{knn, random_log, random_linear}`. Default is `knn`. random_alpha (float, optional): Length scale parameter for random graph generation. Default is 3. random_temperature (float, optional): Temperature parameter for random graph sampling. Between 0 and 1 this value will interpolate between a normal k-NN graph and sampling from the graph generation process. Default is 1.0. Inputs: X (torch.Tensor): Backbone coordinates with shape `(num_batch, num_residues, 4, 3)`. C (torch.LongTensor): Chain map with shape `(num_batch, num_residues)`. custom_D (torch.Tensor, optional): Optional external distance map, for example based on other distance metrics, with shape `(num_batch, num_residues, num_residues)`. custom_mask_2D (torch.Tensor, optional): Optional mask to apply to distances before computing dissimilarities with shape `(num_batch, num_residues, num_residues)`. Outputs: edge_idx (torch.LongTensor): Edge indices for neighbors with shape `(num_batch, num_residues, num_neighbors)`. mask_ij (torch.Tensor): Edge mask with shape `(num_batch, num_nodes, num_neighbors)`. 
""" def __init__( self, num_neighbors: int = 30, distance_atom_type: int = -1, cutoff: Optional[float] = None, mask_interfaces: bool = False, criterion: str = "knn", random_alpha: float = 3.0, random_temperature: float = 1.0, random_min_local: float = 20, deterministic: bool = False, deterministic_seed: int = 10, ): super(ProteinGraph, self).__init__() self.num_neighbors = num_neighbors self.distance_atom_type = distance_atom_type self.cutoff = cutoff self.mask_interfaces = mask_interfaces
# Copyright Generate Biomedicines, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Layers for building graph representations of protein structure. This module contains pytorch layers for representing protein structure as a graph with node and edge features based on geometric information. The graph features are differentiable with respect to input coordinates and can be used for building protein scoring functions and optimizing protein geometries natively in pytorch. """ class ProteinFeatureGraph(nn.Module): """Graph featurizer for protein chains and complexes. This module builds graph representations of protein structures that are differentiable with respect to input coordinates and invariant with respect to global rotations and translations. It takes as input a batch of protein backbones (single chains or complexes), constructs a sparse graph with residues as nodes, and featurizes the backbones in terms of node and edge feature tensors. The graph representation has 5 components: 1. Node features `node_h` representing residues in the protein. 2. Edge features `edge_h` representing relationships between residues. 3. Index map `edge_idx` representing graph topology. 4. Node mask `mask_i` that specifies which nodes are present. 5. Edge mask `mask_ij` that specifies which edges are present. Criteria for constructing the graph currently include k-Nearest Neighbors or distance-weighted edge sampling. Node and edge features are specified as tuples to make it simpler to add additional features and options while retaining backwards compatibility. Specifically, each node or edge feature type can be added to the list either in default configuration by a `'feature_name'` keyword, or in modified form with a `('feature_name', feature_kwargs)` tuple. Example usage: graph = ProteinFeatureGraph( graph_type='knn', node_features=('dihedrals',), edge_features=[ 'chain_distance', ('dmat_6mer', {'D_function': 'log'}) ] ) node_h, edge_h, edge_idx, mask_i, mask_ij = graph(X, C) This builds a kNN graph with dihedral angles as node features and 6mer interatomic distance matrices (process) 6mers, where the options for post-processing the 6mers are passed as a kwargs dict. Args: dim_nodes (int): Hidden dimension of node features. dim_edges (int): Hidden dimension of edge features. num_neighbors (int): Maximum degree of the graph. graph_kwargs (dict): Arguments for graph construction. Default is None. node_features (list): List of node feature strings and optional args. Valid feature strings are `{internal_coords}`. edge_features (list): List of node feature strings and optional args. Valid feature strings are `{'distances_6mer','distances_chain'}`. centered (boolean): Flag for enabling feature centering. If `True`, the features will be will centered by subtracting an empirical mean that was computed on the reference PDB `centered_pdb`. The statistics are per-dimension of every node and edge feature. 
If they have not previously been computed, the PDB will be downloaded, featurized, and aggregated into local statistics that are cached in the repo. centered_pdb (str): PDB code for the reference PDB to compute some empirical feature statistics from. Inputs: X (torch.Tensor): Backbone coordinates with shape `(num_batch, num_residues, 4, 3)`. The standard atom indices for the third dimension are PDB order (`[N, CA, C, O]`). C (LongTensor, optional): Chain map with shape `(num_batch, num_residues)`. The chain map codes positions as `0` when masked, positive integers for chain indices, and negative integers to represent missing residues of the corresponding positive integers. custom_D (Tensor, optional): Pre-computed custom distance map for graph construction `(num_batch,num_residues,num_residues)`. If present, this will override the behavior of `graph_type` and be used as the distances for k-nearest neighbor graph construction. custom_mask_2D (Tensor, optional): Custom 2D mask to apply to `custom_D` with shape `(num_batch,num_residues,num_residues)`. Outputs: node_h (torch.Tensor): Node features with shape `(num_batch, num_residues, dim_nodes)`. edge_h (torch.Tensor): Edge features with shape `(num_batch, num_residues, num_neighbors, dim_edges)`. edge_idx (torch.LongTensor): Edge indices for neighbors with shape `(num_batch, num_residues, num_neighbors)`. mask_i (torch.Tensor): Node mask with shape `(num_batch, num_residues)`. mask_ij (torch.Tensor): Edge mask with shape `(num_batch, num_nodes, num_neighbors)`. """ def __init__( self, dim_nodes: int, dim_edges: int, num_neighbors: int = 30, graph_kwargs: dict = None, node_features: tuple = ("internal_coords",), edge_features: tuple = ("distances_6mer", "distances_chain"), centered: bool = True, centered_pdb: str = "2g3n", ): super(ProteinFeatureGraph, self).__init__() self.dim_nodes = dim_nodes self.dim_edges = dim_edges self.num_neighbors = num_neighbors graph_kwargs = graph_kwargs if graph_kwargs is not None else {} self.graph_builder = ProteinGraph(num_neighbors, **graph_kwargs) self.node_features = node_features self.edge_features = edge_features def _init_layer(layer_dict, features): # Parse option string custom_args = not isinstance(features, str) key = features[0] if custom_args else features kwargs = features[1] if custom_args else {} return layer_dict[key](**kwargs) # Node feature compilation node_dict = { "internal_coords": NodeInternalCoords, "cartesian_coords": NodeCartesianCoords, "radii": NodeRadii, } self.node_layers = nn.ModuleList( [_init_layer(node_dict, option) for option in self.node_features] ) # Edge feature compilation edge_dict = { "distances_6mer": EdgeDistance6mer, "distances_2mer": EdgeDistance2mer, "orientations_2mer": EdgeOrientation2mer, "position_2mer": EdgePositionalEncodings, "distances_chain": EdgeDistanceChain, "orientations_chain": EdgeOrientationChain, "cartesian_coords": EdgeCartesianCoords, "random_fourier_2mer": EdgeRandomFourierFeatures2mer, } self.edge_layers = nn.ModuleList( [_init_layer(edge_dict, option) for option in self.edge_features] ) # Load feature centering params as buffers self.centered = centered self.centered_pdb = centered_pdb.lower() if self.centered: self._load_centering_params(self.centered_pdb) """ Storing separate linear transformations for each layer, rather than concat + one large linear, provides a more even weighting of the different input features when used with standard weight initialization.
It has the specific effect actually re-weighting the weight variance based on the number of input features for each feature type. Otherwise, the relative importance of each feature goes with the number of feature dimensions. """ self.node_linears = nn.ModuleList( [nn.Linear(l.dim_out, self.dim_nodes) for l in self.node_layers] ) self.edge_linears = nn.ModuleList( [nn.Linear(l.dim_out, self.dim_edges) for l in self.edge_layers] ) return def forward( self, X: torch.Tensor, C: torch.Tensor, edge_idx: Optional[torch.LongTensor] = None, mask_ij: torch.Tensor = None, custom_D: Optional[torch.Tensor] = None, custom_mask_2D: Optional[torch.Tensor] = None, ) -> Tuple[ torch.Tensor, torch.Tensor, torch.LongTensor, torch.Tensor, torch.Tensor ]: mask_i = chain_map_to_mask(C) if mask_ij is None or edge_idx is None: edge_idx, mask_ij = self.graph_builder( X, C, custom_D=custom_D, custom_mask_2D=custom_mask_2D ) # Aggregate node layers node_h = None for i, layer in enumerate(self.node_layers): node_h_l = layer(X, edge_idx, C) if self.centered: node_h_l = node_h_l - self.__getattr__(f"node_means_{i}") node_h_l = self.node_linears[i](node_h_l) node_h = node_h_l if node_h is None else node_h + node_h_l if node_h is None: node_h = torch.zeros(list(X.shape[:2]) + [self.dim_nodes], device=X.device) # Aggregate edge layers edge_h = None for i, layer in enumerate(self.edge_layers): edge_h_l = layer(X, edge_idx, C) if self.centered: edge_h_l = edge_h_l - self.__getattr__(f"edge_means_{i}") edge_h_l = self.edge_linears[i](edge_h_l) edge_h = edge_h_l if edge_h is None else edge_h + edge_h_l if edge_h is None: edge_h = torch.zeros(list(X.shape[:2]) + [self.dim_nodes], device=X.device) # Apply masks node_h = mask_i.unsqueeze(-1) * node_h edge_h = mask_ij.unsqueeze(-1) * edge_h return node_h, edge_h, edge_idx, mask_i, mask_ij def _load_centering_params(self, reference_pdb: str): basepath = os.path.dirname(os.path.abspath(__file__)) + "/params/" if not os.path.exists(basepath): os.makedirs(basepath) filename = f"centering_{reference_pdb}.params" self.centering_file = basepath + filename key = ( reference_pdb + ";" + json.dumps(self.node_features) + ";" + json.dumps(self.edge_features) ) # Attempt to load saved centering params, otherwise compute and cache json_line = None with open(self.centering_file, "a+") as f: prefix = key + "\t" f.seek(0) for line in f: if line.startswith(prefix): json_line = line.split(prefix)[1] break if json_line is not None: print("Loaded from cache") param_dictionary = json.loads(json_line) else: print(f"Computing reference stats for {reference_pdb}") param_dictionary = self._reference_stats(reference_pdb) json_line = json.dumps(param_dictionary) f.write(prefix + "\t" + json_line + "\n") for i, layer in enumerate(self.node_layers): key = json.dumps(self.node_features[i]) tensor = torch.tensor(param_dictionary[key], dtype=torch.float32) tensor = tensor.view(1, 1, -1) self.register_buffer(f"node_means_{i}", tensor) for i, layer in enumerate(self.edge_layers): key = json.dumps(self.edge_features[i]) tensor = torch.tensor(param_dictionary[key], dtype=torch.float32) tensor = tensor.view(1, 1, -1) self.register_buffer(f"edge_means_{i}", tensor) return def _reference_stats(self, reference_pdb): X, C, _ = Protein.from_PDBID(reference_pdb).to_XCS() stats_dict = self._feature_stats(X, C) return stats_dict def _feature_stats(self, X, C, verbose=False, center=False): mask_i = chain_map_to_mask(C) edge_idx, mask_ij = self.graph_builder(X, C) def _masked_stats(feature, mask, dims, verbose=False): mask = 
mask.unsqueeze(-1) feature = mask * feature sum_mask = mask.sum() mean = feature.sum(dims, keepdim=True) / sum_mask var = torch.sum(mask * (feature - mean) ** 2, dims) / sum_mask std = torch.sqrt(var) mean = mean.view(-1) std = std.view(-1) if verbose: frac = (100.0 * std ** 2 / (mean ** 2 + std ** 2)).type(torch.int32) print(f"Fraction of raw variance: {frac}") return mean, std # Collect statistics stats_dict = {} # Aggregate node layers for i, layer in enumerate(self.node_layers): node_h = layer(X, edge_idx, C) if center: node_h = node_h - self.__getattr__(f"node_means_{i}") mean, std = _masked_stats(node_h, mask_i, dims=[0, 1]) # Store in dictionary key = json.dumps(self.node_features[i]) stats_dict[key] = mean.tolist() # Aggregate node layers for i, layer in enumerate(self.edge_layers): edge_h = layer(X, edge_idx, C) if center: edge_h = edge_h - self.__getattr__(f"edge_means_{i}") mean, std = _masked_stats(edge_h, mask_ij, dims=[0, 1, 2]) # Store in dictionary key = json.dumps(self.edge_features[i]) stats_dict[key] = mean.tolist() # Round to small number of decimal places stats_dict = {k: [round(f, 3) for f in v] for k, v in stats_dict.items()} return stats_dict class ProteinGraph(nn.Module): """Build a graph topology given a protein backbone. Args: num_neighbors (int): Maximum number of neighbors in the graph. distance_atom_type (int): Atom type for computing residue-residue distances for graph construction. Negative values will specify centroid across atom types. Default is `-1` (centroid). cutoff (float): Cutoff distance for graph construction. If not None, mask any edges further than this cutoff. Default is `None`. mask_interfaces (Boolean): Restrict connections only to within chains, excluding-between chain interactions. Default is `False`. criterion (string, optional): Method used for building graph from distances. Currently supported methods are `{knn, random_log, random_linear}`. Default is `knn`. random_alpha (float, optional): Length scale parameter for random graph generation. Default is 3. random_temperature (float, optional): Temperature parameter for random graph sampling. Between 0 and 1 this value will interpolate between a normal k-NN graph and sampling from the graph generation process. Default is 1.0. Inputs: X (torch.Tensor): Backbone coordinates with shape `(num_batch, num_residues, 4, 3)`. C (torch.LongTensor): Chain map with shape `(num_batch, num_residues)`. custom_D (torch.Tensor, optional): Optional external distance map, for example based on other distance metrics, with shape `(num_batch, num_residues, num_residues)`. custom_mask_2D (torch.Tensor, optional): Optional mask to apply to distances before computing dissimilarities with shape `(num_batch, num_residues, num_residues)`. Outputs: edge_idx (torch.LongTensor): Edge indices for neighbors with shape `(num_batch, num_residues, num_neighbors)`. mask_ij (torch.Tensor): Edge mask with shape `(num_batch, num_nodes, num_neighbors)`. """ def __init__( self, num_neighbors: int = 30, distance_atom_type: int = -1, cutoff: Optional[float] = None, mask_interfaces: bool = False, criterion: str = "knn", random_alpha: float = 3.0, random_temperature: float = 1.0, random_min_local: float = 20, deterministic: bool = False, deterministic_seed: int = 10, ): super(ProteinGraph, self).__init__() self.num_neighbors = num_neighbors self.distance_atom_type = distance_atom_type self.cutoff = cutoff self.mask_interfaces = mask_interfaces
self.distances = geometry.Distances()
5
2023-11-28 00:09:40+00:00
16k
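The Chroma record's feature-centering machinery boils down to the masked statistics computed in _masked_stats: means and variances are taken only over positions where the node or edge mask is nonzero, so padded or missing residues do not bias the feature statistics. Here is a self-contained sketch of that computation; the toy tensor shapes are assumptions chosen for illustration.

import torch

def masked_stats(feature, mask, dims):
    # Zero out masked positions, then normalize sums by the mask count rather
    # than the tensor size, so padding does not bias the mean or the variance.
    mask = mask.unsqueeze(-1)
    feature = mask * feature
    sum_mask = mask.sum()
    mean = feature.sum(dims, keepdim=True) / sum_mask
    var = torch.sum(mask * (feature - mean) ** 2, dims) / sum_mask
    return mean.view(-1), var.sqrt().view(-1)

# Toy node features: 2 proteins, 5 residues, 4 feature dims; the second
# protein has its last two residues masked out (e.g. padding).
node_h = torch.randn(2, 5, 4)
mask_i = torch.tensor([[1., 1., 1., 1., 1.],
                       [1., 1., 1., 0., 0.]])
mean, std = masked_stats(node_h, mask_i, dims=[0, 1])
print(mean.shape, std.shape)  # torch.Size([4]) torch.Size([4])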
BiQiWHU/CMFormer
train_net.py
[ { "identifier": "add_maskformer2_config", "path": "mask2former/config.py", "snippet": "def add_maskformer2_config(cfg):\n \"\"\"\n Add config for MASK_FORMER.\n \"\"\"\n # NOTE: configs from original maskformer\n # data config\n # select the dataset mapper\n cfg.INPUT.DATASET_MAPPER...
from shapely.errors import ShapelyDeprecationWarning from collections import OrderedDict from typing import Any, Dict, List, Set from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import get_cfg from detectron2.data import MetadataCatalog, build_detection_train_loader from detectron2.engine import ( DefaultTrainer, default_argument_parser, default_setup, launch, ) from detectron2.evaluation import ( CityscapesInstanceEvaluator, CityscapesSemSegEvaluator, COCOEvaluator, COCOPanopticEvaluator, DatasetEvaluators, LVISEvaluator, SemSegEvaluator, verify_results, ) from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler from detectron2.solver.build import maybe_add_gradient_clipping from detectron2.utils.logger import setup_logger from mask2former import ( COCOInstanceNewBaselineDatasetMapper, COCOPanopticNewBaselineDatasetMapper, InstanceSegEvaluator, MaskFormerInstanceDatasetMapper, MaskFormerPanopticDatasetMapper, MaskFormerSemanticDatasetMapper, SemanticSegmentorWithTTA, add_maskformer2_config, ) import warnings import copy import itertools import logging import os import torch import detectron2.utils.comm as comm
11,304
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved """ MaskFormer Training Script. This script is a simplified version of the training script in detectron2/tools. """ try: # ignore ShapelyDeprecationWarning from fvcore warnings.filterwarnings('ignore', category=ShapelyDeprecationWarning) except: pass os.environ['DETECTRON2_DATASETS'] = 'E:/DGtask/datasets' # MaskFormer class Trainer(DefaultTrainer): """ Extension of the Trainer class adapted to MaskFormer. """ @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): """ Create evaluator(s) for a given dataset. This uses the special metadata "evaluator_type" associated with each builtin dataset. For your own dataset, you can simply create an evaluator manually in your script and do not have to worry about the hacky if-else logic here. """ if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") evaluator_list = [] evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type # semantic segmentation if evaluator_type in ["sem_seg", "ade20k_panoptic_seg"]: evaluator_list.append( SemSegEvaluator( dataset_name, distributed=True, output_dir=output_folder, ) ) # instance segmentation if evaluator_type == "coco": evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) # panoptic segmentation if evaluator_type in [ "coco_panoptic_seg", "ade20k_panoptic_seg", "cityscapes_panoptic_seg", "mapillary_vistas_panoptic_seg", ]: if cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON: evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder)) # COCO if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Mapillary Vistas if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Cityscapes if evaluator_type == "cityscapes_instance": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesInstanceEvaluator(dataset_name) if evaluator_type == "cityscapes_sem_seg": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesSemSegEvaluator(dataset_name) if evaluator_type == "cityscapes_panoptic_seg": if cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." evaluator_list.append(CityscapesSemSegEvaluator(dataset_name)) if cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." 
evaluator_list.append(CityscapesInstanceEvaluator(dataset_name)) # ADE20K if evaluator_type == "ade20k_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) # LVIS if evaluator_type == "lvis": return LVISEvaluator(dataset_name, output_dir=output_folder) if len(evaluator_list) == 0: raise NotImplementedError( "no Evaluator for the dataset {} with the type {}".format( dataset_name, evaluator_type ) ) elif len(evaluator_list) == 1: return evaluator_list[0] return DatasetEvaluators(evaluator_list) @classmethod def build_train_loader(cls, cfg): # Semantic segmentation dataset mapper if cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_semantic": mapper = MaskFormerSemanticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Panoptic segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_panoptic": mapper = MaskFormerPanopticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Instance segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_instance":
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved """ MaskFormer Training Script. This script is a simplified version of the training script in detectron2/tools. """ try: # ignore ShapelyDeprecationWarning from fvcore warnings.filterwarnings('ignore', category=ShapelyDeprecationWarning) except: pass os.environ['DETECTRON2_DATASETS'] = 'E:/DGtask/datasets' # MaskFormer class Trainer(DefaultTrainer): """ Extension of the Trainer class adapted to MaskFormer. """ @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): """ Create evaluator(s) for a given dataset. This uses the special metadata "evaluator_type" associated with each builtin dataset. For your own dataset, you can simply create an evaluator manually in your script and do not have to worry about the hacky if-else logic here. """ if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") evaluator_list = [] evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type # semantic segmentation if evaluator_type in ["sem_seg", "ade20k_panoptic_seg"]: evaluator_list.append( SemSegEvaluator( dataset_name, distributed=True, output_dir=output_folder, ) ) # instance segmentation if evaluator_type == "coco": evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) # panoptic segmentation if evaluator_type in [ "coco_panoptic_seg", "ade20k_panoptic_seg", "cityscapes_panoptic_seg", "mapillary_vistas_panoptic_seg", ]: if cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON: evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder)) # COCO if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Mapillary Vistas if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Cityscapes if evaluator_type == "cityscapes_instance": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesInstanceEvaluator(dataset_name) if evaluator_type == "cityscapes_sem_seg": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesSemSegEvaluator(dataset_name) if evaluator_type == "cityscapes_panoptic_seg": if cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." evaluator_list.append(CityscapesSemSegEvaluator(dataset_name)) if cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." 
evaluator_list.append(CityscapesInstanceEvaluator(dataset_name)) # ADE20K if evaluator_type == "ade20k_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) # LVIS if evaluator_type == "lvis": return LVISEvaluator(dataset_name, output_dir=output_folder) if len(evaluator_list) == 0: raise NotImplementedError( "no Evaluator for the dataset {} with the type {}".format( dataset_name, evaluator_type ) ) elif len(evaluator_list) == 1: return evaluator_list[0] return DatasetEvaluators(evaluator_list) @classmethod def build_train_loader(cls, cfg): # Semantic segmentation dataset mapper if cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_semantic": mapper = MaskFormerSemanticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Panoptic segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_panoptic": mapper = MaskFormerPanopticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Instance segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_instance":
mapper = MaskFormerInstanceDatasetMapper(cfg, True)
3
2023-11-29 15:26:53+00:00
16k
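Across all of these records the evaluation setup is the same: the model is prompted with retrieved context plus the file's imports and cropped_code, and must emit next_line, while gold_snippet_index marks which entry of the context list carries the definition the gold line depends on. Below is a hedged sketch of how one record might be scored with exact match; the record dict, prompt layout, and whitespace normalization are illustrative assumptions, not a documented spec of this dataset.

def build_prompt(record):
    # What the completion model sees: the gold cross-file snippet, the file's
    # imports, then the truncated in-file code.
    gold_context = record["context"][record["gold_snippet_index"]]["snippet"]
    return gold_context + "\n" + record["import_statement"] + "\n" + record["cropped_code"]

def exact_match(generated, record):
    # Score only the first generated line against the gold next_line,
    # ignoring leading/trailing whitespace.
    lines = generated.strip().splitlines()
    first = lines[0].strip() if lines else ""
    return first == record["next_line"].strip()

# Toy record in the shape of the dump above.
record = {
    "context": [{"identifier": "helper", "path": "pkg/utils.py",
                 "snippet": "def helper(x):\n    return x + 1"}],
    "gold_snippet_index": 0,
    "import_statement": "from pkg.utils import helper",
    "cropped_code": "def main():\n    x = 1",
    "next_line": "    y = helper(x)",
}
print(exact_match("    y = helper(x)\n", record))  # True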
PopicLab/insilicoSV
test/test_processing.py
[ { "identifier": "SV_Simulator", "path": "insilicosv/simulate.py", "snippet": "class SV_Simulator:\n def __init__(self, par_file, log_file=None):\n \"\"\"\n par_file: file location to configuration file (.yaml)\n log_file: location to store log file with diagnostic information if ...
from insilicosv.simulate import SV_Simulator from insilicosv.processing import FormatterIO from test_simulate import TestObject from pysam import VariantFile, FastaFile from collections import defaultdict, Counter from insilicosv.utils import NestedDict from insilicosv import utils from insilicosv import constants import unittest import sys import os
14,137
"max_length": [4], "min_length": [2]}]}], self.hap1, self.hap2, self.bed, self.vcf)} self.test_objects_overlap_simple = {'overlap1': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": { "bed": [self.test_overlap_bed, self.test_overlap_bed_2], "allow_types": ["L1HS", "ALR/Alpha"]}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [5], "num_overlap": [2, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap2': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "overlap_events": { "bed": [self.test_overlap_bed, self.test_overlap_bed_2], "allow_types": ["L1HS", "ALR/Alpha"]}, "variant_sets": [{"type": "DEL", "number": 4, "min_length": [1], "max_length": [5], "num_overlap": [3, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap3': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": { "bed": [self.test_overlap_bed, self.test_overlap_bed_2], "allow_types": ["L1", "ALR"]}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [5], "num_overlap": [3, 2]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap4': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": { "bed": [self.test_overlap_bed, self.test_overlap_bed_2], "allow_types": "L1"}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [5], "num_overlap": 2}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap5': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": { "bed": self.test_overlap_bed_3, "allow_types": "ALR"}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [5], "num_overlap": 2}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap6': TestProcObject([self.ref_file, {"chr21": "CCTCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTATCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": {"bed": self.test_overlap_bed_11, "allow_types": ['Alu', 'L1', 'L2', 'SVA', 'HERVK']}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [2], "max_length": [4], "num_overlap": [1, 1, 1, 1, 1]}, {"type": "DEL", "number": 5, "min_length": [6], "max_length": [8], "num_overlap": [1, 1, 1, 1, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap7': TestProcObject([self.ref_file, {"chr21": "CCTCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTATCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": {"bed": self.test_overlap_bed_11, "allow_types": ['Alu', 'L1', 'L2', 'SVA', 'HERVK']}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [1], 
"num_partial_overlap": [1, 1, 1, 1, 1]}, {"type": "DEL", "number": 5, "min_length": [2], "max_length": [2], "num_partial_overlap": [1, 1, 1, 1, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap8': TestProcObject([self.ref_file, {"chr21": "CCTCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTATCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": {"bed": self.test_overlap_bed_11, "allow_types": ['Alu', 'L1', 'L2', 'SVA', 'HERVK']}, "variant_sets": [{"type": "dDUP", "number": 5, "min_length": [2, 1], "max_length": [4, 1], "num_overlap": [1, 1, 1, 1, 1]}, {"type": "dDUP", "number": 5, "min_length": [6, 1], "max_length": [8, 1], "num_overlap": [1, 1, 1, 1, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap9': TestProcObject([self.ref_file, {"chr21": "CCTCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTATCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": {"bed": self.test_overlap_bed_11, "allow_types": ['Alu', 'L1', 'L2', 'SVA', 'HERVK']}, "variant_sets": [{"type": "dDUP", "number": 5, "min_length": [1, 1], "max_length": [1, 1], "num_partial_overlap": [1, 1, 1, 1, 1]}, {"type": "dDUP", "number": 5, "min_length": [1, 1], "max_length": [2, 1], "num_partial_overlap": [1, 1, 1, 1, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf) } self.test_objects_alu_mediated = {'alu_med1': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": {"bed": self.test_overlap_bed_4}, "variant_sets": [{"type": "DEL", "number": 1, "min_length": [13], "max_length": [15], "num_alu_mediated": 1}]}], self.hap1, self.hap2, self.bed, self.vcf)}
class TestProcObject(TestObject): def __init__(self, ref, par, hap1, hap2, bed, vcf): self.vcf = vcf super().__init__(ref, par, hap1, hap2, bed) def extract_bed_records(self): # parse bed record into dict for easy comparison # --> example split bed record: ['chr19', '0', '3', 'chr19', '0', '3', 'DEL', '3', '1/1', 'DEL', '1'] bed_records = [] with open(self.bed) as f: for line in f: ln = line.split() bed_record = {'source_chr': ln[0], 'source_s': ln[1], 'source_e': ln[2], 'target_chr': ln[3], 'target_s': ln[4], 'target_e': ln[5], 'ev_type': ln[6], 'len': ln[7], 'zyg': ln[8], 'parent_type': ln[9], 'sv_id': ln[10]} bed_records.append(bed_record) return bed_records def extract_vcf_records(self): vcf_records = [] vcf = VariantFile(self.vcf) for rec in vcf.fetch(): ln = str(rec).split() # separately parse info field of the form: 'END=45590417;SVTYPE=dDUP;SVLEN=539;TARGET=45581738' info = {field.split('=')[0]: field.split('=')[1] for field in ln[7].split(';')} vcf_record = {'CHROM': ln[0], 'POS': ln[1], 'ID': ln[2], 'REF': ln[3], 'ALT': ln[4], 'QUAL': ln[5], 'FILTER': ln[6], 'INFO': info, 'FORMAT': ln[8], 'SAMPLE': ln[9]} vcf_records.append(vcf_record) return vcf_records class TestProcessing(unittest.TestCase): def setUp(self): # runs before every test self.ref_file = "test/inputs/test.fa" self.par = "test/inputs/par.yaml" self.hap1 = "test/inputs/test1.fa" self.hap2 = "test/inputs/test2.fa" self.bed = "test/inputs/out.bed" self.vcf = "test/inputs/out.vcf" self.ins_fasta = "test/inputs/ins_fasta.fa" self.test_overlap_bed = "test/inputs/example_overlap_events.bed" self.test_overlap_bed_2 = "test/inputs/example_overlap_events_2.bed" # test_overlap_bed_3: events with differing chromosome self.test_overlap_bed_3 = "test/inputs/example_overlap_events_3.bed" self.test_overlap_bed_4 = "test/inputs/example_overlap_events_4.bed" self.test_overlap_bed_11 = "test/inputs/example_overlap_events_11.bed" self.test_objects_simple_events = {'DEL': TestProcObject([self.ref_file, {"chr19": "CTG"}], [self.par, {"sim_settings": {"reference": self.ref_file, "max_tries": 50, "prioritize_top": True}, "variant_sets": [{"type": "DEL", "number": 1, "max_length": [3], "min_length": [3]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'DUP': TestProcObject([self.ref_file, {"chr19": "CTG"}], [self.par, {"sim_settings": {"reference": self.ref_file, "max_tries": 50, "prioritize_top": True}, "variant_sets": [{"type": "DUP", "number": 1, "max_length": [3], "min_length": [3]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'INV': TestProcObject([self.ref_file, {"chr19": "CTG"}], [self.par, {"sim_settings": {"reference": self.ref_file, "max_tries": 50, "prioritize_top": True}, "variant_sets": [{"type": "INV", "number": 1, "max_length": [3], "min_length": [3]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'INS': TestProcObject([self.ref_file, {"chr19": "C"}], [self.par, {"sim_settings": {"reference": self.ref_file, "max_tries": 50, "prioritize_top": True}, "variant_sets": [{"type": "INS", "number": 1, "max_length": [3], "min_length": [3]}]}], self.hap1, self.hap2, self.bed, self.vcf)} self.test_objects_flanked_inversions = {'dupINVdup': TestProcObject([self.ref_file, {"chr19": "ACTGTC"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "dupINVdup", "number": 1, "max_length": [2, 2, 2], "min_length": [2, 2, 2]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'delINVdel': TestProcObject([self.ref_file, {"chr19": "ACTGTC"}], [self.par, {"sim_settings": {"reference": 
self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "delINVdel", "number": 1, "max_length": [2, 2, 2], "min_length": [2, 2, 2]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'dupINVdel': TestProcObject([self.ref_file, {"chr19": "ACTGTC"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "dupINVdel", "number": 1, "max_length": [2, 2, 2], "min_length": [2, 2, 2]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'delINVdup': TestProcObject([self.ref_file, {"chr19": "ACTGTC"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "delINVdup", "number": 1, "max_length": [2, 2, 2], "min_length": [2, 2, 2]}]}], self.hap1, self.hap2, self.bed, self.vcf)} self.test_objects_dispersions = {'dDUP': TestProcObject([self.ref_file, {"chr19": "ACTGTC"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "dDUP", "number": 1, "max_length": [3, 3], "min_length": [3, 3]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'INV_dDUP': TestProcObject([self.ref_file, {"chr19": "ACTGTC"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "INV_dDUP", "number": 1, "max_length": [3, 3], "min_length": [3, 3]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'TRA': TestProcObject([self.ref_file, {"chr19": "ACTGTC"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "TRA", "number": 1, "max_length": [3, 3], "min_length": [3, 3]}]}], self.hap1, self.hap2, self.bed, self.vcf)} self.test_objects_del_inv = {'delINV': TestProcObject([self.ref_file, {"chr19": "ACTGTC"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "delINV", "number": 1, "max_length": [3, 3], "min_length": [3, 3]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'INVdel': TestProcObject([self.ref_file, {"chr19": "ACTGTC"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "INVdel", "number": 1, "max_length": [3, 3], "min_length": [3, 3]}]}], self.hap1, self.hap2, self.bed, self.vcf)} self.test_objects_idel = {'dDUP_iDEL': TestProcObject([self.ref_file, {"chr19": "ACTGTCAG"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "dDUP_iDEL", "number": 1, "max_length": [3, 3, 2], "min_length": [3, 3, 2]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'INS_iDEL': TestProcObject([self.ref_file, {"chr19": "ACTGTCAG"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "INS_iDEL", "number": 1, "max_length": [3, 3, 2], "min_length": [3, 3, 2]}]}], self.hap1, self.hap2, self.bed, self.vcf)} self.test_objects_dup_inv = {'dup_INV': TestProcObject([self.ref_file, {"chr19": "ACTGTCAG"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "dup_INV", "number": 1, "max_length": [4, 4], "min_length": [4, 4]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'INV_dup': TestProcObject([self.ref_file, {"chr19": "ACTGTCAG"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "INV_dup", "number": 1, "max_length": [4, 4], "min_length": [4, 4]}]}], self.hap1, self.hap2, self.bed, self.vcf)} self.test_objects_INVdup = {'INVdup': 
TestProcObject([self.ref_file, {"chr19": "ACTG"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "variant_sets": [{"type": "INVdup", "number": 1, "max_length": [4], "min_length": [4]}]}], self.hap1, self.hap2, self.bed, self.vcf)} self.test_objects_multievent = {'INVdup': TestProcObject([self.ref_file, {"chr19": "ACTGCTAATGCGTTCACTGCTAATGCGTTC"}], [self.par, {"sim_settings": {"reference": self.ref_file, "max_tries": 200, "prioritize_top": True}, "variant_sets": [{"type": "INVdup", "number": 3, "max_length": [4], "min_length": [2]}]}], self.hap1, self.hap2, self.bed, self.vcf)} self.test_objects_overlap_simple = {'overlap1': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": { "bed": [self.test_overlap_bed, self.test_overlap_bed_2], "allow_types": ["L1HS", "ALR/Alpha"]}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [5], "num_overlap": [2, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap2': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True}, "overlap_events": { "bed": [self.test_overlap_bed, self.test_overlap_bed_2], "allow_types": ["L1HS", "ALR/Alpha"]}, "variant_sets": [{"type": "DEL", "number": 4, "min_length": [1], "max_length": [5], "num_overlap": [3, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap3': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": { "bed": [self.test_overlap_bed, self.test_overlap_bed_2], "allow_types": ["L1", "ALR"]}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [5], "num_overlap": [3, 2]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap4': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": { "bed": [self.test_overlap_bed, self.test_overlap_bed_2], "allow_types": "L1"}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [5], "num_overlap": 2}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap5': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": { "bed": self.test_overlap_bed_3, "allow_types": "ALR"}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [5], "num_overlap": 2}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap6': TestProcObject([self.ref_file, {"chr21": "CCTCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTATCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": {"bed": self.test_overlap_bed_11, "allow_types": ['Alu', 'L1', 'L2', 'SVA', 'HERVK']}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [2], "max_length": [4], "num_overlap": [1, 1, 1, 1, 1]}, {"type": "DEL", "number": 5, "min_length": [6], "max_length": [8], "num_overlap": [1, 
1, 1, 1, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap7': TestProcObject([self.ref_file, {"chr21": "CCTCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTATCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": {"bed": self.test_overlap_bed_11, "allow_types": ['Alu', 'L1', 'L2', 'SVA', 'HERVK']}, "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [1], "num_partial_overlap": [1, 1, 1, 1, 1]}, {"type": "DEL", "number": 5, "min_length": [2], "max_length": [2], "num_partial_overlap": [1, 1, 1, 1, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap8': TestProcObject([self.ref_file, {"chr21": "CCTCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTATCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": {"bed": self.test_overlap_bed_11, "allow_types": ['Alu', 'L1', 'L2', 'SVA', 'HERVK']}, "variant_sets": [{"type": "dDUP", "number": 5, "min_length": [2, 1], "max_length": [4, 1], "num_overlap": [1, 1, 1, 1, 1]}, {"type": "dDUP", "number": 5, "min_length": [6, 1], "max_length": [8, 1], "num_overlap": [1, 1, 1, 1, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf), 'overlap9': TestProcObject([self.ref_file, {"chr21": "CCTCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTATCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": {"bed": self.test_overlap_bed_11, "allow_types": ['Alu', 'L1', 'L2', 'SVA', 'HERVK']}, "variant_sets": [{"type": "dDUP", "number": 5, "min_length": [1, 1], "max_length": [1, 1], "num_partial_overlap": [1, 1, 1, 1, 1]}, {"type": "dDUP", "number": 5, "min_length": [1, 1], "max_length": [2, 1], "num_partial_overlap": [1, 1, 1, 1, 1]}]}], self.hap1, self.hap2, self.bed, self.vcf) } self.test_objects_alu_mediated = {'alu_med1': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {"sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True}, "overlap_events": {"bed": self.test_overlap_bed_4}, "variant_sets": [{"type": "DEL", "number": 1, "min_length": [13], "max_length": [15], "num_alu_mediated": 1}]}], self.hap1, self.hap2, self.bed, self.vcf)}
self.formatter = FormatterIO(self.par)
1
2023-12-01 14:39:20+00:00
16k
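The extract_vcf_records method in the record above parses a VCF INFO field such as 'END=45590417;SVTYPE=dDUP;SVLEN=539;TARGET=45581738' with a one-line dict comprehension. A minimal, self-contained sketch of that step (the helper name parse_info is illustrative, not taken from the record):

def parse_info(info_field: str) -> dict:
    # each ';'-separated entry is a KEY=VALUE pair; split on the first '=' only
    return {k: v for k, v in (item.split("=", 1) for item in info_field.split(";"))}

assert parse_info("END=45590417;SVTYPE=dDUP;SVLEN=539;TARGET=45581738")["SVTYPE"] == "dDUP"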
BiQiWHU/BWG
train_net.py
[ { "identifier": "add_maskformer2_config", "path": "mask2former/config.py", "snippet": "def add_maskformer2_config(cfg):\n \"\"\"\n Add config for MASK_FORMER.\n \"\"\"\n # NOTE: configs from original maskformer\n # data config\n # select the dataset mapper\n cfg.INPUT.DATASET_MAPPER...
from shapely.errors import ShapelyDeprecationWarning from collections import OrderedDict from typing import Any, Dict, List, Set from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import get_cfg from detectron2.data import MetadataCatalog, build_detection_train_loader from detectron2.engine import ( DefaultTrainer, default_argument_parser, default_setup, launch, ) from detectron2.evaluation import ( CityscapesInstanceEvaluator, CityscapesSemSegEvaluator, COCOEvaluator, COCOPanopticEvaluator, DatasetEvaluators, LVISEvaluator, SemSegEvaluator, verify_results, ) from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler from detectron2.solver.build import maybe_add_gradient_clipping from detectron2.utils.logger import setup_logger from mask2former import ( COCOInstanceNewBaselineDatasetMapper, COCOPanopticNewBaselineDatasetMapper, InstanceSegEvaluator, MaskFormerInstanceDatasetMapper, MaskFormerPanopticDatasetMapper, MaskFormerSemanticDatasetMapper, SemanticSegmentorWithTTA, add_maskformer2_config, ) import warnings import copy import itertools import logging import os import torch import detectron2.utils.comm as comm
12,296
@classmethod def build_train_loader(cls, cfg): # Semantic segmentation dataset mapper if cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_semantic": mapper = MaskFormerSemanticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Panoptic segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_panoptic": mapper = MaskFormerPanopticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Instance segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_instance": mapper = MaskFormerInstanceDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # coco instance segmentation lsj new baseline elif cfg.INPUT.DATASET_MAPPER_NAME == "coco_instance_lsj": mapper = COCOInstanceNewBaselineDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # coco panoptic segmentation lsj new baseline elif cfg.INPUT.DATASET_MAPPER_NAME == "coco_panoptic_lsj": mapper = COCOPanopticNewBaselineDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) else: mapper = None return build_detection_train_loader(cfg, mapper=mapper) @classmethod def build_lr_scheduler(cls, cfg, optimizer): """ It now calls :func:`detectron2.solver.build_lr_scheduler`. Overwrite it if you'd like a different scheduler. """ return build_lr_scheduler(cfg, optimizer) @classmethod def build_optimizer(cls, cfg, model): weight_decay_norm = cfg.SOLVER.WEIGHT_DECAY_NORM weight_decay_embed = cfg.SOLVER.WEIGHT_DECAY_EMBED defaults = {} defaults["lr"] = cfg.SOLVER.BASE_LR defaults["weight_decay"] = cfg.SOLVER.WEIGHT_DECAY norm_module_types = ( torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d, torch.nn.SyncBatchNorm, # NaiveSyncBatchNorm inherits from BatchNorm2d torch.nn.GroupNorm, torch.nn.InstanceNorm1d, torch.nn.InstanceNorm2d, torch.nn.InstanceNorm3d, torch.nn.LayerNorm, torch.nn.LocalResponseNorm, ) params: List[Dict[str, Any]] = [] memo: Set[torch.nn.parameter.Parameter] = set() for module_name, module in model.named_modules(): for module_param_name, value in module.named_parameters(recurse=False): if not value.requires_grad: continue # Avoid duplicating parameters if value in memo: continue memo.add(value) hyperparams = copy.copy(defaults) if "backbone" in module_name: hyperparams["lr"] = hyperparams["lr"] * cfg.SOLVER.BACKBONE_MULTIPLIER if ( "relative_position_bias_table" in module_param_name or "absolute_pos_embed" in module_param_name ): print(module_param_name) hyperparams["weight_decay"] = 0.0 if isinstance(module, norm_module_types): hyperparams["weight_decay"] = weight_decay_norm if isinstance(module, torch.nn.Embedding): hyperparams["weight_decay"] = weight_decay_embed params.append({"params": [value], **hyperparams}) def maybe_add_full_model_gradient_clipping(optim): # detectron2 doesn't have full model gradient clipping now clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE enable = ( cfg.SOLVER.CLIP_GRADIENTS.ENABLED and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model" and clip_norm_val > 0.0 ) class FullModelGradientClippingOptimizer(optim): def step(self, closure=None): all_params = itertools.chain(*[x["params"] for x in self.param_groups]) torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val) super().step(closure=closure) return FullModelGradientClippingOptimizer if enable else optim optimizer_type = cfg.SOLVER.OPTIMIZER if optimizer_type == "SGD": optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)( params, 
cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM ) elif optimizer_type == "ADAMW": optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)( params, cfg.SOLVER.BASE_LR ) else: raise NotImplementedError(f"no optimizer type {optimizer_type}") if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model": optimizer = maybe_add_gradient_clipping(cfg, optimizer) return optimizer @classmethod def test_with_TTA(cls, cfg, model): logger = logging.getLogger("detectron2.trainer") # In the end of training, run an evaluation with TTA. logger.info("Running inference with test-time augmentation ...")
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved """ MaskFormer Training Script. This script is a simplified version of the training script in detectron2/tools. """ try: # ignore ShapelyDeprecationWarning from fvcore warnings.filterwarnings('ignore', category=ShapelyDeprecationWarning) except: pass os.environ['DETECTRON2_DATASETS'] = 'E:/DGtask/datasets' # MaskFormer class Trainer(DefaultTrainer): """ Extension of the Trainer class adapted to MaskFormer. """ @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): """ Create evaluator(s) for a given dataset. This uses the special metadata "evaluator_type" associated with each builtin dataset. For your own dataset, you can simply create an evaluator manually in your script and do not have to worry about the hacky if-else logic here. """ if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") evaluator_list = [] evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type # semantic segmentation if evaluator_type in ["sem_seg", "ade20k_panoptic_seg"]: evaluator_list.append( SemSegEvaluator( dataset_name, distributed=True, output_dir=output_folder, ) ) # instance segmentation if evaluator_type == "coco": evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) # panoptic segmentation if evaluator_type in [ "coco_panoptic_seg", "ade20k_panoptic_seg", "cityscapes_panoptic_seg", "mapillary_vistas_panoptic_seg", ]: if cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON: evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder)) # COCO if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Mapillary Vistas if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Cityscapes if evaluator_type == "cityscapes_instance": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesInstanceEvaluator(dataset_name) if evaluator_type == "cityscapes_sem_seg": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesSemSegEvaluator(dataset_name) if evaluator_type == "cityscapes_panoptic_seg": if cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." evaluator_list.append(CityscapesSemSegEvaluator(dataset_name)) if cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." 
evaluator_list.append(CityscapesInstanceEvaluator(dataset_name)) # ADE20K if evaluator_type == "ade20k_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) # LVIS if evaluator_type == "lvis": return LVISEvaluator(dataset_name, output_dir=output_folder) if len(evaluator_list) == 0: raise NotImplementedError( "no Evaluator for the dataset {} with the type {}".format( dataset_name, evaluator_type ) ) elif len(evaluator_list) == 1: return evaluator_list[0] return DatasetEvaluators(evaluator_list) @classmethod def build_train_loader(cls, cfg): # Semantic segmentation dataset mapper if cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_semantic": mapper = MaskFormerSemanticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Panoptic segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_panoptic": mapper = MaskFormerPanopticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Instance segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_instance": mapper = MaskFormerInstanceDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # coco instance segmentation lsj new baseline elif cfg.INPUT.DATASET_MAPPER_NAME == "coco_instance_lsj": mapper = COCOInstanceNewBaselineDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # coco panoptic segmentation lsj new baseline elif cfg.INPUT.DATASET_MAPPER_NAME == "coco_panoptic_lsj": mapper = COCOPanopticNewBaselineDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) else: mapper = None return build_detection_train_loader(cfg, mapper=mapper) @classmethod def build_lr_scheduler(cls, cfg, optimizer): """ It now calls :func:`detectron2.solver.build_lr_scheduler`. Overwrite it if you'd like a different scheduler. 
""" return build_lr_scheduler(cfg, optimizer) @classmethod def build_optimizer(cls, cfg, model): weight_decay_norm = cfg.SOLVER.WEIGHT_DECAY_NORM weight_decay_embed = cfg.SOLVER.WEIGHT_DECAY_EMBED defaults = {} defaults["lr"] = cfg.SOLVER.BASE_LR defaults["weight_decay"] = cfg.SOLVER.WEIGHT_DECAY norm_module_types = ( torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d, torch.nn.SyncBatchNorm, # NaiveSyncBatchNorm inherits from BatchNorm2d torch.nn.GroupNorm, torch.nn.InstanceNorm1d, torch.nn.InstanceNorm2d, torch.nn.InstanceNorm3d, torch.nn.LayerNorm, torch.nn.LocalResponseNorm, ) params: List[Dict[str, Any]] = [] memo: Set[torch.nn.parameter.Parameter] = set() for module_name, module in model.named_modules(): for module_param_name, value in module.named_parameters(recurse=False): if not value.requires_grad: continue # Avoid duplicating parameters if value in memo: continue memo.add(value) hyperparams = copy.copy(defaults) if "backbone" in module_name: hyperparams["lr"] = hyperparams["lr"] * cfg.SOLVER.BACKBONE_MULTIPLIER if ( "relative_position_bias_table" in module_param_name or "absolute_pos_embed" in module_param_name ): print(module_param_name) hyperparams["weight_decay"] = 0.0 if isinstance(module, norm_module_types): hyperparams["weight_decay"] = weight_decay_norm if isinstance(module, torch.nn.Embedding): hyperparams["weight_decay"] = weight_decay_embed params.append({"params": [value], **hyperparams}) def maybe_add_full_model_gradient_clipping(optim): # detectron2 doesn't have full model gradient clipping now clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE enable = ( cfg.SOLVER.CLIP_GRADIENTS.ENABLED and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model" and clip_norm_val > 0.0 ) class FullModelGradientClippingOptimizer(optim): def step(self, closure=None): all_params = itertools.chain(*[x["params"] for x in self.param_groups]) torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val) super().step(closure=closure) return FullModelGradientClippingOptimizer if enable else optim optimizer_type = cfg.SOLVER.OPTIMIZER if optimizer_type == "SGD": optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)( params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM ) elif optimizer_type == "ADAMW": optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)( params, cfg.SOLVER.BASE_LR ) else: raise NotImplementedError(f"no optimizer type {optimizer_type}") if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model": optimizer = maybe_add_gradient_clipping(cfg, optimizer) return optimizer @classmethod def test_with_TTA(cls, cfg, model): logger = logging.getLogger("detectron2.trainer") # In the end of training, run an evaluation with TTA. logger.info("Running inference with test-time augmentation ...")
model = SemanticSegmentorWithTTA(cfg, model)
6
2023-11-29 17:15:46+00:00
16k
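The build_optimizer classmethod in the record above optionally wraps the optimizer in FullModelGradientClippingOptimizer, which clips the gradient norm over all parameter groups before stepping. A minimal sketch of that wrapper pattern, assuming a plain torch.optim.SGD (the helper name and clip_norm_val=0.5 are illustrative):

import itertools
import torch

def with_full_model_clipping(optim_cls, clip_norm_val):
    # subclass the optimizer so every step first clips the concatenated
    # parameter list, mirroring FullModelGradientClippingOptimizer above
    class ClippedOptimizer(optim_cls):
        def step(self, closure=None):
            all_params = itertools.chain(*[g["params"] for g in self.param_groups])
            torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val)
            super().step(closure=closure)
    return ClippedOptimizer

model = torch.nn.Linear(4, 2)
opt = with_full_model_clipping(torch.optim.SGD, clip_norm_val=0.5)(model.parameters(), lr=0.1)
model(torch.randn(3, 4)).sum().backward()
opt.step()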
opisaac9001/TTS-With-ooba-and-voice
TTS/tts/models/bark.py
[ { "identifier": "codec_decode", "path": "TTS/tts/layers/bark/inference_funcs.py", "snippet": "def codec_decode(fine_tokens, model):\n \"\"\"Turn quantized audio codes into audio array using encodec.\"\"\"\n arr = torch.from_numpy(fine_tokens)[None]\n arr = arr.to(model.device)\n arr = arr.tr...
import os import numpy as np from dataclasses import dataclass from typing import Optional from coqpit import Coqpit from encodec import EncodecModel from transformers import BertTokenizer from TTS.tts.layers.bark.inference_funcs import ( codec_decode, generate_coarse, generate_fine, generate_text_semantic, generate_voice, load_voice, ) from TTS.tts.layers.bark.load_model import load_model from TTS.tts.layers.bark.model import GPT from TTS.tts.layers.bark.model_fine import FineGPT from TTS.tts.models.base_tts import BaseTTS
14,198
model_type="coarse", ) self.fine_model, self.config = load_model( ckpt_path=self.config.LOCAL_MODEL_PATHS["fine"], device=self.device, config=self.config, model_type="fine" ) def train_step( self, ): pass def text_to_semantic( self, text: str, history_prompt: Optional[str] = None, temp: float = 0.7, base=None, allow_early_stop=True, **kwargs, ): """Generate semantic array from text. Args: text: text to be turned into audio history_prompt: history choice for audio cloning temp: generation temperature (1.0 more diverse, 0.0 more conservative) Returns: numpy semantic array to be fed into `semantic_to_waveform` """ x_semantic = generate_text_semantic( text, self, history_prompt=history_prompt, temp=temp, base=base, allow_early_stop=allow_early_stop, **kwargs, ) return x_semantic def semantic_to_waveform( self, semantic_tokens: np.ndarray, history_prompt: Optional[str] = None, temp: float = 0.7, base=None, ): """Generate audio array from semantic input. Args: semantic_tokens: semantic token output from `text_to_semantic` history_prompt: history choice for audio cloning temp: generation temperature (1.0 more diverse, 0.0 more conservative) Returns: numpy audio array at sample frequency 24khz """ x_coarse_gen = generate_coarse( semantic_tokens, self, history_prompt=history_prompt, temp=temp, base=base, ) x_fine_gen = generate_fine( x_coarse_gen, self, history_prompt=history_prompt, temp=0.5, base=base, ) audio_arr = codec_decode(x_fine_gen, self) return audio_arr, x_coarse_gen, x_fine_gen def generate_audio( self, text: str, history_prompt: Optional[str] = None, text_temp: float = 0.7, waveform_temp: float = 0.7, base=None, allow_early_stop=True, **kwargs, ): """Generate audio array from input text. Args: text: text to be turned into audio history_prompt: history choice for audio cloning text_temp: generation temperature (1.0 more diverse, 0.0 more conservative) waveform_temp: generation temperature (1.0 more diverse, 0.0 more conservative) Returns: numpy audio array at sample frequency 24khz """ x_semantic = self.text_to_semantic( text, history_prompt=history_prompt, temp=text_temp, base=base, allow_early_stop=allow_early_stop, **kwargs, ) audio_arr, c, f = self.semantic_to_waveform( x_semantic, history_prompt=history_prompt, temp=waveform_temp, base=base ) return audio_arr, [x_semantic, c, f] def generate_voice(self, audio, speaker_id, voice_dir): """Generate a voice from the given audio and text. Args: audio (str): Path to the audio file. speaker_id (str): Speaker name. voice_dir (str): Path to the directory to save the generate voice. """ if voice_dir is not None: voice_dirs = [voice_dir] try:
@dataclass class BarkAudioConfig(Coqpit): sample_rate: int = 24000 output_sample_rate: int = 24000 class Bark(BaseTTS): def __init__( self, config: Coqpit, tokenizer: BertTokenizer = BertTokenizer.from_pretrained("bert-base-multilingual-cased"), ) -> None: super().__init__(config=config, ap=None, tokenizer=None, speaker_manager=None, language_manager=None) self.config.num_chars = len(tokenizer) self.tokenizer = tokenizer self.semantic_model = GPT(config.semantic_config) self.coarse_model = GPT(config.coarse_config) self.fine_model = FineGPT(config.fine_config) self.encodec = EncodecModel.encodec_model_24khz() self.encodec.set_target_bandwidth(6.0) @property def device(self): return next(self.parameters()).device def load_bark_models(self): self.semantic_model, self.config = load_model( ckpt_path=self.config.LOCAL_MODEL_PATHS["text"], device=self.device, config=self.config, model_type="text" ) self.coarse_model, self.config = load_model( ckpt_path=self.config.LOCAL_MODEL_PATHS["coarse"], device=self.device, config=self.config, model_type="coarse", ) self.fine_model, self.config = load_model( ckpt_path=self.config.LOCAL_MODEL_PATHS["fine"], device=self.device, config=self.config, model_type="fine" ) def train_step( self, ): pass def text_to_semantic( self, text: str, history_prompt: Optional[str] = None, temp: float = 0.7, base=None, allow_early_stop=True, **kwargs, ): """Generate semantic array from text. Args: text: text to be turned into audio history_prompt: history choice for audio cloning temp: generation temperature (1.0 more diverse, 0.0 more conservative) Returns: numpy semantic array to be fed into `semantic_to_waveform` """ x_semantic = generate_text_semantic( text, self, history_prompt=history_prompt, temp=temp, base=base, allow_early_stop=allow_early_stop, **kwargs, ) return x_semantic def semantic_to_waveform( self, semantic_tokens: np.ndarray, history_prompt: Optional[str] = None, temp: float = 0.7, base=None, ): """Generate audio array from semantic input. Args: semantic_tokens: semantic token output from `text_to_semantic` history_prompt: history choice for audio cloning temp: generation temperature (1.0 more diverse, 0.0 more conservative) Returns: numpy audio array at sample frequency 24khz """ x_coarse_gen = generate_coarse( semantic_tokens, self, history_prompt=history_prompt, temp=temp, base=base, ) x_fine_gen = generate_fine( x_coarse_gen, self, history_prompt=history_prompt, temp=0.5, base=base, ) audio_arr = codec_decode(x_fine_gen, self) return audio_arr, x_coarse_gen, x_fine_gen def generate_audio( self, text: str, history_prompt: Optional[str] = None, text_temp: float = 0.7, waveform_temp: float = 0.7, base=None, allow_early_stop=True, **kwargs, ): """Generate audio array from input text. Args: text: text to be turned into audio history_prompt: history choice for audio cloning text_temp: generation temperature (1.0 more diverse, 0.0 more conservative) waveform_temp: generation temperature (1.0 more diverse, 0.0 more conservative) Returns: numpy audio array at sample frequency 24khz """ x_semantic = self.text_to_semantic( text, history_prompt=history_prompt, temp=text_temp, base=base, allow_early_stop=allow_early_stop, **kwargs, ) audio_arr, c, f = self.semantic_to_waveform( x_semantic, history_prompt=history_prompt, temp=waveform_temp, base=base ) return audio_arr, [x_semantic, c, f] def generate_voice(self, audio, speaker_id, voice_dir): """Generate a voice from the given audio and text. Args: audio (str): Path to the audio file. speaker_id (str): Speaker name. 
voice_dir (str): Path to the directory to save the generate voice. """ if voice_dir is not None: voice_dirs = [voice_dir] try:
_ = load_voice(speaker_id, voice_dirs)
5
2023-11-29 08:15:06+00:00
16k
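generate_audio in the Bark record above chains four stages: text to semantic tokens, semantic to coarse codes, coarse to fine codes, then codec decoding. A control-flow sketch with stub stages (the stubs below stand in for the real TTS.tts.layers.bark functions and produce meaningless numbers; only the call sequence mirrors the record):

def text_to_semantic(text): return [ord(c) % 10 for c in text]        # stub
def coarse_from_semantic(tokens): return [t * 2 for t in tokens]      # stub
def fine_from_coarse(tokens): return [t + 1 for t in tokens]          # stub
def decode_to_audio(tokens): return [t / 10.0 for t in tokens]        # stub

def generate_audio(text):
    semantic = text_to_semantic(text)        # text -> semantic tokens
    coarse = coarse_from_semantic(semantic)  # semantic -> coarse codes
    fine = fine_from_coarse(coarse)          # coarse -> fine codes
    return decode_to_audio(fine), [semantic, coarse, fine]

audio, (semantic, coarse, fine) = generate_audio("hi")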
AILab-CVC/UniRepLKNet
Video/dataset/build.py
[ { "identifier": "RawFrameClsDataset", "path": "Video/dataset/datasets.py", "snippet": "class RawFrameClsDataset(Dataset):\n \"\"\"Load your own raw frame classification dataset.\"\"\"\n\n def __init__(self,\n anno_path,\n data_root,\n mode='train',\n...
import os from .datasets import RawFrameClsDataset, VideoClsDataset from .pretrain_datasets import ( # noqa: F401 DataAugmentationForVideoMAEv2, HybridVideoMAE, VideoMAE, )
11,089
# -------------------------------------------------------- # Based on BEiT, timm, DINO and DeiT code bases # https://github.com/microsoft/unilm/tree/master/beit # https://github.com/rwightman/pytorch-image-models/tree/master/timm # https://github.com/facebookresearch/deit # https://github.com/facebookresearch/dino # --------------------------------------------------------' def build_pretraining_dataset(args): transform = DataAugmentationForVideoMAEv2(args) dataset = VideoMAE( root=args.data_root, setting=args.data_path, train=True, test_mode=False, name_pattern=args.fname_tmpl, video_ext='mp4', is_color=True, modality='rgb', num_segments=1, num_crop=1, new_length=args.num_frames, new_step=args.sampling_rate, transform=transform, temporal_jitter=False, lazy_init=False, num_sample=args.num_sample) print("Data Aug = %s" % str(transform)) return dataset def build_dataset(is_train, test_mode, args): if is_train: mode = 'train' anno_path = os.path.join(args.data_path, 'train.csv') elif test_mode: mode = 'test' anno_path = os.path.join(args.data_path, 'val.csv') else: mode = 'validation' anno_path = os.path.join(args.data_path, 'val.csv') if args.data_set == 'Kinetics-400': if not args.sparse_sample:
# -------------------------------------------------------- # Based on BEiT, timm, DINO and DeiT code bases # https://github.com/microsoft/unilm/tree/master/beit # https://github.com/rwightman/pytorch-image-models/tree/master/timm # https://github.com/facebookresearch/deit # https://github.com/facebookresearch/dino # --------------------------------------------------------' def build_pretraining_dataset(args): transform = DataAugmentationForVideoMAEv2(args) dataset = VideoMAE( root=args.data_root, setting=args.data_path, train=True, test_mode=False, name_pattern=args.fname_tmpl, video_ext='mp4', is_color=True, modality='rgb', num_segments=1, num_crop=1, new_length=args.num_frames, new_step=args.sampling_rate, transform=transform, temporal_jitter=False, lazy_init=False, num_sample=args.num_sample) print("Data Aug = %s" % str(transform)) return dataset def build_dataset(is_train, test_mode, args): if is_train: mode = 'train' anno_path = os.path.join(args.data_path, 'train.csv') elif test_mode: mode = 'test' anno_path = os.path.join(args.data_path, 'val.csv') else: mode = 'validation' anno_path = os.path.join(args.data_path, 'val.csv') if args.data_set == 'Kinetics-400': if not args.sparse_sample:
dataset = VideoClsDataset(
1
2023-11-24 07:28:22+00:00
16k
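build_dataset in the record above picks the split mode and annotation CSV before constructing the dataset; note that both the test and validation branches read val.csv. A small sketch of that dispatch (resolve_split is an illustrative name, not part of the record):

import os

def resolve_split(is_train: bool, test_mode: bool, data_path: str):
    # train takes precedence, then test; test and validation share val.csv
    if is_train:
        return "train", os.path.join(data_path, "train.csv")
    if test_mode:
        return "test", os.path.join(data_path, "val.csv")
    return "validation", os.path.join(data_path, "val.csv")

assert resolve_split(True, False, "anno")[0] == "train"
assert resolve_split(False, True, "anno")[1].endswith("val.csv")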
wenquanlu/HandRefiner
handrefiner.py
[ { "identifier": "handrefiner_root", "path": "config.py", "snippet": "" }, { "identifier": "create_model", "path": "cldm/model.py", "snippet": "def create_model(config_path):\n config = OmegaConf.load(config_path)\n model = instantiate_from_config(config.model).cpu()\n print(f'Lo...
import sys import os import argparse import json import torch import numpy as np import cv2 import numpy as np import cv2 import config import cv2 import einops import numpy as np import torch import random import ast from config import handrefiner_root from PIL import Image from torchvision import transforms from pytorch_lightning import seed_everything from cldm.model import create_model, load_state_dict from cldm.ddim_hacked import DDIMSampler from pathlib import Path from preprocessor.meshgraphormer import MeshGraphormerMediapipe
10,879
# input image parser.add_argument('--input_img', type=str, default="") # output directory where the rectified images will be saved to parser.add_argument('--out_dir', type=str, default="") # file where the mpjpe values will be logged to parser.add_argument('--log_json', type=str, default="") # control strength for ControlNet parser.add_argument('--strength', type=float, default=1.0) # directory where the depth maps will be saved to. Leaving it empty will disable this function parser.add_argument('--depth_dir', type=str, default="") # directory where the masks will be saved to. Leaving it empty will disable this function parser.add_argument('--mask_dir', type=str, default="") # whether evaluate the mpjpe error in fixed control strength mode parser.add_argument('--eval', type=ast.literal_eval, default=False) # whether use finetuned ControlNet trained on synthetic images as introduced in the paper parser.add_argument('--finetuned', type=ast.literal_eval, default=True) # path to the SD + ControlNet weights parser.add_argument('--weights', type=str, default="") # batch size parser.add_argument('--num_samples', type=int, default=1) # prompt file for multi-image rectification # see manual.md for file format parser.add_argument('--prompt_file', type=str, default="") # prompt for single image rectification parser.add_argument('--prompt', type=str, default="") # number of generation iteration for each image to be rectified # in general, for each input image, n_iter x num_samples number of rectified images will be produced parser.add_argument('--n_iter', type=int, default=1) # adaptive control strength as introduced in paper (we tend to use fixed control strength as default) parser.add_argument('--adaptive_control', type=ast.literal_eval, default=False) # padding controls the size of masks around the hand parser.add_argument('--padding_bbox', type=int, default=30) # set seed parser.add_argument('--seed', type=int, default=-1) args = parser.parse_args() return args args = parse_args() if (args.prompt_file != "" and args.prompt != "") or (args.prompt_file == "" and args.prompt == ""): raise Exception("Please specify one and only one of the --prompt and --prompt_file") if (args.input_dir != "" and args.input_img != "") or (args.input_dir == "" and args.input_img == ""): raise Exception("Please specify one and only one of the --input_dir and --input_img") model = create_model("control_depth_inpaint.yaml").cpu() if args.finetuned: model.load_state_dict(load_state_dict(args.weights, location='cuda'), strict=False) else: model.load_state_dict( load_state_dict("models/sd-v1-5-inpainting.ckpt", location="cuda"), strict=False ) model.load_state_dict( load_state_dict("models/control_v11f1p_sd15_depth.pth", location="cuda"), strict=False, ) model = model.to("cuda") meshgraphormer = MeshGraphormerMediapipe() if args.log_json != "": f_mpjpe = open(args.log_json, 'w') # prompt needs to be same for all pictures in the same batch if args.input_img != "": assert args.prompt_file == "", "prompt file should not be used for single image rectification" inputs = [args.input_img] else: if args.prompt_file != "": f_prompt = open(args.prompt_file) inputs = f_prompt.readlines() else: inputs = os.listdir(args.input_dir) for file_info in inputs: if args.prompt_file != "": file_info = json.loads(file_info) file_name = file_info["img"] prompt = file_info["txt"] else: file_name = file_info prompt = args.prompt image_file = os.path.join(args.input_dir, file_name) file_name_raw = Path(file_name).stem # STEP 3: Load the input image. 
image = np.array(Image.open(image_file)) raw_image = image H, W, C = raw_image.shape gen_count = 0 for iteration in range(args.n_iter): depthmap, mask, info = meshgraphormer.get_depth(args.input_dir, file_name, args.padding_bbox) if args.depth_dir != "": cv2.imwrite(os.path.join(args.depth_dir, file_name_raw + "_depth.jpg"), depthmap) if args.mask_dir != "": cv2.imwrite(os.path.join(args.mask_dir, file_name_raw + "_mask.jpg"), mask) control = depthmap
# STEP 1: Import the necessary modules. from __future__ import absolute_import, division, print_function def load(): paths = [handrefiner_root, os.path.join(handrefiner_root, 'MeshGraphormer'), os.path.join(handrefiner_root, 'preprocessor')] for p in paths: sys.path.insert(0, p) load() transform = transforms.Compose([ transforms.ToTensor(), transforms.Normalize( mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) def parse_args(): parser = argparse.ArgumentParser() # input directory containing images to be rectified parser.add_argument('--input_dir', type=str, default="") # input image parser.add_argument('--input_img', type=str, default="") # output directory where the rectified images will be saved to parser.add_argument('--out_dir', type=str, default="") # file where the mpjpe values will be logged to parser.add_argument('--log_json', type=str, default="") # control strength for ControlNet parser.add_argument('--strength', type=float, default=1.0) # directory where the depth maps will be saved to. Leaving it empty will disable this function parser.add_argument('--depth_dir', type=str, default="") # directory where the masks will be saved to. Leaving it empty will disable this function parser.add_argument('--mask_dir', type=str, default="") # whether evaluate the mpjpe error in fixed control strength mode parser.add_argument('--eval', type=ast.literal_eval, default=False) # whether use finetuned ControlNet trained on synthetic images as introduced in the paper parser.add_argument('--finetuned', type=ast.literal_eval, default=True) # path to the SD + ControlNet weights parser.add_argument('--weights', type=str, default="") # batch size parser.add_argument('--num_samples', type=int, default=1) # prompt file for multi-image rectification # see manual.md for file format parser.add_argument('--prompt_file', type=str, default="") # prompt for single image rectification parser.add_argument('--prompt', type=str, default="") # number of generation iteration for each image to be rectified # in general, for each input image, n_iter x num_samples number of rectified images will be produced parser.add_argument('--n_iter', type=int, default=1) # adaptive control strength as introduced in paper (we tend to use fixed control strength as default) parser.add_argument('--adaptive_control', type=ast.literal_eval, default=False) # padding controls the size of masks around the hand parser.add_argument('--padding_bbox', type=int, default=30) # set seed parser.add_argument('--seed', type=int, default=-1) args = parser.parse_args() return args args = parse_args() if (args.prompt_file != "" and args.prompt != "") or (args.prompt_file == "" and args.prompt == ""): raise Exception("Please specify one and only one of the --prompt and --prompt_file") if (args.input_dir != "" and args.input_img != "") or (args.input_dir == "" and args.input_img == ""): raise Exception("Please specify one and only one of the --input_dir and --input_img") model = create_model("control_depth_inpaint.yaml").cpu() if args.finetuned: model.load_state_dict(load_state_dict(args.weights, location='cuda'), strict=False) else: model.load_state_dict( load_state_dict("models/sd-v1-5-inpainting.ckpt", location="cuda"), strict=False ) model.load_state_dict( load_state_dict("models/control_v11f1p_sd15_depth.pth", location="cuda"), strict=False, ) model = model.to("cuda") meshgraphormer = MeshGraphormerMediapipe() if args.log_json != "": f_mpjpe = open(args.log_json, 'w') # prompt needs to be same for all pictures in the same batch if 
args.input_img != "": assert args.prompt_file == "", "prompt file should not be used for single image rectification" inputs = [args.input_img] else: if args.prompt_file != "": f_prompt = open(args.prompt_file) inputs = f_prompt.readlines() else: inputs = os.listdir(args.input_dir) for file_info in inputs: if args.prompt_file != "": file_info = json.loads(file_info) file_name = file_info["img"] prompt = file_info["txt"] else: file_name = file_info prompt = args.prompt image_file = os.path.join(args.input_dir, file_name) file_name_raw = Path(file_name).stem # STEP 3: Load the input image. image = np.array(Image.open(image_file)) raw_image = image H, W, C = raw_image.shape gen_count = 0 for iteration in range(args.n_iter): depthmap, mask, info = meshgraphormer.get_depth(args.input_dir, file_name, args.padding_bbox) if args.depth_dir != "": cv2.imwrite(os.path.join(args.depth_dir, file_name_raw + "_depth.jpg"), depthmap) if args.mask_dir != "": cv2.imwrite(os.path.join(args.mask_dir, file_name_raw + "_mask.jpg"), mask) control = depthmap
ddim_sampler = DDIMSampler(model)
3
2023-11-24 10:19:23+00:00
16k
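The HandRefiner record above declares its boolean CLI flags with type=ast.literal_eval rather than type=bool. A short sketch of why that matters (the flag names reuse the record's; the parse_args input is illustrative):

import argparse
import ast

parser = argparse.ArgumentParser()
# ast.literal_eval turns the strings "True"/"False" into real booleans;
# with type=bool, any non-empty string (including "False") would be truthy
parser.add_argument("--finetuned", type=ast.literal_eval, default=True)
parser.add_argument("--adaptive_control", type=ast.literal_eval, default=False)

args = parser.parse_args(["--finetuned", "False", "--adaptive_control", "True"])
assert args.finetuned is False and args.adaptive_control is True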
eth-sri/language-model-arithmetic
src/model_arithmetic/evaluation.py
[ { "identifier": "BaseClass", "path": "src/model_arithmetic/base.py", "snippet": "class BaseClass:\n \"\"\"\n Base class for providing a serialization and deserialization mechanism.\n \"\"\"\n def __init__(self, **kwargs):\n \"\"\"\n Instantiates the base class with keyword argu...
from .base import BaseClass from loguru import logger from transformers import set_seed, Trainer, TrainingArguments, DataCollatorWithPadding from .dataset import CustomDataset from sklearn.model_selection import train_test_split from .basic_model_loader import load_model, load_tokenizer from .model_arithmetic import ModelArithmetic from googleapiclient import discovery from dotenv import load_dotenv from torch.utils.data import DataLoader from .utils import ENABLE_LOGGING, log from lm_eval import evaluator import pandas as pd import numpy as np import torch import os import json import time
13,813
load_dotenv() class Evaluation(BaseClass): """ This class is used for evaluating a model's performance on a given dataset. It includes methods for preparing the dataset, evaluating the model, generating samples, calculating perplexity and faithfulness of the model. """ def __init__(self, generator=None, dataset_location=None, dataset=None, train_dataset=None, train_dataset_location=None, n_input_words=5, bleurt_checkpoint="../models/BLEURT-20", **kwargs): """ Initialize the Evaluation class with the given parameters. Args: generator (ModelArithmetic, optional): The model to be evaluated. dataset_location (string, optional): The location of the dataset to be used for evaluation. Either this or dataset should be provided. Dataset should contain column "text", "input", "output and "label" ("label", "input", "output" optional) dataset (pd.DataFrame, optional): The dataset to be used for evaluation. Either this or dataset_location should be provided. Dataset should contain column "text", "input", "output and "label" ("label", "input", "output" optional) train_dataset (pd.DataFrame, optional): The dataset to be used for training the model. Only used when calculating the faithfulness of the model and when the downstream model still needs to be finetuned. train_dataset_location (string, optional): The location of the dataset to be used for training the model. n_input_words (int, optional): The number of input words to be used in the generator. Only used if the dataset does not contain the column "input". bleurt_checkpoint (string, optional): The location of the BLEURT model checkpoint. **kwargs: Additional keyword arguments. """ self.has_input_task = True self.dataset = None if dataset is not None: self.dataset = dataset.copy() elif dataset_location is not None: self.dataset = pd.read_csv(dataset_location, escapechar='\\', lineterminator="\n") if train_dataset is not None: self.train_dataset = train_dataset elif train_dataset_location is not None: self.train_dataset = pd.read_csv(train_dataset_location, escapechar='\\', lineterminator="\n") else: self.train_dataset = None if self.dataset is not None: self.prepare_dataset(n_input_words) super().__init__(**kwargs, dataset_location=dataset_location, generator=generator, has_input_task=self.has_input_task, output=dict(), extra_kwargs=None, bleurt_checkpoint=bleurt_checkpoint, train_dataset_location=None) if isinstance(generator, ModelArithmetic): # If we don't do it this way, we can't store the evaluator because ModelArithmetic is not serializable del self.kwargs["generator"] self.kwargs["formula"] = generator.formula self.formula = generator.formula def prepare_dataset(self, n_input_words=5): """ Prepares the dataset for evaluation. If the dataset does not have an input column, it assumes the input is the first n_input_words words of the output. If the dataset does not have a label column, it assumes all labels are 1. Args: n_input_words (int): The number of input words to be used. """
load_dotenv() class Evaluation(BaseClass): """ This class is used for evaluating a model's performance on a given dataset. It includes methods for preparing the dataset, evaluating the model, generating samples, calculating perplexity and faithfulness of the model. """ def __init__(self, generator=None, dataset_location=None, dataset=None, train_dataset=None, train_dataset_location=None, n_input_words=5, bleurt_checkpoint="../models/BLEURT-20", **kwargs): """ Initialize the Evaluation class with the given parameters. Args: generator (ModelArithmetic, optional): The model to be evaluated. dataset_location (string, optional): The location of the dataset to be used for evaluation. Either this or dataset should be provided. Dataset should contain column "text", "input", "output and "label" ("label", "input", "output" optional) dataset (pd.DataFrame, optional): The dataset to be used for evaluation. Either this or dataset_location should be provided. Dataset should contain column "text", "input", "output and "label" ("label", "input", "output" optional) train_dataset (pd.DataFrame, optional): The dataset to be used for training the model. Only used when calculating the faithfulness of the model and when the downstream model still needs to be finetuned. train_dataset_location (string, optional): The location of the dataset to be used for training the model. n_input_words (int, optional): The number of input words to be used in the generator. Only used if the dataset does not contain the column "input". bleurt_checkpoint (string, optional): The location of the BLEURT model checkpoint. **kwargs: Additional keyword arguments. """ self.has_input_task = True self.dataset = None if dataset is not None: self.dataset = dataset.copy() elif dataset_location is not None: self.dataset = pd.read_csv(dataset_location, escapechar='\\', lineterminator="\n") if train_dataset is not None: self.train_dataset = train_dataset elif train_dataset_location is not None: self.train_dataset = pd.read_csv(train_dataset_location, escapechar='\\', lineterminator="\n") else: self.train_dataset = None if self.dataset is not None: self.prepare_dataset(n_input_words) super().__init__(**kwargs, dataset_location=dataset_location, generator=generator, has_input_task=self.has_input_task, output=dict(), extra_kwargs=None, bleurt_checkpoint=bleurt_checkpoint, train_dataset_location=None) if isinstance(generator, ModelArithmetic): # If we don't do it this way, we can't store the evaluator because ModelArithmetic is not serializable del self.kwargs["generator"] self.kwargs["formula"] = generator.formula self.formula = generator.formula def prepare_dataset(self, n_input_words=5): """ Prepares the dataset for evaluation. If the dataset does not have an input column, it assumes the input is the first n_input_words words of the output. If the dataset does not have a label column, it assumes all labels are 1. Args: n_input_words (int): The number of input words to be used. """
log(logger.debug, "Preparing dataset")
6
2023-11-21 20:01:08+00:00
16k
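prepare_dataset in the Evaluation record above documents that, when the dataset lacks an "input" column, the input is taken as the first n_input_words words, and that missing labels default to 1. A sketch of that documented behavior in pandas (the exact column handling in the record's implementation may differ):

import pandas as pd

def prepare(df: pd.DataFrame, n_input_words: int = 5) -> pd.DataFrame:
    if "input" not in df.columns:
        # first n_input_words words of the text become the generation prompt
        df["input"] = df["text"].str.split().str[:n_input_words].str.join(" ")
    if "label" not in df.columns:
        df["label"] = 1
    return df

df = prepare(pd.DataFrame({"text": ["the quick brown fox jumps over the lazy dog"]}))
assert df.loc[0, "input"] == "the quick brown fox jumps"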
huang-yh/SelfOcc
model/encoder/tpvformer/tpvformer_encoder.py
[ { "identifier": "BaseEncoder", "path": "model/encoder/base_encoder.py", "snippet": "class BaseEncoder(BaseModule):\n \"\"\"Further encode 3D representations.\n image backbone -> neck -> lifter -> encoder -> segmentor\n \"\"\"\n\n def __init__(self, init_cfg=None, **kwargs):\n super()....
from mmseg.registry import MODELS from mmcv.cnn.bricks.transformer import build_positional_encoding, build_transformer_layer from mmcv.ops.multi_scale_deform_attn import MultiScaleDeformableAttention from mmengine.model import ModuleList from torch.nn.init import normal_ from mmengine.logging import MMLogger from ..base_encoder import BaseEncoder from ..bevformer.utils import point_sampling from .utils import get_cross_view_ref_points from ..bevformer.mappings import GridMeterMapping from ..bevformer.attention import BEVCrossAttention, BEVDeformableAttention from .attention import TPVCrossAttention, CrossViewHybridAttention from .modules import CameraAwareSE import torch.nn as nn, torch, copy
11,499
num_feature_levels=4, positional_encoding=None, num_points_cross=[64, 64, 8], num_points_self=[16, 16, 16], transformerlayers=None, num_layers=None, camera_aware=False, camera_aware_mid_channels=None, init_cfg=None): super().__init__(init_cfg) # self.bev_inner = bev_inner # self.bev_outer = bev_outer # self.range_inner = range_inner # self.range_outer = range_outer # assert nonlinear_mode == 'linear_upscale' # TODO # self.nonlinear_mode = nonlinear_mode # self.z_inner = z_inner # self.z_outer = z_outer # self.z_ranges = z_ranges self.embed_dims = embed_dims self.num_feature_levels = num_feature_levels self.num_cams = num_cams self.camera_aware = camera_aware if camera_aware: if camera_aware_mid_channels is None: camera_aware_mid_channels = embed_dims self.camera_se_net = CameraAwareSE( embed_dims, camera_aware_mid_channels, embed_dims) self.mapping = GridMeterMapping( # bev_inner, # bev_outer, # range_inner, # range_outer, # nonlinear_mode, # z_inner, # z_outer, # z_ranges **mapping_args) size_h = self.mapping.size_h size_w = self.mapping.size_w size_d = self.mapping.size_d hw_grid = torch.stack( [torch.arange(size_h, dtype=torch.float).unsqueeze(-1).expand(-1, size_w), torch.arange(size_w, dtype=torch.float).unsqueeze(0).expand(size_h, -1), torch.zeros(size_h, size_w)], dim=-1) hw_meter = self.mapping.grid2meter(hw_grid)[..., [0, 1]] zh_grid = torch.stack( [torch.arange(size_h, dtype=torch.float).unsqueeze(0).expand(size_d, -1), torch.zeros(size_d, size_h), torch.arange(size_d, dtype=torch.float).unsqueeze(-1).expand(-1, size_h)], dim=-1) zh_meter = self.mapping.grid2meter(zh_grid)[..., [1, 2]] wz_grid = torch.stack( [torch.zeros(size_w, size_d), torch.arange(size_w, dtype=torch.float).unsqueeze(-1).expand(-1, size_d), torch.arange(size_d, dtype=torch.float).unsqueeze(0).expand(size_w, -1)], dim=-1) wz_meter = self.mapping.grid2meter(wz_grid)[..., [0, 2]] positional_encoding.update({'tpv_meters': [hw_meter, zh_meter, wz_meter]}) self.positional_encoding = build_positional_encoding(positional_encoding) self.tpv_size = [size_h, size_w, size_d] # transformer layers if isinstance(transformerlayers, dict): transformerlayers = [ copy.deepcopy(transformerlayers) for _ in range(num_layers)] else: assert isinstance(transformerlayers, list) and \ len(transformerlayers) == num_layers self.num_layers = num_layers self.layers = ModuleList() for i in range(num_layers): self.layers.append(build_transformer_layer(transformerlayers[i])) self.pre_norm = self.layers[0].pre_norm logger.info('use pre_norm: ' + str(self.pre_norm)) # other learnable embeddings self.level_embeds = nn.Parameter( torch.randn(self.num_feature_levels, self.embed_dims)) self.cams_embeds = nn.Parameter( torch.randn(self.num_cams, self.embed_dims)) # prepare reference points used in image cross-attention and cross-view hybrid-attention self.num_points_cross = num_points_cross self.num_points_self = num_points_self uniform_d = torch.linspace(0, size_d - 1, num_points_cross[2]) hw_3d_grid = torch.cat([ hw_grid[..., [0, 1]].unsqueeze(2).expand(-1, -1, num_points_cross[2], -1), uniform_d.reshape(1, 1, -1, 1).expand(size_h, size_w, -1, -1)], dim=-1) ref_3d_hw = self.mapping.grid2meter(hw_3d_grid) # H, W, P0, 3 uniform_w = torch.linspace(0, size_w - 1, num_points_cross[1]) zh_3d_grid = torch.cat([ zh_grid[..., :1].unsqueeze(2).expand(-1, -1, num_points_cross[1], -1), uniform_w.reshape(1, 1, -1, 1).expand(size_d, size_h, -1, -1), zh_grid[..., 2:].unsqueeze(2).expand(-1, -1, num_points_cross[1], -1) ], dim=-1) ref_3d_zh = 
self.mapping.grid2meter(zh_3d_grid) # Z, H, P1, 3 uniform_h = torch.linspace(0, size_h - 1, num_points_cross[0]) wz_3d_grid = torch.cat([ uniform_h.reshape(1, 1, -1, 1).expand(size_w, size_d, -1, -1), wz_grid[..., [1, 2]].unsqueeze(2).expand(-1, -1, num_points_cross[0], -1) ], dim=-1) ref_3d_wz = self.mapping.grid2meter(wz_3d_grid) # W, Z, P2, 3 self.register_buffer('ref_3d_hw', ref_3d_hw.flatten(0, 1).transpose(0, 1), False) self.register_buffer('ref_3d_zh', ref_3d_zh.flatten(0, 1).transpose(0, 1), False) self.register_buffer('ref_3d_wz', ref_3d_wz.flatten(0, 1).transpose(0, 1), False)
logger = MMLogger.get_instance('selfocc') @MODELS.register_module() class TPVFormerEncoder(BaseEncoder): def __init__( self, mapping_args: dict, # bev_inner=128, # bev_outer=32, # range_inner=51.2, # range_outer=51.2, # nonlinear_mode='linear_upscale', # z_inner=20, # z_outer=10, # z_ranges=[-5.0, 3.0, 11.0], embed_dims=128, num_cams=6, num_feature_levels=4, positional_encoding=None, num_points_cross=[64, 64, 8], num_points_self=[16, 16, 16], transformerlayers=None, num_layers=None, camera_aware=False, camera_aware_mid_channels=None, init_cfg=None): super().__init__(init_cfg) # self.bev_inner = bev_inner # self.bev_outer = bev_outer # self.range_inner = range_inner # self.range_outer = range_outer # assert nonlinear_mode == 'linear_upscale' # TODO # self.nonlinear_mode = nonlinear_mode # self.z_inner = z_inner # self.z_outer = z_outer # self.z_ranges = z_ranges self.embed_dims = embed_dims self.num_feature_levels = num_feature_levels self.num_cams = num_cams self.camera_aware = camera_aware if camera_aware: if camera_aware_mid_channels is None: camera_aware_mid_channels = embed_dims self.camera_se_net = CameraAwareSE( embed_dims, camera_aware_mid_channels, embed_dims) self.mapping = GridMeterMapping( # bev_inner, # bev_outer, # range_inner, # range_outer, # nonlinear_mode, # z_inner, # z_outer, # z_ranges **mapping_args) size_h = self.mapping.size_h size_w = self.mapping.size_w size_d = self.mapping.size_d hw_grid = torch.stack( [torch.arange(size_h, dtype=torch.float).unsqueeze(-1).expand(-1, size_w), torch.arange(size_w, dtype=torch.float).unsqueeze(0).expand(size_h, -1), torch.zeros(size_h, size_w)], dim=-1) hw_meter = self.mapping.grid2meter(hw_grid)[..., [0, 1]] zh_grid = torch.stack( [torch.arange(size_h, dtype=torch.float).unsqueeze(0).expand(size_d, -1), torch.zeros(size_d, size_h), torch.arange(size_d, dtype=torch.float).unsqueeze(-1).expand(-1, size_h)], dim=-1) zh_meter = self.mapping.grid2meter(zh_grid)[..., [1, 2]] wz_grid = torch.stack( [torch.zeros(size_w, size_d), torch.arange(size_w, dtype=torch.float).unsqueeze(-1).expand(-1, size_d), torch.arange(size_d, dtype=torch.float).unsqueeze(0).expand(size_w, -1)], dim=-1) wz_meter = self.mapping.grid2meter(wz_grid)[..., [0, 2]] positional_encoding.update({'tpv_meters': [hw_meter, zh_meter, wz_meter]}) self.positional_encoding = build_positional_encoding(positional_encoding) self.tpv_size = [size_h, size_w, size_d] # transformer layers if isinstance(transformerlayers, dict): transformerlayers = [ copy.deepcopy(transformerlayers) for _ in range(num_layers)] else: assert isinstance(transformerlayers, list) and \ len(transformerlayers) == num_layers self.num_layers = num_layers self.layers = ModuleList() for i in range(num_layers): self.layers.append(build_transformer_layer(transformerlayers[i])) self.pre_norm = self.layers[0].pre_norm logger.info('use pre_norm: ' + str(self.pre_norm)) # other learnable embeddings self.level_embeds = nn.Parameter( torch.randn(self.num_feature_levels, self.embed_dims)) self.cams_embeds = nn.Parameter( torch.randn(self.num_cams, self.embed_dims)) # prepare reference points used in image cross-attention and cross-view hybrid-attention self.num_points_cross = num_points_cross self.num_points_self = num_points_self uniform_d = torch.linspace(0, size_d - 1, num_points_cross[2]) hw_3d_grid = torch.cat([ hw_grid[..., [0, 1]].unsqueeze(2).expand(-1, -1, num_points_cross[2], -1), uniform_d.reshape(1, 1, -1, 1).expand(size_h, size_w, -1, -1)], dim=-1) ref_3d_hw = self.mapping.grid2meter(hw_3d_grid) # H, W, P0, 
3 uniform_w = torch.linspace(0, size_w - 1, num_points_cross[1]) zh_3d_grid = torch.cat([ zh_grid[..., :1].unsqueeze(2).expand(-1, -1, num_points_cross[1], -1), uniform_w.reshape(1, 1, -1, 1).expand(size_d, size_h, -1, -1), zh_grid[..., 2:].unsqueeze(2).expand(-1, -1, num_points_cross[1], -1) ], dim=-1) ref_3d_zh = self.mapping.grid2meter(zh_3d_grid) # Z, H, P1, 3 uniform_h = torch.linspace(0, size_h - 1, num_points_cross[0]) wz_3d_grid = torch.cat([ uniform_h.reshape(1, 1, -1, 1).expand(size_w, size_d, -1, -1), wz_grid[..., [1, 2]].unsqueeze(2).expand(-1, -1, num_points_cross[0], -1) ], dim=-1) ref_3d_wz = self.mapping.grid2meter(wz_3d_grid) # W, Z, P2, 3 self.register_buffer('ref_3d_hw', ref_3d_hw.flatten(0, 1).transpose(0, 1), False) self.register_buffer('ref_3d_zh', ref_3d_zh.flatten(0, 1).transpose(0, 1), False) self.register_buffer('ref_3d_wz', ref_3d_wz.flatten(0, 1).transpose(0, 1), False)
cross_view_ref_points = get_cross_view_ref_points(size_h, size_w, size_d, num_points_self)
2
2023-11-20 12:49:14+00:00
16k
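The TPVFormer encoder record above builds its hw_grid, zh_grid, and wz_grid by stacking broadcast arange tensors. A minimal sketch of that pattern for the hw plane, with toy sizes in place of the mapping's real dimensions:

import torch

size_h, size_w = 4, 6
# stack (h_index, w_index, 0) per cell -> an (H, W, 3) coordinate grid,
# the same construction used for hw_grid in the record
hw_grid = torch.stack(
    [torch.arange(size_h, dtype=torch.float).unsqueeze(-1).expand(-1, size_w),
     torch.arange(size_w, dtype=torch.float).unsqueeze(0).expand(size_h, -1),
     torch.zeros(size_h, size_w)],
    dim=-1)
assert hw_grid.shape == (size_h, size_w, 3)
assert hw_grid[2, 5].tolist() == [2.0, 5.0, 0.0]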
MobileTeleSystems/CoolGraph
cool_graph/runners.py
[ { "identifier": "RawDataProcessor", "path": "cool_graph/data/data_processor.py", "snippet": "class RawDataProcessor:\n \"\"\"\n Preprocessing datasets.\n\n Args:\n groups_names (Dict[int, str]): Name of groups in nodes.\n group_names_node_features (Dict[str, List[str]]): Name of f...
import os import pathlib import hydra import numpy as np import optuna import pandas as pd import torch from datetime import datetime from itertools import product from pathlib import Path from typing import Dict, List, Literal, Optional from hydra import ( compose, core, initialize, initialize_config_dir, initialize_config_module, ) from omegaconf import DictConfig, OmegaConf from optuna.trial import TrialState from sklearn.model_selection import train_test_split from torch_geometric.data import Data from torch_geometric.loader import NeighborLoader, NeighborSampler from tqdm import tqdm from cool_graph.data import RawDataProcessor from cool_graph.data.batch import get_auto_batch_size from cool_graph.data.loaders import create_loaders from cool_graph.logging import setup_mlflow_from_config from cool_graph.parameter_search import ( model_params_to_trial_params, sample_model_params, ) from cool_graph.train import Trainer
12,135
groups_names=self.groups_names, mlflow_experiment_name=self.cfg["logging"].get("mlflow_experiment_name"), n_epochs=self.cfg["training"].get("n_epochs"), scheduler_params=self.cfg["training"].get("scheduler_params", {}), scheduler_type=self.cfg["training"].get("scheduler_type"), target_names=self.target_names, use_mlflow=self.cfg["logging"].get("use_mlflow", False), tqdm_disable=False, target_sizes=self.target_sizes, **self.cfg["model_params"], groups_names_num_features=self.groups_names_num_features, num_edge_features=self.num_edge_features, metrics=self.metrics, log_all_metrics=False, ) result = self.trainer.train() return result class HypeRunner(BaseRunner): """ Runner for optimization model with Optuna. https://optuna.readthedocs.io/en/stable/reference/index.html 1st trial - with default config params (hyper_params). Also, 2nd trial - you can add own trial as argument enqueue_trial in optimazire_run method, and next trial optuna optimize model params randomly, if set None randomly optimization after 1st default trial. Args: data (Data): Loaded dataset. config (DictConfig): Confif with patams (model_params, logging, training, metrics). Default to None. config_path (str): Path with config structure (can be loaded with cli get_config). Default to None. overrides (list): Own params in list. Default to None. train_size (int): Own train size. Default to None. test (int): Own test size. Default to None. seed (int): The desired seed. Default to None. train_idx (list): List of train indices. test_idx (list): List of test indices. Examples -------- >>> from cool_graph.runners import HypeRunner >>> from torch_geometric import datasets >>> # loading amazon dataset >>> data = datasets.Amazon(root="./data/Amazon", name="Computers").data >>> runner = HypeRunner(data) >>> result = runner.run(optimize_run) Study statistics: Number of finished trials: 5 Number of complete trials: 5 Best trial: Value: 0.922 Params: {'conv_type': 'GraphConv', 'activation': 'leakyrelu', 'lin_prep_len': 1, 'lin_prep_dropout_rate': 0.4, 'lin_prep_weight_norm_flag': True, 'lin_prep_size_common': 512, 'lin_prep_sizes': [256], 'n_hops': 2, 'conv1_aggrs': {'mean': 128, 'max': 64, 'add': 32}, 'conv1_dropout_rate': 0.2, 'conv2_aggrs': {'mean': 64, 'max': 32, 'add': 16}, 'conv2_dropout_rate': 0.2, 'graph_conv_weight_norm_flag': True} """ def __init__( self, data: Data, config: Optional[DictConfig] = None, config_path: Optional[str] = None, overrides: Optional[List] = None, train_size: Optional[int] = None, test_size: Optional[int] = None, seed: Optional[int] = None, train_idx: Optional[List[int]] = None, test_idx: Optional[List[int]] = None, ): super().__init__( data, config, config_path, overrides, train_size, test_size, seed, train_idx, test_idx, ) if config is None: if config_path is None: config_path = os.path.join( os.path.dirname(__file__), "./config/in_memory_data.yaml" ) config = create_cfg( config=config_path, overrides=overrides, path_base="cfg" ) self.study = optuna.study def optimize_run( self, n_trials: int = 100, storage: Optional[str] = None, study_name: Optional[str] = None, enqueue_trial: Optional[List[Dict]] = None, ) -> pd.DataFrame: if not (hasattr(self, "train_loader") and hasattr(self, "test_loader")): self.init_loaders() """ Method for running objective function in Optuna. Args: n_trials (int, optional): The number of trials for each process. None represents no limit in terms of the number of trials. Defaults to 100. storage (Optional[str], optional): Database URL. 
If this argument is set to None, in-memory storage is used, and the Study will not be persistent. Defaults to None. study_name (Optional[str], optional): Study name. If this argument is set to None, a unique name is generated automatically. Defaults to None. enqueue_trial (Optional[List[Dict]], optional): Enqueue a trial with given parameter values. Defaults to None. Returns: trials_dataset (pd.DataFrame): Result dataframe with trial params. """ list_with_params = [] def objective(trial) -> float:
def create_cfg(config: str, overrides: List[str], path_base: str = "cfg") -> Dict: assert path_base in ("cfg", "cwd") core.global_hydra.GlobalHydra.instance().clear() if os.path.isabs(config): config_path = pathlib.Path(config).parent else: config_path = pathlib.Path(os.getcwd()) / pathlib.Path(config).parent config_name = pathlib.Path(config).name.replace(".yaml", "") initialize_config_dir(str(config_path), version_base=None) cfg = compose(config_name=config_name, overrides=overrides) return cfg class ConfigRunner: r"""Runner for cli mode. Using only in cli. This class allows to load data + split data per batchs + split data per train/val + training. See the config full.yaml in ./config for knowing what excactly using as data/logging/model_params/training/metrics. You can use default params, but also you can change it. Steps for changing confis: - make get_config --configs path_where_you_need_configs (default: new path ./configs by itself) """ def __init__(self, config: Optional[DictConfig]) -> None: cfg = OmegaConf.to_container(config, resolve=True) self.cfg = cfg self.target_names = cfg["training"]["targets"] self.groups_names = cfg["data"]["groups_names"] self.target_weights = cfg["training"]["loss"]["target_weights"] self.read_edge_attr = cfg["data"].get("read_edge_attr", True) self.batch_size = cfg["training"]["batch_size"] self.group_mask_col = cfg["data"]["group_mask_col"] self.label_mask_col = cfg["data"]["label_mask_col"] self.label_cols = cfg["data"]["label_cols"] self.label_index_col = cfg["data"]["label_index_col"] self.edge_index_cols = cfg["data"]["edge_index_cols"] self.num_neighbors = cfg["training"]["num_neighbors"] self.features_edges_names = cfg["data"].get("features_edges") self.group_names_node_features = cfg["data"]["features"] self.train_paths = cfg["data"]["train"] self.val_paths = cfg["data"]["validation"] self.metrics = cfg["metrics"] self.chkpt_dir = ( pathlib.Path(cfg["logging"]["checkpoint_dir"]) / str(datetime.now())[:19] ) os.makedirs(self.chkpt_dir, exist_ok=True) if self.cfg["logging"].get("use_mlflow", False): setup_mlflow_from_config(cfg["logging"]["mlflow"]) def init_loaders(self) -> None: """ Using RawDataProcessor from cool_graph.data for preprocessing data from disk. """ self.train_sampler = RawDataProcessor( self.groups_names, self.group_names_node_features, mon_nodes_path=self.train_paths["nodes_path"], mon_edges_path=self.train_paths["edges_path"], mon_labels_path=self.train_paths["labels_path"], edge_index_cols=self.edge_index_cols, label_index_col=self.label_index_col, label_mask_col=self.label_mask_col, read_edge_attr=self.read_edge_attr, group_mask_col=self.group_mask_col, features_edges_names=self.features_edges_names, label_cols=self.label_cols, target_names=self.target_names, ) self.val_sampler = RawDataProcessor( self.groups_names, self.group_names_node_features, mon_nodes_path=self.val_paths["nodes_path"], mon_edges_path=self.val_paths["edges_path"], mon_labels_path=self.val_paths["labels_path"], edge_index_cols=self.edge_index_cols, label_index_col=self.label_index_col, label_mask_col=self.label_mask_col, read_edge_attr=self.read_edge_attr, group_mask_col=self.group_mask_col, features_edges_names=self.features_edges_names, label_cols=self.label_cols, target_names=self.target_names, ) def sample_data( self, seed=0 ) -> Dict[Literal["train", "validation"], List[torch.utils.data.DataLoader]]: """ Sampling data in batches. 
""" if self.batch_size == "auto": self._batch_size = get_auto_batch_size( [len(v) for _, v in self.group_names_node_features.items()], conv_type=self.cfg["model_params"]["conv_type"], conv1_aggrs=self.cfg["model_params"]["conv1_aggrs"], conv2_aggrs=self.cfg["model_params"].get("conv2_aggrs"), conv3_aggrs=self.cfg["model_params"].get("conv3_aggrs"), n_hops=self.cfg["model_params"]["n_hops"], lin_prep_size_common=self.cfg["model_params"]["lin_prep_size_common"], lin_prep_sizes=self.cfg["model_params"]["lin_prep_sizes"], edge_attr_repr_sizes=self.cfg["model_params"].get( "edge_attr_repr_sizes" ), num_edge_features=len(self.cfg["data"].get("features_edges", [])), device=self.cfg["training"]["device"], num_neighbors=self.cfg["training"]["num_neighbors"], ) else: self._batch_size = self.batch_size train_loaders = self.train_sampler.sample_data( self.num_neighbors, self._batch_size, seed=seed ) val_loaders = self.val_sampler.sample_data( self.num_neighbors, self._batch_size, seed=seed ) return {"train": train_loaders, "validation": val_loaders} def run(self, seed: int = 0) -> Dict[str, float]: """ Train model for train_samples and val_sampler. Args: seed (int): seed for training. Default to 0. Returns: result (dict): Result of training for each 5 epochs with metrics from config. """ if not (hasattr(self, "train_sampler") and hasattr(self, "val_sampler")): self.init_loaders() sampled = self.sample_data(seed=seed) train_loaders = sampled["train"] val_loaders = sampled["validation"] self.trainer = Trainer( train_loaders, val_loaders, self.chkpt_dir, device=self.cfg["training"]["device"], eval_freq=self.cfg["training"]["eval_freq"], fill_value=self.cfg["training"]["loss"].get("fill_value"), initial_lr=self.cfg["training"].get("initial_lr", 0.01), weight_decay=self.cfg["training"].get("weight_decay", 0.0), loss_name=self.cfg["training"]["loss"]["name"], loss_label_smoothing=self.cfg["training"]["loss"].get( "label_smoothing", False ), loss_target_weights=self.cfg["training"]["loss"].get("target_weights"), loss_group_weights=self.cfg["training"]["loss"].get("group_weights"), groups_names=self.cfg["data"]["groups_names"], mlflow_experiment_name=self.cfg["logging"].get("mlflow_experiment_name"), n_epochs=self.cfg["training"].get("n_epochs"), scheduler_params=self.cfg["training"].get("scheduler_params", {}), scheduler_type=self.cfg["training"].get("scheduler_type"), target_names=self.cfg["training"]["targets"], use_mlflow=self.cfg["logging"].get("use_mlflow", False), tqdm_disable=False, **self.cfg["model_params"], groups_names_num_features={ k: len(v) for k, v in self.group_names_node_features.items() }, num_edge_features=len(self.cfg["data"].get("features_edges", [])), metrics=self.metrics, ) result = self.trainer.train() return result class BaseRunner: def __init__( self, data: Data, config: Optional[DictConfig] = None, config_path: Optional[str] = None, overrides: Optional[List] = None, train_size: Optional[int] = None, test_size: Optional[int] = None, seed: Optional[int] = None, train_idx: Optional[List[int]] = None, test_idx: Optional[List[int]] = None, use_edge_attr: bool = False, **kwargs, ) -> None: """ Main class for Basic runner and Runner with Optuna. Args: data (Data): A data object describing a homogeneous graph. The data object can hold node-level, link-level and graph-level attributes. In general, Data tries to mimic the behavior of a regular Python dictionary. In addition, it provides useful functionality for analyzing graph structures, and provides basic PyTorch tensor functionalities. 
https://pytorch-geometric.readthedocs.io/en/latest/get_started/introduction.html#data-handling-of-graphs config (DictConfig): Config. Defaults to None. config_path (str): Path to config. Defaults to None. overrides (list): Own params. Can ba params from configs and overrides. Defaults to None. train_size (int): Size for train data. Defaults to None. test_size (int): Size for test data. Defaults to None. seed (int): Seed param for training. Defaults to None. train_idx (list): Indices for train data. Defaults to None. test_idx (list): Indices for test data. Defaults to None. use_edge_attr (bool): If attributes exist on edges, it can be used in training. Defaults to False. """ if config is None: if config_path is None: if use_edge_attr: config_path = "./config/in_memory_data2.yaml" else: config_path = "./config/in_memory_data.yaml" config_path = os.path.join(os.path.dirname(__file__), config_path) config = create_cfg( config=config_path, overrides=overrides, path_base="cfg" ) cfg = OmegaConf.to_container(config, resolve=True) self.data = data self.cfg = cfg self.test_size = test_size self.train_size = train_size self.seed = seed self.train_idx = train_idx self.test_idx = test_idx self.use_edge_attr = use_edge_attr if use_edge_attr and data.edge_attr is None: raise BaseException( "data does not contain edge_attr, please set use_edge_attr=False" ) self.target_names = cfg["training"]["targets"] self.target_weights = cfg["training"]["loss"]["target_weights"] self.batch_size = cfg["training"]["batch_size"] self.num_neighbors = cfg["training"]["num_neighbors"] self.metrics = cfg["metrics"] self.data.group_mask = torch.zeros(len(data.x), dtype=torch.int8) self.data.label_mask = torch.ones(len(data.x), dtype=torch.bool) self.groups_names = {0: "x"} self.groups_names_num_features = {"x": data.x.shape[1]} if len(data.y.shape) == 2: self.target_sizes = [] self.target_names = [] self.target_weights = {} for i in range(data.y.shape[1]): y_sub = data.y[:, i] setattr(data, f"y{i}", y_sub) self.target_sizes.append(len(y_sub.unique())) self.target_names.append(f"y{i}") self.target_weights[f"y{i}"] = 1 else: self.target_names = ["y"] self.target_sizes = [len(data.y.unique())] self.target_weights = {"y": 1} if use_edge_attr: self.num_edge_features = data.edge_attr.shape[1] else: self.num_edge_features = 0 self.chkpt_dir = ( pathlib.Path(cfg["logging"]["checkpoint_dir"]) / str(datetime.now())[:19] ) for k, v in kwargs.items(): setattr(self, k, v) if self.cfg["logging"].get("use_mlflow", False): setup_mlflow_from_config(cfg["logging"]["mlflow"]) def init_loaders(self) -> None: """ Sampling data into batches and sampling data with NeighborLoader into list loaders. 
""" if self.batch_size == "auto": self._batch_size = get_auto_batch_size( [ self.groups_names_num_features[self.groups_names[i]] for i in range(len(self.groups_names)) ], conv_type=self.cfg["model_params"]["conv_type"], conv1_aggrs=self.cfg["model_params"]["conv1_aggrs"], conv2_aggrs=self.cfg["model_params"].get("conv2_aggrs"), conv3_aggrs=self.cfg["model_params"].get("conv3_aggrs"), n_hops=self.cfg["model_params"]["n_hops"], lin_prep_size_common=self.cfg["model_params"]["lin_prep_size_common"], lin_prep_sizes=self.cfg["model_params"]["lin_prep_sizes"], edge_attr_repr_sizes=self.cfg["model_params"].get( "edge_attr_repr_sizes" ), num_edge_features=self.num_edge_features, device=self.cfg["training"]["device"], num_neighbors=self.num_neighbors, ) else: self._batch_size = self.batch_size if (self.train_idx is None) or (self.test_idx is None): train_idx, test_idx = train_test_split( torch.nonzero(self.data.label_mask)[:, 0], train_size=self.train_size, test_size=self.test_size, random_state=self.seed, shuffle=True, ) self.train_idx = train_idx self.test_idx = test_idx def sample_date_prerpoc(sampled_data: Data) -> Data: sampled_data.label_mask[sampled_data.batch_size :] = False for group, name in self.groups_names.items(): x = getattr(sampled_data, name)[sampled_data.group_mask == group] setattr(sampled_data, name, x) return sampled_data loader_train = NeighborLoader( self.data, num_neighbors=self.num_neighbors, batch_size=self._batch_size, shuffle=True, input_nodes=self.train_idx, ) list_loader_train = [] for sampled_data in tqdm(loader_train, desc="Sample data"): list_loader_train.append(sample_date_prerpoc(sampled_data)) self.train_loader = list_loader_train loader_test = NeighborLoader( self.data, num_neighbors=self.num_neighbors, batch_size=self._batch_size, shuffle=True, input_nodes=self.test_idx, ) list_loader_test = [] for sampled_data in tqdm(loader_test, desc="Sample data"): list_loader_test.append(sample_date_prerpoc(sampled_data)) self.test_loader = list_loader_test class Runner(BaseRunner): """ Runner for notebook launch. Args: data (Data): A data object describing a homogeneous graph. The data object can hold node-level, link-level and graph-level attributes. In general, Data tries to mimic the behavior of a regular Python dictionary. In addition, it provides useful functionality for analyzing graph structures, and provides basic PyTorch tensor functionalities. https://pytorch-geometric.readthedocs.io/en/latest/get_started/introduction.html#data-handling-of-graphs config (DictConfig): Config. Defaults to None. config_path (str): Path to config. Defaults to None. overrides (list): Own params. Can ba params from configs and overrides. Defaults to None. train_size (int): Size for train data. Defaults to None. test_size (int): Size for test data. Defaults to None. seed (int): Seed param for training. Defaults to None. train_idx (int): Indices for train data. Defaults to None. test_idx (int): Indices for test data. Defaults to None. use_edge_attr (bool): If attributes exist on edges, it can be used in training. Defaults to False. 
Examples -------- >>> from cool_graph.runners import Runner >>> from torch_geometric import datasets >>> # loading amazon dataset >>> data = datasets.Amazon(root="./data/Amazon", name="Computers").data >>> runner = Runner(data) >>> result = runner.run() >>> result["best_loss"] {'accuracy': 0.916, 'cross_entropy': 0.286, 'f1_micro': 0.916, 'calc_time': 0.004, 'main_metric': 0.916, 'epoch': 10} Also you can override params in Runner: runner = Runner(data, metrics=['accuracy'], batch_size='auto', train_size=0.7, test_size=0.3, overrides=['training.n_epochs=1'], config_path=path/to/config) result = runner.run() """ def __init__( self, data: Data, config: Optional[DictConfig] = None, config_path: Optional[str] = None, overrides: Optional[List] = None, train_size: Optional[int] = None, test_size: Optional[int] = None, seed: Optional[int] = None, train_idx: Optional[List[int]] = None, test_idx: Optional[List[int]] = None, use_edge_attr: bool = False, **kwargs, ): super().__init__( data, config, config_path, overrides, train_size, test_size, seed, train_idx, test_idx, use_edge_attr, **kwargs, ) def run(self) -> Dict[str, float]: """ Training model with params in_memory_data/in_memory_data2 config. See the configs in ./config for knowing what excactly using as logging/model_params/training/metrics. You can use default params, but also you can change it. Steps for changing confis: - make get_config --configs path_where_you_need_configs (default: new path ./configs by itself) """ if not (hasattr(self, "train_loader") and hasattr(self, "test_loader")): self.init_loaders() self.trainer = Trainer( self.train_loader, self.test_loader, self.chkpt_dir, device=self.cfg["training"]["device"], eval_freq=self.cfg["training"]["eval_freq"], fill_value=self.cfg["training"]["loss"].get("fill_value"), initial_lr=self.cfg["training"].get("initial_lr", 0.01), weight_decay=self.cfg["training"].get("weight_decay", 0.0), loss_name=self.cfg["training"]["loss"]["name"], loss_label_smoothing=self.cfg["training"]["loss"].get( "label_smoothing", False ), loss_target_weights=self.target_weights, loss_group_weights=self.cfg["training"]["loss"].get("group_weights"), groups_names=self.groups_names, mlflow_experiment_name=self.cfg["logging"].get("mlflow_experiment_name"), n_epochs=self.cfg["training"].get("n_epochs"), scheduler_params=self.cfg["training"].get("scheduler_params", {}), scheduler_type=self.cfg["training"].get("scheduler_type"), target_names=self.target_names, use_mlflow=self.cfg["logging"].get("use_mlflow", False), tqdm_disable=False, target_sizes=self.target_sizes, **self.cfg["model_params"], groups_names_num_features=self.groups_names_num_features, num_edge_features=self.num_edge_features, metrics=self.metrics, log_all_metrics=False, ) result = self.trainer.train() return result class HypeRunner(BaseRunner): """ Runner for optimization model with Optuna. https://optuna.readthedocs.io/en/stable/reference/index.html 1st trial - with default config params (hyper_params). Also, 2nd trial - you can add own trial as argument enqueue_trial in optimazire_run method, and next trial optuna optimize model params randomly, if set None randomly optimization after 1st default trial. Args: data (Data): Loaded dataset. config (DictConfig): Confif with patams (model_params, logging, training, metrics). Default to None. config_path (str): Path with config structure (can be loaded with cli get_config). Default to None. overrides (list): Own params in list. Default to None. train_size (int): Own train size. Default to None. 
test (int): Own test size. Default to None. seed (int): The desired seed. Default to None. train_idx (list): List of train indices. test_idx (list): List of test indices. Examples -------- >>> from cool_graph.runners import HypeRunner >>> from torch_geometric import datasets >>> # loading amazon dataset >>> data = datasets.Amazon(root="./data/Amazon", name="Computers").data >>> runner = HypeRunner(data) >>> result = runner.run(optimize_run) Study statistics: Number of finished trials: 5 Number of complete trials: 5 Best trial: Value: 0.922 Params: {'conv_type': 'GraphConv', 'activation': 'leakyrelu', 'lin_prep_len': 1, 'lin_prep_dropout_rate': 0.4, 'lin_prep_weight_norm_flag': True, 'lin_prep_size_common': 512, 'lin_prep_sizes': [256], 'n_hops': 2, 'conv1_aggrs': {'mean': 128, 'max': 64, 'add': 32}, 'conv1_dropout_rate': 0.2, 'conv2_aggrs': {'mean': 64, 'max': 32, 'add': 16}, 'conv2_dropout_rate': 0.2, 'graph_conv_weight_norm_flag': True} """ def __init__( self, data: Data, config: Optional[DictConfig] = None, config_path: Optional[str] = None, overrides: Optional[List] = None, train_size: Optional[int] = None, test_size: Optional[int] = None, seed: Optional[int] = None, train_idx: Optional[List[int]] = None, test_idx: Optional[List[int]] = None, ): super().__init__( data, config, config_path, overrides, train_size, test_size, seed, train_idx, test_idx, ) if config is None: if config_path is None: config_path = os.path.join( os.path.dirname(__file__), "./config/in_memory_data.yaml" ) config = create_cfg( config=config_path, overrides=overrides, path_base="cfg" ) self.study = optuna.study def optimize_run( self, n_trials: int = 100, storage: Optional[str] = None, study_name: Optional[str] = None, enqueue_trial: Optional[List[Dict]] = None, ) -> pd.DataFrame: if not (hasattr(self, "train_loader") and hasattr(self, "test_loader")): self.init_loaders() """ Method for running objective function in Optuna. Args: n_trials (int, optional): The number of trials for each process. None represents no limit in terms of the number of trials. Defaults to 100. storage (Optional[str], optional): Database URL. If this argument is set to None, in-memory storage is used, and the Study will not be persistent. Defaults to None. study_name (Optional[str], optional): Study name. If this argument is set to None, a unique name is generated automatically. Defaults to None. enqueue_trial (Optional[List[Dict]], optional): Enqueue a trial with given parameter values. Defaults to None. Returns: trials_dataset (pd.DataFrame): Result dataframe with trial params. """ list_with_params = [] def objective(trial) -> float:
self.cfg["model_params"] = sample_model_params(
5
2023-11-22 09:44:16+00:00
16k
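The HypeRunner docstring in the record above describes an Optuna loop whose first trial evaluates the config's default hyperparameters (queued via enqueue_trial) before sampled trials take over. Here is a self-contained sketch of that pattern, with a toy objective and hypothetical parameter names standing in for CoolGraph's model_params and Trainer:

import optuna

def objective(trial):
    # Hypothetical search space; in the record this role is played by
    # sample_model_params(trial).
    lr = trial.suggest_float("initial_lr", 1e-4, 1e-1, log=True)
    hidden = trial.suggest_categorical("lin_prep_size_common", [128, 256, 512])
    # Toy stand-in for Trainer(...).train() returning the target metric.
    return 1.0 - abs(lr - 1e-2) - abs(hidden - 256) / 1024

study = optuna.create_study(direction="maximize")
# Trial 1: the default config values, evaluated before any sampled trials.
study.enqueue_trial({"initial_lr": 1e-2, "lin_prep_size_common": 512})
study.optimize(objective, n_trials=5)
print(study.best_trial.params)

Queuing the defaults first gives the search a known baseline, so even a short study cannot end up worse than the hand-tuned config.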
HeliosZhao/Animate124
nerf/network_grid_taichi.py
[ { "identifier": "trunc_exp", "path": "activation.py", "snippet": "class _trunc_exp(Function):\n def forward(ctx, x):\n def backward(ctx, g):\ndef biased_softplus(x, bias=0):" }, { "identifier": "NeRFRenderer", "path": "nerf/renderer.py", "snippet": "class NeRFRenderer(nn.Module):\n...
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from activation import trunc_exp
from .renderer import NeRFRenderer
from encoding import get_encoder
from .utils import safe_normalize
from tqdm import tqdm
13,213
class MLP(nn.Module):
    def __init__(self, dim_in, dim_out, dim_hidden, num_layers, bias=True):
        super().__init__()
        self.dim_in = dim_in
        self.dim_out = dim_out
        self.dim_hidden = dim_hidden
        self.num_layers = num_layers

        net = []
        for l in range(num_layers):
            net.append(nn.Linear(self.dim_in if l == 0 else self.dim_hidden,
                                 self.dim_out if l == num_layers - 1 else self.dim_hidden,
                                 bias=bias))
        self.net = nn.ModuleList(net)

    def forward(self, x):
        for l in range(self.num_layers):
            x = self.net[l](x)
            if l != self.num_layers - 1:
                x = F.relu(x, inplace=True)
        return x

    def reset_parameters(self):
        @torch.no_grad()
        def weight_init(m):
            if isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight, gain=nn.init.calculate_gain('relu'))
                nn.init.zeros_(m.bias)
        self.apply(weight_init)
class MLP(nn.Module):
    def __init__(self, dim_in, dim_out, dim_hidden, num_layers, bias=True):
        super().__init__()
        self.dim_in = dim_in
        self.dim_out = dim_out
        self.dim_hidden = dim_hidden
        self.num_layers = num_layers

        net = []
        for l in range(num_layers):
            net.append(nn.Linear(self.dim_in if l == 0 else self.dim_hidden,
                                 self.dim_out if l == num_layers - 1 else self.dim_hidden,
                                 bias=bias))
        self.net = nn.ModuleList(net)

    def forward(self, x):
        for l in range(self.num_layers):
            x = self.net[l](x)
            if l != self.num_layers - 1:
                x = F.relu(x, inplace=True)
        return x

    def reset_parameters(self):
        @torch.no_grad()
        def weight_init(m):
            if isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight, gain=nn.init.calculate_gain('relu'))
                nn.init.zeros_(m.bias)
        self.apply(weight_init)
class NeRFNetwork(NeRFRenderer):
1
2023-11-23 10:34:08+00:00
16k
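The record's context lists a custom trunc_exp autograd function used as the density activation, but its body is not shown. The sketch below follows the widely used stable-dreamfusion formulation (exp in the forward pass, gradient computed from the input clamped at 15); the clamp threshold is an assumption, not taken from this repo:

import torch
from torch.autograd import Function

class _TruncExp(Function):
    # exp() whose backward clamps the saved input, so very large raw
    # densities cannot blow up gradients during NeRF training.
    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)
        return torch.exp(x)

    @staticmethod
    def backward(ctx, g):
        (x,) = ctx.saved_tensors
        return g * torch.exp(x.clamp(max=15))  # assumed cutoff of 15

trunc_exp = _TruncExp.apply

x = torch.tensor([20.0], requires_grad=True)
trunc_exp(x).sum().backward()
print(x.grad)  # exp(15) ~= 3.3e6 rather than exp(20) ~= 4.9e8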
alexzhou907/DreamPropeller
extern/ldm_zero123/models/diffusion/ddpm.py
[ { "identifier": "AutoencoderKL", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(\n self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"i...
import itertools
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn as nn
from contextlib import contextmanager, nullcontext
from functools import partial
from einops import rearrange, repeat
from omegaconf import ListConfig
from pytorch_lightning.utilities.rank_zero import rank_zero_only
from torch.optim.lr_scheduler import LambdaLR
from torchvision.utils import make_grid
from tqdm import tqdm
from extern.ldm_zero123.models.autoencoder import (
    AutoencoderKL,
    IdentityFirstStage,
    VQModelInterface,
)
from extern.ldm_zero123.models.diffusion.ddim import DDIMSampler
from extern.ldm_zero123.modules.attention import CrossAttention
from extern.ldm_zero123.modules.diffusionmodules.util import (
    extract_into_tensor,
    make_beta_schedule,
    noise_like,
)
from extern.ldm_zero123.modules.distributions.distributions import (
    DiagonalGaussianDistribution,
    normal_kl,
)
from extern.ldm_zero123.modules.ema import LitEma
from extern.ldm_zero123.util import (
    count_params,
    default,
    exists,
    instantiate_from_config,
    isimage,
    ismap,
    log_txt_as_img,
    mean_flat,
)
11,983
else list(map(lambda x: x[:batch_size], cond[key])) for key in cond } else: cond = ( [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] ) return self.p_sample_loop( cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0, ) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample( ddim_steps, batch_size, shape, cond, verbose=False, **kwargs ) else: samples, intermediates = self.sample( cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs ) return samples, intermediates @torch.no_grad() def get_unconditional_conditioning( self, batch_size, null_label=None, image_size=512 ): if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = xc.to(self.device) c = self.get_learned_conditioning(xc) else: # todo: get null label from cond_stage_model raise NotImplementedError() c = repeat(c, "1 ... -> b ...", b=batch_size).to(self.device) cond = {} cond["c_crossattn"] = [c] cond["c_concat"] = [ torch.zeros([batch_size, 4, image_size // 8, image_size // 8]).to( self.device ) ] return cond @torch.no_grad() def log_images( self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1.0, return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1.0, unconditional_guidance_label=None, use_ema_scope=True, **kwargs, ): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input( batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N, ) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]: xc = log_txt_as_img( (x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25, ) log["conditioning"] = xc elif self.cond_stage_key == "class_label": xc = log_txt_as_img( (x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25, ) log["conditioning"] = xc elif isimage(xc): log["conditioning"] = xc
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {"concat": "c_concat", "crossattn": "c_crossattn", "adm": "y"} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__( self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0.0, v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1.0, conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0.0, make_it_fit=False, ucg_training=None, ): super().__init__() assert parameterization in [ "eps", "x0", ], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print( f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode" ) self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? 
self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if ckpt_path is not None: self.init_from_ckpt( ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet ) self.register_schedule( given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, ): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule( beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) alphas = 1.0 - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1]) (timesteps,) = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert ( alphas_cumprod.shape[0] == self.num_timesteps ), "alphas have to be defined for each timestep" to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer("betas", to_torch(betas)) self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod)) self.register_buffer("alphas_cumprod_prev", to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer("sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod))) self.register_buffer( "sqrt_one_minus_alphas_cumprod", to_torch(np.sqrt(1.0 - alphas_cumprod)) ) self.register_buffer( "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod)) ) self.register_buffer( "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod)) ) self.register_buffer( "sqrt_recipm1_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod - 1)) ) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * ( 1.0 - alphas_cumprod_prev ) / (1.0 - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. 
- alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer("posterior_variance", to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer( "posterior_log_variance_clipped", to_torch(np.log(np.maximum(posterior_variance, 1e-20))), ) self.register_buffer( "posterior_mean_coef1", to_torch(betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)), ) self.register_buffer( "posterior_mean_coef2", to_torch( (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod) ), ) if self.parameterization == "eps": lvlb_weights = self.betas**2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod) ) elif self.parameterization == "x0": lvlb_weights = ( 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2.0 * 1 - torch.Tensor(alphas_cumprod)) ) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer("lvlb_weights", lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) if self.make_it_fit: n_params = len( [ name for name, _ in itertools.chain( self.named_parameters(), self.named_buffers() ) ] ) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params, ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[ i % old_shape[0], j % old_shape[1] ] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = ( self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(sd, strict=False) ) print( f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys" ) if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. 
:return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor( self.log_one_minus_alphas_cumprod, t, x_start.shape ) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor( self.posterior_log_variance_clipped, t, x_t.shape ) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1.0, 1.0) model_mean, posterior_variance, posterior_log_variance = self.q_posterior( x_start=x_recon, x_t=x, t=t ) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance( x=x, t=t, clip_denoised=clip_denoised ) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm( reversed(range(0, self.num_timesteps)), desc="Sampling t", total=self.num_timesteps, ): img = self.p_sample( img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised, ) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop( (batch_size, channels, image_size, image_size), return_intermediates=return_intermediates, ) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise ) def get_loss(self, pred, target, mean=True): if self.loss_type == "l1": loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == "l2": if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction="none") else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = 
self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start else: raise NotImplementedError( f"Paramterization {self.parameterization} not yet supported" ) loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = "train" if self.training else "val" loss_dict.update({f"{log_prefix}/loss_simple": loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f"{log_prefix}/loss_vlb": loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f"{log_prefix}/loss": loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint( 0, self.num_timesteps, (x.shape[0],), device=self.device ).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, "b h w c -> b c h w") x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if self.ucg_prng.choice(2, p=[1 - p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict( loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True ) self.log( "global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False, ) if self.use_scheduler: lr = self.optimizers().param_groups[0]["lr"] self.log( "lr_abs", lr, prog_bar=True, logger=True, on_step=True, on_epoch=False ) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + "_ema": loss_dict_ema[key] for key in loss_dict_ema} self.log_dict( loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True ) self.log_dict( loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True ) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, "n b c h w -> b n c h w") denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), "1 -> b", b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = 
self.sample( batch_size=N, return_intermediates=True ) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__( self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, unet_trainable=True, *args, **kwargs, ): self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs["timesteps"] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = "concat" if concat_mode else "crossattn" if cond_stage_config == "__is_unconditional__": conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.unet_trainable = unet_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer("scale_factor", torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward # construct linear projection layer for concatenating image CLIP embedding and RT self.cc_projection = nn.Linear(772, 768) nn.init.eye_(list(self.cc_projection.parameters())[0][:768, :768]) nn.init.zeros_(list(self.cc_projection.parameters())[1]) self.cc_projection.requires_grad_(True) self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True def make_cond_schedule( self, ): self.cond_ids = torch.full( size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long, ) ids = torch.round( torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond) ).long() self.cond_ids[: self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if ( self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt ): assert ( self.scale_factor == 1.0 ), "rather not use custom rescaling and std-rescaling simultaneously" # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer("scale_factor", 1.0 / z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, 
linear_end=2e-2, cosine_s=8e-3, ): super().register_schedule( given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s ) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != "__is_first_stage__" assert config != "__is_unconditional__" model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list( self, samples, desc="", force_no_decoder_quantization=False ): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append( self.decode_first_stage( zd.to(self.device), force_not_quantize=force_no_decoder_quantization ) ) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, "n b c h w -> b n c h w") denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError( f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented" ) return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, "encode") and callable( self.cond_stage_model.encode ): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min( torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1 )[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, device): weighting = self.delta_border(h, w) weighting = torch.clip( weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 
1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip( L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"], ) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold( self, x, kernel_size, stride, uf=1, df=1 ): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting( kernel_size[0], kernel_size[1], Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict( kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf), ) fold = torch.nn.Fold( output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2 ) weighting = self.get_weighting( kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view( 1, 1, h * uf, w * uf ) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx) ) elif df > 1 and uf == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict( kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df), ) fold = torch.nn.Fold( output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2 ) weighting = self.get_weighting( kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view( 1, 1, h // df, w // df ) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx) ) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input( self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, uncond=0.05, ): x = super().get_input(batch, k) T = batch["T"].to(memory_format=torch.contiguous_format).float() if bs is not None: x = x[:bs] T = T[:bs].to(self.device) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() cond_key = cond_key or self.cond_stage_key xc = super().get_input(batch, cond_key).to(self.device) if bs is not None: xc = xc[:bs] cond = {} # To support classifier-free guidance, randomly drop out only text conditioning 5%, only image conditioning 5%, and both 5%. 
random = torch.rand(x.size(0), device=x.device) prompt_mask = rearrange(random < 2 * uncond, "n -> n 1 1") input_mask = 1 - rearrange( (random >= uncond).float() * (random < 3 * uncond).float(), "n -> n 1 1 1" ) null_prompt = self.get_learned_conditioning([""]) # z.shape: [8, 4, 64, 64]; c.shape: [8, 1, 768] # print('=========== xc shape ===========', xc.shape) with torch.enable_grad(): clip_emb = self.get_learned_conditioning(xc).detach() null_prompt = self.get_learned_conditioning([""]).detach() cond["c_crossattn"] = [ self.cc_projection( torch.cat( [ torch.where(prompt_mask, null_prompt, clip_emb), T[:, None, :], ], dim=-1, ) ) ] cond["c_concat"] = [ input_mask * self.encode_first_stage((xc.to(self.device))).mode().detach() ] out = [z, cond] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out # @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, "b h w c -> b c h w").contiguous() z = 1.0 / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold( z, ks, stride, uf=uf ) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view( (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [ self.first_stage_model.decode( z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize, ) for i in range(z.shape[-1]) ] else: output_list = [ self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1]) ] o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode( z, force_not_quantize=predict_cids or force_not_quantize ) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode( z, force_not_quantize=predict_cids or force_not_quantize ) else: return self.first_stage_model.decode(z) # @torch.no_grad() # wasted two hours to find this bug... why no grad here! def encode_first_stage(self, x): if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. 
(64, 64) df = self.split_input_params["vqf"] self.split_input_params["original_image_size"] = x.shape[-2:] bs, nc, h, w = x.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold( x, ks, stride, df=df ) z = unfold(x) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view( (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) output_list = [ self.first_stage_model.encode(z[:, :, :, :, i]) for i in range(z.shape[-1]) ] o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization return decoded else: return self.first_stage_model.encode(x) else: return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): x, c = self.get_input(batch, self.first_stage_key) loss = self(x, c) return loss def forward(self, x, c, *args, **kwargs): t = torch.randint( 0, self.num_timesteps, (x.shape[0],), device=self.device ).long() if self.model.conditioning_key is not None: assert c is not None # if self.cond_stage_trainable: # c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset def rescale_bbox(bbox): x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) w = min(bbox[2] / crop_coordinates[2], 1 - x0) h = min(bbox[3] / crop_coordinates[3], 1 - y0) return x0, y0, w, h return [rescale_bbox(b) for b in bboxes] def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): # hybrid case, cond is exptected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = ( "c_concat" if self.model.conditioning_key == "concat" else "c_crossattn" ) cond = {key: cond} if hasattr(self, "split_input_params"): assert len(cond) == 1 # todo can only deal with one conditioning atm assert not return_ids ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. 
(64, 64) h, w = x_noisy.shape[-2:] fold, unfold, normalization, weighting = self.get_fold_unfold( x_noisy, ks, stride ) z = unfold(x_noisy) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view( (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] if ( self.cond_stage_key in ["image", "LR_image", "segmentation", "bbox_img"] and self.model.conditioning_key ): # todo check for completeness c_key = next(iter(cond.keys())) # get key c = next(iter(cond.values())) # get value assert len(c) == 1 # todo extend to list with more than one elem c = c[0] # get element c = unfold(c) c = c.view( (c.shape[0], -1, ks[0], ks[1], c.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])] elif self.cond_stage_key == "coordinates_bbox": assert ( "original_image_size" in self.split_input_params ), "BoudingBoxRescaling is missing original_image_size" # assuming padding of unfold is always 0 and its dilation is always 1 n_patches_per_row = int((w - ks[0]) / stride[0] + 1) full_img_h, full_img_w = self.split_input_params["original_image_size"] # as we are operating on latents, we need the factor from the original image size to the # spatial latent size to properly rescale the crops for regenerating the bbox annotations num_downs = self.first_stage_model.encoder.num_resolutions - 1 rescale_latent = 2 ** (num_downs) # get top left postions of patches as conforming for the bbbox tokenizer, therefore we # need to rescale the tl patch coordinates to be in between (0,1) tl_patch_coordinates = [ ( rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h, ) for patch_nr in range(z.shape[-1]) ] # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) patch_limits = [ ( x_tl, y_tl, rescale_latent * ks[0] / full_img_w, rescale_latent * ks[1] / full_img_h, ) for x_tl, y_tl in tl_patch_coordinates ] # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] # tokenize crop coordinates for the bounding boxes of the respective patches patch_limits_tknzd = [ torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to( self.device ) for bbox in patch_limits ] # list of length l with tensors of shape (1, 2) # cut tknzd crop position from conditioning assert isinstance(cond, dict), "cond must be dict to be fed into model" cut_cond = cond["c_crossattn"][0][..., :-2].to(self.device) adapted_cond = torch.stack( [torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd] ) adapted_cond = rearrange(adapted_cond, "l b n -> (l b) n") adapted_cond = self.get_learned_conditioning(adapted_cond) adapted_cond = rearrange( adapted_cond, "(l b) n d -> l b n d", l=z.shape[-1] ) cond_list = [{"c_crossattn": [e]} for e in adapted_cond] else: cond_list = [ cond for i in range(z.shape[-1]) ] # Todo make this more efficient # apply model by loop over crops output_list = [ self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1]) ] assert not isinstance( output_list[0], tuple ) # todo cant deal with multiple model outputs check this never happens o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together x_recon = fold(o) / normalization else: x_recon = self.model(x_noisy, t, **cond) if 
isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart ) / extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl( mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0 ) return mean_flat(kl_prior) / np.log(2.0) def p_losses(self, x_start, cond, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_output = self.apply_model(x_noisy, t, cond) loss_dict = {} prefix = "train" if self.training else "val" if self.parameterization == "x0": target = x_start elif self.parameterization == "eps": target = noise else: raise NotImplementedError() loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) loss_dict.update({f"{prefix}/loss_simple": loss_simple.mean()}) logvar_t = self.logvar[t].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t # loss = loss_simple / torch.exp(self.logvar) + self.logvar if self.learn_logvar: loss_dict.update({f"{prefix}/loss_gamma": loss.mean()}) loss_dict.update({"logvar": self.logvar.data.mean()}) loss = self.l_simple_weight * loss.mean() loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() loss_dict.update({f"{prefix}/loss_vlb": loss_vlb}) loss += self.original_elbo_weight * loss_vlb loss_dict.update({f"{prefix}/loss": loss}) return loss, loss_dict def p_mean_variance( self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, return_x0=False, score_corrector=None, corrector_kwargs=None, ): t_in = t model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) if score_corrector is not None: assert self.parameterization == "eps" model_out = score_corrector.modify_score( self, model_out, x, t, c, **corrector_kwargs ) if return_codebook_ids: model_out, logits = model_out if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out else: raise NotImplementedError() if clip_denoised: x_recon.clamp_(-1.0, 1.0) if quantize_denoised: x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) model_mean, posterior_variance, posterior_log_variance = self.q_posterior( x_start=x_recon, x_t=x, t=t ) if return_codebook_ids: return model_mean, posterior_variance, posterior_log_variance, logits elif return_x0: return model_mean, posterior_variance, posterior_log_variance, x_recon else: return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample( self, x, c, t, clip_denoised=False, repeat_noise=False, return_codebook_ids=False, quantize_denoised=False, return_x0=False, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, ): b, *_, device = *x.shape, x.device outputs = self.p_mean_variance( x=x, 
c=c, t=t, clip_denoised=clip_denoised, return_codebook_ids=return_codebook_ids, quantize_denoised=quantize_denoised, return_x0=return_x0, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, ) if return_codebook_ids: raise DeprecationWarning("Support dropped.") model_mean, _, model_log_variance, logits = outputs elif return_x0: model_mean, _, model_log_variance, x0 = outputs else: model_mean, _, model_log_variance = outputs noise = noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.0: noise = torch.nn.functional.dropout(noise, p=noise_dropout) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) if return_codebook_ids: return model_mean + nonzero_mask * ( 0.5 * model_log_variance ).exp() * noise, logits.argmax(dim=1) if return_x0: return ( model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0, ) else: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def progressive_denoising( self, cond, shape, verbose=True, callback=None, quantize_denoised=False, img_callback=None, mask=None, x0=None, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, log_every_t=None, ): if not log_every_t: log_every_t = self.log_every_t timesteps = self.num_timesteps if batch_size is not None: b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = { key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond } else: cond = ( [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] ) if start_T is not None: timesteps = min(timesteps, start_T) iterator = ( tqdm( reversed(range(0, timesteps)), desc="Progressive Generation", total=timesteps, ) if verbose else reversed(range(0, timesteps)) ) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != "hybrid" tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img, x0_partial = self.p_sample( img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, ) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1.0 - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop( self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None, ): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator 
= ( tqdm(reversed(range(0, timesteps)), desc="Sampling t", total=timesteps) if verbose else reversed(range(0, timesteps)) ) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != "hybrid" tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample( img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, ) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1.0 - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample( self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs, ): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = { key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond } else: cond = ( [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] ) return self.p_sample_loop( cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0, ) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample( ddim_steps, batch_size, shape, cond, verbose=False, **kwargs ) else: samples, intermediates = self.sample( cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs ) return samples, intermediates @torch.no_grad() def get_unconditional_conditioning( self, batch_size, null_label=None, image_size=512 ): if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = xc.to(self.device) c = self.get_learned_conditioning(xc) else: # todo: get null label from cond_stage_model raise NotImplementedError() c = repeat(c, "1 ... 
-> b ...", b=batch_size).to(self.device) cond = {} cond["c_crossattn"] = [c] cond["c_concat"] = [ torch.zeros([batch_size, 4, image_size // 8, image_size // 8]).to( self.device ) ] return cond @torch.no_grad() def log_images( self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1.0, return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1.0, unconditional_guidance_label=None, use_ema_scope=True, **kwargs, ): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input( batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N, ) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]: xc = log_txt_as_img( (x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25, ) log["conditioning"] = xc elif self.cond_stage_key == "class_label": xc = log_txt_as_img( (x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25, ) log["conditioning"] = xc elif isimage(xc): log["conditioning"] = xc
if ismap(xc):
16
2023-11-27 23:39:49+00:00
16k
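The get_fold_unfold/decode_first_stage machinery in the record above stitches overlapping latent crops back together by multiplying each crop with a weighting map and dividing the folded result by the folded weights. A minimal, self-contained sketch of that round trip, using uniform weights instead of the smoothed, tie-broken map the model builds (an assumption made for brevity); with an identity per-crop operation the overlap-add reconstructs the input exactly:

import torch

ks, stride = (8, 8), (4, 4)                   # crop size and stride, so crops overlap
x = torch.randn(1, 3, 16, 16)                 # (bs, nc, h, w)
Ly = (x.shape[2] - ks[0]) // stride[0] + 1    # number of crops per column
Lx = (x.shape[3] - ks[1]) // stride[1] + 1    # number of crops per row

unfold = torch.nn.Unfold(kernel_size=ks, stride=stride)
fold = torch.nn.Fold(output_size=x.shape[2:], kernel_size=ks, stride=stride)

weighting = torch.ones(1, 1, ks[0], ks[1], Ly * Lx)              # uniform weights (assumption)
normalization = fold(weighting.view(1, ks[0] * ks[1], Ly * Lx))  # per-pixel sum of the weights

crops = unfold(x).view(1, 3, ks[0], ks[1], Ly * Lx)  # (bs, nc, ks[0], ks[1], L)
processed = crops * weighting                        # stand-in for the per-crop model call
stitched = fold(processed.view(1, -1, Ly * Lx)) / normalization
assert torch.allclose(stitched, x, atol=1e-6)        # overlap-add recovers the input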
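The conditioning dropout in get_input above is worth isolating, because the three 5% regimes come from overlapping intervals of a single uniform draw: with uncond=0.05, draws in [0, 0.05) replace only the text prompt with the null prompt, [0.05, 0.10) drop both text and image, and [0.10, 0.15) zero only the image conditioning. A sketch of just the mask construction (the embeddings the masks are applied to belong to the model and are not built here):

import torch
from einops import rearrange

def cfg_dropout_masks(batch_size: int, uncond: float = 0.05):
    """Recreate the text/image dropout masks used above for classifier-free guidance."""
    random = torch.rand(batch_size)
    # True where the text embedding is swapped for the null prompt: draws in [0, 2*uncond).
    prompt_mask = rearrange(random < 2 * uncond, "n -> n 1 1")
    # 0.0 where the image conditioning is zeroed: draws in [uncond, 3*uncond).
    input_mask = 1 - rearrange(
        (random >= uncond).float() * (random < 3 * uncond).float(), "n -> n 1 1 1"
    )
    return prompt_mask, input_mask

prompt_mask, input_mask = cfg_dropout_masks(batch_size=8)
# Applied as: torch.where(prompt_mask, null_prompt, clip_emb) and input_mask * image_latent.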
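One more detail from p_sample above: the nonzero_mask suppresses the injected noise on the final denoising step, so at t == 0 the returned sample is exactly the posterior mean. A toy-shape sketch of that masking (all tensors here are stand-ins):

import torch

b = 4
x = torch.randn(b, 3, 8, 8)
model_mean = torch.zeros_like(x)          # stand-in for the predicted posterior mean
model_log_variance = torch.zeros_like(x)  # stand-in for the posterior log-variance
noise = torch.randn_like(x)

t = torch.tensor([3, 2, 1, 0])  # per-sample timesteps; the last sample is at the final step
# 1.0 while t > 0, 0.0 at t == 0, reshaped to broadcast over (c, h, w).
nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
sample = model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
assert torch.equal(sample[3], model_mean[3])  # no noise added at t == 0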
CineMingle/CineMingle
Movie_Data_Capture.py
[ { "identifier": "get_data_from_json", "path": "scraper.py", "snippet": "def get_data_from_json(\n file_number: str,\n open_cc: opencc.OpenCC,\n specified_source: str, specified_url: str) -> typing.Optional[dict]:\n \n # iterate through all services and fetch the data 从网站上查询片名解...
import argparse import json import os import random import re import sys import time import shutil import typing import urllib3 import signal import platform import config from datetime import datetime, timedelta from lxml import etree from pathlib import Path from opencc import OpenCC from scraper import get_data_from_json from ADC_function import file_modification_days, get_html, parallel_download_files from number_parser import get_number from core import core_main, core_main_no_net_op, moveFailedFolder, debug_print
13,287
# A 100MB log file can shrink to about 3.7MB. return filepath def signal_handler(*args): """ A signal handler function for handling operating system signals like Ctrl+C (SIGINT). It defines the behavior of the application when such signals are received, such as graceful termination. :param args: Variable argument list, used to handle signal information. """ print('[!]Ctrl+C detected, Exit.') os._exit(9) def sigdebug_handler(*args): """ A signal handler function specifically for toggling debug mode on or off. It alters the debug configuration based on certain system signals (like window size change in Unix systems). :param args: Variable argument list, used to handle signal information. """ conf = config.getInstance() conf.set_override(f"debug_mode:switch={int(not conf.debug())}") print(f"[!]Debug {('oFF', 'On')[int(conf.debug())]}") # Added: skip handling for the failed-file list and for .nfo modification age in days, report the total number of skipped videos, list skipped files in detail in debug mode (-g), and skip small ad clips def movie_lists(source_folder, regexstr: str) -> typing.List[str]: """ Generates a list of movie file paths from the specified source folder. It filters files based on regular expressions and other criteria, such as file type and size. :param source_folder: The folder to scan for movie files. :param regexstr: A regular expression string to filter movie files. :return: A list of paths to the movie files that match the criteria. """ conf = config.getInstance() main_mode = conf.main_mode() debug = conf.debug() nfo_skip_days = conf.nfo_skip_days() link_mode = conf.link_mode() file_type = conf.media_type().lower().split(",") trailerRE = re.compile(r'-trailer\.', re.IGNORECASE) cliRE = None if isinstance(regexstr, str) and len(regexstr): try: cliRE = re.compile(regexstr, re.IGNORECASE) except: pass failed_list_txt_path = Path(conf.failed_folder()).resolve() / 'failed_list.txt' failed_set = set() if (main_mode == 3 or link_mode) and not conf.ignore_failed_list(): try: flist = failed_list_txt_path.read_text(encoding='utf-8').splitlines() failed_set = set(flist) if len(flist) != len(failed_set): # Deduplicate and write back without changing the relative order of entries in failed_list.txt; for duplicates keep only the last occurrence fset = failed_set.copy() for i in range(len(flist) - 1, -1, -1): fset.remove(flist[i]) if flist[i] in fset else flist.pop(i) failed_list_txt_path.write_text('\n'.join(flist) + '\n', encoding='utf-8') assert len(fset) == 0 and len(flist) == len(failed_set) except: pass if not Path(source_folder).is_dir(): print('[-]Source folder not found!') return [] total = [] # source = Path(source_folder).resolve() source = Path(source_folder) skip_failed_cnt, skip_nfo_days_cnt = 0, 0 escape_folder_set = set(re.split("[,,]", conf.escape_folder())) for full_name in source.glob(r'**/*'): if main_mode != 3 and set(full_name.parent.parts) & escape_folder_set: continue if not full_name.suffix.lower() in file_type: continue absf = str(full_name) if absf in failed_set: skip_failed_cnt += 1 if debug: print('[!]Skip failed movie:', absf) continue is_sym = full_name.is_symlink() if main_mode != 3 and (is_sym or (full_name.stat().st_nlink > 1 and not conf.scan_hardlink())): # short-circuit boolean: don't stat() a symlink, since it may point to a nonexistent target continue # when main mode != 3, skip symlinks, and skip hardlinks unless hardlink scanning is configured # allow 0-byte debug samples through; drop ad clips smaller than 120MB such as '苍老师强力推荐.mp4' (102.2MB), '黑道总裁.mp4' (98.4MB), '有趣的妹子激情表演.MP4' (95MB), '有趣的臺灣妹妹直播.mp4' (15.1MB) movie_size = 0 if is_sym else full_name.stat().st_size # as above: don't call stat()/st_size on a symlink; assign 0 to bypass the small-video check # if 0 < movie_size < 125829120: # 1024*1024*120=125829120 # continue if cliRE and not cliRE.search(absf) or trailerRE.search(full_name.name): continue if main_mode == 3: nfo = full_name.with_suffix('.nfo') if not nfo.is_file(): if debug: print(f"[!]Metadata {nfo.name} not found
for '{absf}'") elif nfo_skip_days > 0 and file_modification_days(nfo) <= nfo_skip_days: skip_nfo_days_cnt += 1 if debug: print(f"[!]Skip movie by its .nfo which was modified within {nfo_skip_days} days: '{absf}'") continue total.append(absf) if skip_failed_cnt: print(f"[!]Skip {skip_failed_cnt} movies in failed list '{failed_list_txt_path}'.") if skip_nfo_days_cnt: print( f"[!]Skip {skip_nfo_days_cnt} movies in source folder '{source}' whose .nfo was modified within {nfo_skip_days} days.") if nfo_skip_days <= 0 or not link_mode or main_mode == 3: return total # In symlink mode, movies that were already scraped successfully also need their .nfo age checked in the success folder; skip those updated within N days skip_numbers = set() success_folder = Path(conf.success_folder()).resolve() for f in success_folder.glob(r'**/*'): if not re.match(r'\.nfo$', f.suffix, re.IGNORECASE): continue if file_modification_days(f) > nfo_skip_days: continue
def check_update(local_version): """ Check for updates by comparing the local version of the application with the latest version available on GitHub. It fetches the latest release information from GitHub and compares the version numbers. If a new version is available, it prints out the update information. :param local_version: The current local version of the application. """ htmlcode = get_html("https://api.github.com/repos/CineMingle/CineMingle/releases/latest") data = json.loads(htmlcode) remote = int(data["tag_name"].replace(".", "")) local_version = int(local_version.replace(".", "")) if local_version < remote: print("[*]" + ("* New update " + str(data["tag_name"]) + " *").center(54)) print("[*]" + "↓ Download ↓".center(54)) print("[*]https://github.com/CineMingle/CineMingle/releases") print("[*]======================================================") def argparse_function(ver: str) -> typing.Tuple[str, str, str, str, bool, bool, str, str]: """ Parses command-line arguments and returns the parsed values. It sets up the argument parser with various options for the application and returns the parsed arguments and their values. It also loads configuration from a config file. :param ver: The version of the application, used for the version argument. :return: A tuple containing various parsed arguments and flags. """ conf = config.getInstance() parser = argparse.ArgumentParser(epilog=f"Load Config file '{conf.ini_path}'.") parser.add_argument("file", default='', nargs='?', help="Single Movie file path.") parser.add_argument("-p", "--path", default='movies', nargs='?', help="Analysis folder path.") parser.add_argument("-m", "--main-mode", default='', nargs='?', help="Main mode. 1:Scraping 2:Organizing 3:Scraping in analysis folder") parser.add_argument("-n", "--number", default='', nargs='?', help="Custom file number of single movie file.") # parser.add_argument("-C", "--config", default='config.ini', nargs='?', help="The config file Path.") parser.add_argument("-L", "--link-mode", default='', nargs='?', help="Create movie file link. 0:moving movie file, do not create link 1:soft link 2:try hard link first") default_logdir = str(Path.home() / '.mlogs') parser.add_argument("-o", "--log-dir", dest='logdir', default=default_logdir, nargs='?', help=f"""Duplicate stdout and stderr to logfiles in logging folder, default on. default folder for current user: '{default_logdir}'. Change default folder to an empty file, or use --log-dir= to turn log off.""") parser.add_argument("-q", "--regex-query", dest='regexstr', default='', nargs='?', help="python re module regex filepath filtering.") parser.add_argument("-d", "--nfo-skip-days", dest='days', default='', nargs='?', help="Override nfo_skip_days value in config.") parser.add_argument("-c", "--stop-counter", dest='cnt', default='', nargs='?', help="Override stop_counter value in config.") parser.add_argument("-R", "--rerun-delay", dest='delaytm', default='', nargs='?', help="Delay (eg. 1h10m30s or 60 (second)) time and rerun, until all movies proceed. 
Note: stop_counter value in config or -c must none zero.") parser.add_argument("-i", "--ignore-failed-list", action="store_true", help="Ignore failed list '{}'".format( os.path.join(os.path.abspath(conf.failed_folder()), 'failed_list.txt'))) parser.add_argument("-a", "--auto-exit", action="store_true", help="Auto exit after program complete") parser.add_argument("-g", "--debug", action="store_true", help="Turn on debug mode to generate diagnostic log for issue report.") parser.add_argument("-N", "--no-network-operation", action="store_true", help="No network query, do not get metadata, for cover cropping purposes, only takes effect when main mode is 3.") parser.add_argument("-w", "--website", dest='site', default='', nargs='?', help="Override [priority]website= in config.") parser.add_argument("-D", "--download-images", dest='dnimg', action="store_true", help="Override [common]download_only_missing_images=0 force invoke image downloading.") parser.add_argument("-C", "--config-override", dest='cfgcmd', action='append', nargs=1, help="Common use config override. Grammar: section:key=value[;[section:]key=value] eg. 'de:s=1' or 'debug_mode:switch=1' override[debug_mode]switch=1 Note:this parameters can be used multiple times") parser.add_argument("-z", "--zero-operation", dest='zero_op', action="store_true", help="""Only show job list of files and numbers, and **NO** actual operation is performed. It may help you correct wrong numbers before real job.""") parser.add_argument("-v", "--version", action="version", version=ver) parser.add_argument("-s", "--search", default='', nargs='?', help="Search number") parser.add_argument("-ss", "--specified-source", default='', nargs='?', help="specified Source.") parser.add_argument("-su", "--specified-url", default='', nargs='?', help="specified Url.") args = parser.parse_args() def set_natural_number_or_none(sk, value): if isinstance(value, str) and value.isnumeric() and int(value) >= 0: conf.set_override(f'{sk}={value}') def set_str_or_none(sk, value): if isinstance(value, str) and len(value): conf.set_override(f'{sk}={value}') def set_bool_or_none(sk, value): if isinstance(value, bool) and value: conf.set_override(f'{sk}=1') set_natural_number_or_none("common:main_mode", args.main_mode) set_natural_number_or_none("common:link_mode", args.link_mode) set_str_or_none("common:source_folder", args.path) set_bool_or_none("common:auto_exit", args.auto_exit) set_natural_number_or_none("common:nfo_skip_days", args.days) set_natural_number_or_none("advenced_sleep:stop_counter", args.cnt) set_bool_or_none("common:ignore_failed_list", args.ignore_failed_list) set_str_or_none("advenced_sleep:rerun_delay", args.delaytm) set_str_or_none("priority:website", args.site) if isinstance(args.dnimg, bool) and args.dnimg: conf.set_override("common:download_only_missing_images=0") set_bool_or_none("debug_mode:switch", args.debug) if isinstance(args.cfgcmd, list): for cmd in args.cfgcmd: conf.set_override(cmd[0]) no_net_op = False if conf.main_mode() == 3: no_net_op = args.no_network_operation if no_net_op: conf.set_override("advenced_sleep:stop_counter=0;advenced_sleep:rerun_delay=0s;face:aways_imagecut=1") return args.file, args.number, args.logdir, args.regexstr, args.zero_op, no_net_op, args.search, args.specified_source, args.specified_url class OutLogger(object): def __init__(self, logfile) -> None: self.term = sys.stdout self.log = open(logfile, "w", encoding='utf-8', buffering=1) self.filepath = logfile def __del__(self): self.close() def __enter__(self): pass def 
__exit__(self, *args): self.close() def write(self, msg): self.term.write(msg) self.log.write(msg) def flush(self): if 'flush' in dir(self.term): self.term.flush() if 'flush' in dir(self.log): self.log.flush() if 'fileno' in dir(self.log): os.fsync(self.log.fileno()) def close(self): if self.term is not None: sys.stdout = self.term self.term = None if self.log is not None: self.log.close() self.log = None class ErrLogger(OutLogger): def __init__(self, logfile) -> None: self.term = sys.stderr self.log = open(logfile, "w", encoding='utf-8', buffering=1) self.filepath = logfile def close(self): if self.term is not None: sys.stderr = self.term self.term = None if self.log is not None: self.log.close() self.log = None def dupe_stdout_to_logfile(logdir: str): """ Duplicates the standard output (stdout) and standard error (stderr) to log files. This function creates log files in the specified directory and redirects stdout and stderr to these files for logging purposes. :param logdir: The directory where log files will be created and saved. """ if not isinstance(logdir, str) or len(logdir) == 0: return log_dir = Path(logdir) if not log_dir.exists(): try: log_dir.mkdir(parents=True, exist_ok=True) except: pass if not log_dir.is_dir(): return # Tips for disabling logs by change directory to a same name empty regular file abslog_dir = log_dir.resolve() log_tmstr = datetime.now().strftime("%Y%m%dT%H%M%S") logfile = abslog_dir / f'mdc_{log_tmstr}.txt' errlog = abslog_dir / f'mdc_{log_tmstr}_err.txt' sys.stdout = OutLogger(logfile) sys.stderr = ErrLogger(errlog) def close_logfile(logdir: str): """ Closes the log files and restores standard output and error streams. This function is typically called at the end of the application to ensure that log files are properly closed. :param logdir: The directory where log files are saved. 
""" if not isinstance(logdir, str) or len(logdir) == 0 or not os.path.isdir(logdir): return # 日志关闭前保存日志路径 filepath = None try: filepath = sys.stdout.filepath except: pass sys.stdout.close() sys.stderr.close() log_dir = Path(logdir).resolve() if isinstance(filepath, Path): print(f"Log file '{filepath}' saved.") assert (filepath.parent.samefile(log_dir)) # 清理空文件 for f in log_dir.glob(r'*_err.txt'): if f.stat().st_size == 0: try: f.unlink(missing_ok=True) except: pass # 合并日志 只检测日志目录内的文本日志,忽略子目录。三天前的日志,按日合并为单个日志,三个月前的日志, # 按月合并为单个月志,去年及以前的月志,今年4月以后将之按年合并为年志 # 测试步骤: """ LOGDIR=/tmp/mlog mkdir -p $LOGDIR for f in {2016..2020}{01..12}{01..28};do;echo $f>$LOGDIR/mdc_${f}T235959.txt;done for f in {01..09}{01..28};do;echo 2021$f>$LOGDIR/mdc_2021${f}T235959.txt;done for f in {00..23};do;echo 20211001T$f>$LOGDIR/mdc_20211001T${f}5959.txt;done echo "$(ls -1 $LOGDIR|wc -l) files in $LOGDIR" # 1932 files in /tmp/mlog mdc -zgic1 -d0 -m3 -o $LOGDIR # python3 ./Movie_Data_Capture.py -zgic1 -o $LOGDIR ls $LOGDIR # rm -rf $LOGDIR """ today = datetime.today() # 第一步,合并到日。3天前的日志,文件名是同一天的合并为一份日志 for i in range(1): txts = [f for f in log_dir.glob(r'*.txt') if re.match(r'^mdc_\d{8}T\d{6}$', f.stem, re.A)] if not txts or not len(txts): break e = [f for f in txts if '_err' in f.stem] txts.sort() tmstr_3_days_ago = (today.replace(hour=0) - timedelta(days=3)).strftime("%Y%m%dT99") deadline_day = f'mdc_{tmstr_3_days_ago}' day_merge = [f for f in txts if f.stem < deadline_day] if not day_merge or not len(day_merge): break cutday = len('T235959.txt') # cut length mdc_20201201|T235959.txt for f in day_merge: try: day_file_name = str(f)[:-cutday] + '.txt' # mdc_20201201.txt with open(day_file_name, 'a', encoding='utf-8') as m: m.write(f.read_text(encoding='utf-8')) f.unlink(missing_ok=True) except: pass # 第二步,合并到月 for i in range(1): # 利用1次循环的break跳到第二步,避免大块if缩进或者使用goto语法 txts = [f for f in log_dir.glob(r'*.txt') if re.match(r'^mdc_\d{8}$', f.stem, re.A)] if not txts or not len(txts): break txts.sort() tmstr_3_month_ago = (today.replace(day=1) - timedelta(days=3 * 30)).strftime("%Y%m32") deadline_month = f'mdc_{tmstr_3_month_ago}' month_merge = [f for f in txts if f.stem < deadline_month] if not month_merge or not len(month_merge): break tomonth = len('01.txt') # cut length mdc_202012|01.txt for f in month_merge: try: month_file_name = str(f)[:-tomonth] + '.txt' # mdc_202012.txt with open(month_file_name, 'a', encoding='utf-8') as m: m.write(f.read_text(encoding='utf-8')) f.unlink(missing_ok=True) except: pass # 第三步,月合并到年 for i in range(1): if today.month < 4: break mons = [f for f in log_dir.glob(r'*.txt') if re.match(r'^mdc_\d{6}$', f.stem, re.A)] if not mons or not len(mons): break mons.sort() deadline_year = f'mdc_{today.year - 1}13' year_merge = [f for f in mons if f.stem < deadline_year] if not year_merge or not len(year_merge): break toyear = len('12.txt') # cut length mdc_2020|12.txt for f in year_merge: try: year_file_name = str(f)[:-toyear] + '.txt' # mdc_2020.txt with open(year_file_name, 'a', encoding='utf-8') as y: y.write(f.read_text(encoding='utf-8')) f.unlink(missing_ok=True) except: pass # 第四步,压缩年志 如果有压缩需求,请自行手工压缩,或者使用外部脚本来定时完成。推荐nongnu的lzip,对于 # 这种粒度的文本日志,压缩比是目前最好的。lzip -9的运行参数下,日志压缩比要高于xz -9,而且内存占用更少, # 多核利用率更高(plzip多线程版本),解压速度更快。压缩后的大小差不多是未压缩时的2.4%到3.7%左右, # 100MB的日志文件能缩小到3.7MB。 return filepath def signal_handler(*args): """ A signal handler function for handling operating system signals like Ctrl+C (SIGINT). 
It defines the behavior of the application when such signals are received, such as graceful termination. :param args: Variable argument list, used to handle signal information. """ print('[!]Ctrl+C detected, Exit.') os._exit(9) def sigdebug_handler(*args): """ A signal handler function specifically for toggling debug mode on or off. It alters the debug configuration based on certain system signals (like window size change in Unix systems). :param args: Variable argument list, used to handle signal information. """ conf = config.getInstance() conf.set_override(f"debug_mode:switch={int(not conf.debug())}") print(f"[!]Debug {('oFF', 'On')[int(conf.debug())]}") # 新增失败文件列表跳过处理,及.nfo修改天数跳过处理,提示跳过视频总数,调试模式(-g)下详细被跳过文件,跳过小广告 def movie_lists(source_folder, regexstr: str) -> typing.List[str]: """ Generates a list of movie file paths from the specified source folder. It filters files based on regular expressions and other criteria, such as file type and size. :param source_folder: The folder to scan for movie files. :param regexstr: A regular expression string to filter movie files. :return: A list of paths to the movie files that match the criteria. """ conf = config.getInstance() main_mode = conf.main_mode() debug = conf.debug() nfo_skip_days = conf.nfo_skip_days() link_mode = conf.link_mode() file_type = conf.media_type().lower().split(",") trailerRE = re.compile(r'-trailer\.', re.IGNORECASE) cliRE = None if isinstance(regexstr, str) and len(regexstr): try: cliRE = re.compile(regexstr, re.IGNORECASE) except: pass failed_list_txt_path = Path(conf.failed_folder()).resolve() / 'failed_list.txt' failed_set = set() if (main_mode == 3 or link_mode) and not conf.ignore_failed_list(): try: flist = failed_list_txt_path.read_text(encoding='utf-8').splitlines() failed_set = set(flist) if len(flist) != len(failed_set): # 检查去重并写回,但是不改变failed_list.txt内条目的先后次序,重复的只保留最后的 fset = failed_set.copy() for i in range(len(flist) - 1, -1, -1): fset.remove(flist[i]) if flist[i] in fset else flist.pop(i) failed_list_txt_path.write_text('\n'.join(flist) + '\n', encoding='utf-8') assert len(fset) == 0 and len(flist) == len(failed_set) except: pass if not Path(source_folder).is_dir(): print('[-]Source folder not found!') return [] total = [] # source = Path(source_folder).resolve() source = Path(source_folder) skip_failed_cnt, skip_nfo_days_cnt = 0, 0 escape_folder_set = set(re.split("[,,]", conf.escape_folder())) for full_name in source.glob(r'**/*'): if main_mode != 3 and set(full_name.parent.parts) & escape_folder_set: continue if not full_name.suffix.lower() in file_type: continue absf = str(full_name) if absf in failed_set: skip_failed_cnt += 1 if debug: print('[!]Skip failed movie:', absf) continue is_sym = full_name.is_symlink() if main_mode != 3 and (is_sym or (full_name.stat().st_nlink > 1 and not conf.scan_hardlink())): # 短路布尔 符号链接不取stat(),因为符号链接可能指向不存在目标 continue # 模式不等于3下跳过软连接和未配置硬链接刮削 # 调试用0字节样本允许通过,去除小于120MB的广告'苍老师强力推荐.mp4'(102.2MB)'黑道总裁.mp4'(98.4MB)'有趣的妹子激情表演.MP4'(95MB)'有趣的臺灣妹妹直播.mp4'(15.1MB) movie_size = 0 if is_sym else full_name.stat().st_size # 同上 符号链接不取stat()及st_size,直接赋0跳过小视频检测 # if 0 < movie_size < 125829120: # 1024*1024*120=125829120 # continue if cliRE and not cliRE.search(absf) or trailerRE.search(full_name.name): continue if main_mode == 3: nfo = full_name.with_suffix('.nfo') if not nfo.is_file(): if debug: print(f"[!]Metadata {nfo.name} not found for '{absf}'") elif nfo_skip_days > 0 and file_modification_days(nfo) <= nfo_skip_days: skip_nfo_days_cnt += 1 if debug: print(f"[!]Skip movie by it's .nfo 
which modified within {nfo_skip_days} days: '{absf}'") continue total.append(absf) if skip_failed_cnt: print(f"[!]Skip {skip_failed_cnt} movies in failed list '{failed_list_txt_path}'.") if skip_nfo_days_cnt: print( f"[!]Skip {skip_nfo_days_cnt} movies in source folder '{source}' who's .nfo modified within {nfo_skip_days} days.") if nfo_skip_days <= 0 or not link_mode or main_mode == 3: return total # 软连接方式,已经成功削刮的也需要从成功目录中检查.nfo更新天数,跳过N天内更新过的 skip_numbers = set() success_folder = Path(conf.success_folder()).resolve() for f in success_folder.glob(r'**/*'): if not re.match(r'\.nfo$', f.suffix, re.IGNORECASE): continue if file_modification_days(f) > nfo_skip_days: continue
number = get_number(False, f.stem)
4
2023-11-25 03:16:13+00:00
16k
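The failed_list handling in the record above deduplicates entries while preserving their relative order, keeping only the last occurrence of each duplicate, by walking the list backwards. A standalone sketch of that trick (the sample filenames are made up):

import typing

def dedupe_keep_last(lines: typing.List[str]) -> typing.List[str]:
    """Drop duplicates in order, keeping the last occurrence of each entry."""
    remaining = set(lines)
    out = list(lines)
    # Walk backwards: the first time a value is seen from the right is its last occurrence.
    for i in range(len(out) - 1, -1, -1):
        if out[i] in remaining:
            remaining.remove(out[i])
        else:
            out.pop(i)
    return out

print(dedupe_keep_last(["a.mp4", "b.mp4", "a.mp4", "c.mp4"]))  # ['b.mp4', 'a.mp4', 'c.mp4']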
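close_logfile above rolls logs up by comparing filenames as plain strings: because the names embed timestamps as mdc_YYYYMMDDTHHMMSS, lexicographic order is chronological order, and a cutoff like mdc_YYYYMMDDT99 sorts after every real time on that day. A trimmed sketch of just the day-level merge step (the directory path is hypothetical):

import re
from datetime import datetime, timedelta
from pathlib import Path

log_dir = Path("/tmp/mlog")  # hypothetical log directory
txts = sorted(f for f in log_dir.glob("*.txt")
              if re.match(r"^mdc_\d{8}T\d{6}$", f.stem, re.A))
# 'T99' sorts after any real 'Thhmmss', so the cutoff covers that whole day.
deadline = "mdc_" + (datetime.today() - timedelta(days=3)).strftime("%Y%m%dT99")
for f in (f for f in txts if f.stem < deadline):
    day_file = f.with_name(f.stem[: -len("T235959")] + ".txt")  # mdc_YYYYMMDD.txt
    with open(day_file, "a", encoding="utf-8") as merged:
        merged.write(f.read_text(encoding="utf-8"))
    f.unlink(missing_ok=True)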
abdulhaim/LMRL-Gym
llm_rl_scripts/chess/mc_returns/train_full_games_mc_returns.py
[ { "identifier": "Text", "path": "LLM_RL/environment.py", "snippet": "class Text:\n text: str\n is_action: bool" }, { "identifier": "text_env_eval", "path": "LLM_RL/environment.py", "snippet": "def text_env_eval(\n env: Union[TextEnv, BatchedTextEnv], \n policy: Union[TextPoli...
from typing import Optional from JaxSeq.bucket_manager import open_with_bucket as open from JaxSeq.utils import convert_path, load_mesh, setup_experiment_save from JaxSeq.utils import BlockingStrategy, Padding, Truncation, get_weight_decay_mask from JaxSeq.models.gpt2.load import load_train_state, ModelLoadMode from transformers.generation import GenerationConfig from jaxtyping import PyTree from LLM_RL.environment import Text, text_env_eval, TextTrajectory, TextTrajectoryChain, TokenTrajectoryChain from LLM_RL.algorithms.mc_returns.data import MCData from LLM_RL.algorithms.value_rl_base.gpt2.interface import GPT2ValuePolicy from LLM_RL.heads.mlp_head import load_train_state_from_config as load_head_train_state_from_config from LLM_RL.heads.mlp_head import MLPHeadConfig from LLM_RL.algorithms.mc_returns.gpt2.interface import GPT2MCTrain, GPT2MCInference from functools import partial from JaxSeq.logs import pull_logs from transformers import GPT2TokenizerFast from llm_rl_scripts.chess.env.env import FenChessHistoryEnv from JaxSeq.shard_model import copy_sharded_pytree from LLM_RL.algorithms.mc_returns.base_interface import mc_loss from LLM_RL.algorithms.mc_returns.train import eval_loss, train_loop from LLM_RL.algorithms.mc_returns.data import MCData, MCIterableDataset import tyro import jax import jax.numpy as jnp import os import optax import pickle as pkl import re import numpy as np import json
12,253
tokenizer = GPT2TokenizerFast.from_pretrained('gpt2') tokenizer.add_special_tokens({'pad_token': '<|pad|>'}) mesh = load_mesh((data_mesh_shape, fsdp_mesh_shape, model_mesh_shape), ('dp', 'fsdp', 'mp')) is_main_process = jax.process_index() == 0 print(f"Mesh: {mesh}") print(f"Is main process: {is_main_process}") def mc_data_generator(data_name): with open(data_name, "r") as f: for item in f: obj = json.loads(item) # curr_chain = TextTrajectory() # starting with the last element last_trajectory = TextTrajectory([Text(obj[-1]["state"], False), Text(obj[-1]["action"], True)], [0, obj[-1]["reward"]], True) curr_chain = TextTrajectoryChain(text_trajectory=last_trajectory, next=None) # curr_chain.next = curr_chain for traj in reversed(obj): # iterate through move history backwards except for last transition # embed() prev_trajectory = TextTrajectory([Text(traj["state"], False), Text(traj["action"], True)], [0, traj["reward"]], False) curr_chain = TextTrajectoryChain(text_trajectory=prev_trajectory, next=curr_chain) token_trajectory_chain = TokenTrajectoryChain.from_text_trajectory_chain(curr_chain, tokenizer) while token_trajectory_chain.next is not None: yield MCData.from_token_trajectory_chain(token_trajectory_chain, gamma=gamma) token_trajectory_chain = token_trajectory_chain.next mc_data = mc_data_generator(train_data_path) dataset = MCIterableDataset.from_mc_data_iterable(mc_data, tokenizer, BlockingStrategy( padding=Padding.RIGHT, truncation=Truncation.RIGHT, max_length=max_length, )) def policy_optim_getter(params: PyTree): mask = get_weight_decay_mask(( "".join([r"\['ln_[0-9]+'\]", re.escape("['bias']")]), "".join([r"\['ln_[0-9]+'\]", re.escape("['scale']")]), re.escape("['ln_f']['bias']"), re.escape("['ln_f']['scale']"), "bias", ))(params) return optax.MultiSteps( optax.adamw( learning_rate=lr, b1=0.9, b2=0.95, eps=1e-8, weight_decay=weight_decay, mask=mask, ), every_k_schedule=grad_accum_steps, ) def value_head_optim_getter(params: PyTree): mask = get_weight_decay_mask(("bias",))(params) return optax.MultiSteps( optax.adamw( learning_rate=lr, b1=0.9, b2=0.95, eps=1e-8, weight_decay=weight_decay, mask=mask, ), every_k_schedule=grad_accum_steps, ) model_prng_key = jax.random.PRNGKey(3) base_train_state, base_model = load_train_state( model_load_mode=model_load_mode, model_load_path=convert_path(model_load_path) if model_load_mode != ModelLoadMode.HF else model_load_path, model_dtype=jnp.float32, optim_getter=policy_optim_getter, tokenizer=tokenizer, mesh=mesh, prng_key=model_prng_key, force_pad_embeddings=force_pad_embeddings, params_dtype=jnp.float32, ) base_model.config.gradient_checkpointing = gradient_checkpointing base_model.config.gradient_checkpointing_policy = gradient_checkpointing_policy pi_beta_params = copy_sharded_pytree( model=base_model, pytree=base_train_state.params, ) q_prng_key = jax.random.PRNGKey(4) # embed() q_head_train_state, q_head = load_head_train_state_from_config( model_config=MLPHeadConfig( input_dim=base_model.config.n_embd, hidden_dim=base_model.config.n_embd, output_dim=base_model.config.vocab_size, use_bias=True, layer2_initializer_range=0.0, layer2_bias_init=0.0, ), model_dtype=jnp.float32, optim_getter=value_head_optim_getter, mesh=mesh, prng_key=q_prng_key, pad_to_output_dim=None, params_dtype=jnp.float32, ) loop_state = dict() if should_restore_loop_state and (model_load_mode in {ModelLoadMode.TRAIN_STATE, ModelLoadMode.TRAIN_STATE_PARAMS, ModelLoadMode.PARAMS}): with open(os.path.join(convert_path(model_load_path), 'loop_state.pkl'), 'rb') as f: 
loop_state = pkl.load(f)
def main( model_load_mode: ModelLoadMode, model_load_path: str, train_data_path: str, /, # Mark the end of positional arguments. exp_name: Optional[str]=None, outputs_path: Optional[str]=None, data_mesh_shape: int=1, fsdp_mesh_shape: int=1, model_mesh_shape: int=-1, use_wandb: bool=True, wandb_project: Optional[str]="llm_rl_repo_give_position_ilql", n_rounds: int=1, epochs: int=1, max_steps: Optional[int]=None, lr: float=1e-4, weight_decay: float=0.0, tau: float=0.95, cql_weight: float=0.0, gamma: float=0.99, train_bsize: int=32, grad_accum_steps: int=1, gradient_checkpointing: bool=False, gradient_checkpointing_policy: str='nothing_saveable', max_length: int=150, log_every: int=256, eval_every_steps: Optional[int]=100000, eval_every_epochs: Optional[int]=None, eval_at_beginning: bool=False, eval_at_end: bool=True, save_every_steps: Optional[int]=100000, save_every_epochs: Optional[int]=None, save_at_beginning: bool=True, save_at_end: bool=True, save_best: bool=False, max_checkpoints: Optional[int]=None, save_train_state: bool=True, save_bf16: bool=True, policy_max_input_length: int=150, policy_max_output_length: int=10, policy_do_sample: bool=True, policy_num_beams: int=1, policy_temperature: Optional[float]=None, policy_top_p: Optional[float]=None, policy_top_k: Optional[int]=None, force_pad_embeddings: bool=False, should_restore_loop_state: bool=False, reranker: bool=False, ): input_args = locals() print(input_args) tokenizer = GPT2TokenizerFast.from_pretrained('gpt2') tokenizer.add_special_tokens({'pad_token': '<|pad|>'}) mesh = load_mesh((data_mesh_shape, fsdp_mesh_shape, model_mesh_shape), ('dp', 'fsdp', 'mp')) is_main_process = jax.process_index() == 0 print(f"Mesh: {mesh}") print(f"Is main process: {is_main_process}") def mc_data_generator(data_name): with open(data_name, "r") as f: for item in f: obj = json.loads(item) # curr_chain = TextTrajectory() # starting with the last element last_trajectory = TextTrajectory([Text(obj[-1]["state"], False), Text(obj[-1]["action"], True)], [0, obj[-1]["reward"]], True) curr_chain = TextTrajectoryChain(text_trajectory=last_trajectory, next=None) # curr_chain.next = curr_chain for traj in reversed(obj): # iterate through move history backwards except for last transition # embed() prev_trajectory = TextTrajectory([Text(traj["state"], False), Text(traj["action"], True)], [0, traj["reward"]], False) curr_chain = TextTrajectoryChain(text_trajectory=prev_trajectory, next=curr_chain) token_trajectory_chain = TokenTrajectoryChain.from_text_trajectory_chain(curr_chain, tokenizer) while token_trajectory_chain.next is not None: yield MCData.from_token_trajectory_chain(token_trajectory_chain, gamma=gamma) token_trajectory_chain = token_trajectory_chain.next mc_data = mc_data_generator(train_data_path) dataset = MCIterableDataset.from_mc_data_iterable(mc_data, tokenizer, BlockingStrategy( padding=Padding.RIGHT, truncation=Truncation.RIGHT, max_length=max_length, )) def policy_optim_getter(params: PyTree): mask = get_weight_decay_mask(( "".join([r"\['ln_[0-9]+'\]", re.escape("['bias']")]), "".join([r"\['ln_[0-9]+'\]", re.escape("['scale']")]), re.escape("['ln_f']['bias']"), re.escape("['ln_f']['scale']"), "bias", ))(params) return optax.MultiSteps( optax.adamw( learning_rate=lr, b1=0.9, b2=0.95, eps=1e-8, weight_decay=weight_decay, mask=mask, ), every_k_schedule=grad_accum_steps, ) def value_head_optim_getter(params: PyTree): mask = get_weight_decay_mask(("bias",))(params) return optax.MultiSteps( optax.adamw( learning_rate=lr, b1=0.9, b2=0.95, eps=1e-8, 
weight_decay=weight_decay, mask=mask, ), every_k_schedule=grad_accum_steps, ) model_prng_key = jax.random.PRNGKey(3) base_train_state, base_model = load_train_state( model_load_mode=model_load_mode, model_load_path=convert_path(model_load_path) if model_load_mode != ModelLoadMode.HF else model_load_path, model_dtype=jnp.float32, optim_getter=policy_optim_getter, tokenizer=tokenizer, mesh=mesh, prng_key=model_prng_key, force_pad_embeddings=force_pad_embeddings, params_dtype=jnp.float32, ) base_model.config.gradient_checkpointing = gradient_checkpointing base_model.config.gradient_checkpointing_policy = gradient_checkpointing_policy pi_beta_params = copy_sharded_pytree( model=base_model, pytree=base_train_state.params, ) q_prng_key = jax.random.PRNGKey(4) # embed() q_head_train_state, q_head = load_head_train_state_from_config( model_config=MLPHeadConfig( input_dim=base_model.config.n_embd, hidden_dim=base_model.config.n_embd, output_dim=base_model.config.vocab_size, use_bias=True, layer2_initializer_range=0.0, layer2_bias_init=0.0, ), model_dtype=jnp.float32, optim_getter=value_head_optim_getter, mesh=mesh, prng_key=q_prng_key, pad_to_output_dim=None, params_dtype=jnp.float32, ) loop_state = dict() if should_restore_loop_state and (model_load_mode in {ModelLoadMode.TRAIN_STATE, ModelLoadMode.TRAIN_STATE_PARAMS, ModelLoadMode.PARAMS}): with open(os.path.join(convert_path(model_load_path), 'loop_state.pkl'), 'rb') as f: loop_state = pkl.load(f)
loss_fn = partial(mc_loss, cql_weight=cql_weight)
12
2023-11-21 00:16:42+00:00
16k
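In the record above, MCData.from_token_trajectory_chain walks a trajectory chain that was built backwards from the final move and attaches gamma-discounted returns. As a sanity check on what gamma=0.99 does to a chess-style sparse reward, here is a minimal sketch of Monte-Carlo returns computed back-to-front (the reward sequence is made up):

def mc_returns(rewards, gamma=0.99):
    """Discounted return G_t = r_t + gamma * G_{t+1}, accumulated from the last step back."""
    returns, g = [], 0.0
    for r in reversed(rewards):
        g = r + gamma * g
        returns.append(g)
    return list(reversed(returns))

# Sparse reward: 0 on every move, +1 on the winning final move.
print(mc_returns([0.0, 0.0, 0.0, 1.0]))  # [0.970299, 0.9801, 0.99, 1.0]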
jzmzhong/Automatic-Prosody-Annotator-with-SSWP-CLAP
src/clap_module/conformer/encoder.py
[ { "identifier": "ConvolutionModule", "path": "src/clap_module/conformer/convolution.py", "snippet": "class ConvolutionModule(nn.Module):\r\n \"\"\"ConvolutionModule in Conformer model.\r\n\r\n Args:\r\n channels (int): The number of channels of conv layers.\r\n kernel_size (int): Ker...
import logging import torch import math from .convolution import ConvolutionModule from .encoder_layer import EncoderLayer from .modules import get_activation from .modules import VGG2L from .modules import ( LegacyRelPositionMultiHeadedAttention, MultiHeadedAttention, RelPositionMultiHeadedAttention, ) from .embedding import ( LegacyRelPositionalEncoding, PositionalEncoding, RelPositionalEncoding, ScaledPositionalEncoding, ) from .modules import LayerNorm from .multi_layer_conv import ( Conv1dLinear, MultiLayeredConv1d, ) from .modules import ( PositionwiseFeedForward, ) from .modules import repeat from .sub_sampling import Conv2dSubsampling from ..feature_fusion import AttentionPool1d, DAF, AFF, iAFF
14,271
# Copyright 2020 Johns Hopkins University (Shinji Watanabe) # Northwestern Polytechnical University (Pengcheng Guo) # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) """Encoder definition.""" class Encoder(torch.nn.Module): """Conformer encoder module. Args: idim (int): Input dimension. attention_dim (int): Dimension of attention. attention_heads (int): The number of heads of multi head attention. linear_units (int): The number of units of position-wise feed forward. num_blocks (int): The number of decoder blocks. dropout_rate (float): Dropout rate. positional_dropout_rate (float): Dropout rate after adding positional encoding. attention_dropout_rate (float): Dropout rate in attention. input_layer (Union[str, torch.nn.Module]): Input layer type. normalize_before (bool): Whether to use layer_norm before the first block. concat_after (bool): Whether to concat attention layer's input and output. if True, additional linear will be applied. i.e. x -> x + linear(concat(x, att(x))) if False, no additional linear will be applied. i.e. x -> x + att(x) positionwise_layer_type (str): "linear", "conv1d", or "conv1d-linear". positionwise_conv_kernel_size (int): Kernel size of positionwise conv1d layer. macaron_style (bool): Whether to use macaron style for positionwise layer. pos_enc_layer_type (str): Encoder positional encoding layer type. selfattention_layer_type (str): Encoder attention layer type. activation_type (str): Encoder activation function type. use_cnn_module (bool): Whether to use convolution module. zero_triu (bool): Whether to zero the upper triangular part of attention matrix. cnn_module_kernel (int): Kernerl size of convolution module. padding_idx (int): Padding idx for input_layer=embed. stochastic_depth_rate (float): Maximum probability to skip the encoder layer. intermediate_layers (Union[List[int], None]): indices of intermediate CTC layer. indices start from 1. if not None, intermediate outputs are returned (which changes return type signature.) """ def __init__( self, idim, attention_dim=256, attention_heads=4, linear_units=2048, num_blocks=6, dropout_rate=0.1, positional_dropout_rate=0.1, attention_dropout_rate=0.0, input_layer="conv2d", normalize_before=True, concat_after=False, ffn_layer_type="linear", ffn_conv_kernel_size=1, macaron_style=False, pos_enc_layer_type="abs_pos", selfattention_layer_type="selfattn", activation_type="relu", use_cnn_module=True, zero_triu=False, cnn_module_kernel=31, padding_idx=-1, stochastic_depth_rate=0.0, intermediate_layers=None, ctc_softmax=None, conditioning_layer_dim=None, max_seq_len=100, enable_fusion=False, fusion_type="", ): """Construct an Encoder object.""" super(Encoder, self).__init__() self.max_seq_len = max_seq_len activation = get_activation(activation_type) if pos_enc_layer_type == "abs_pos": pos_enc_class = PositionalEncoding elif pos_enc_layer_type == "scaled_abs_pos": pos_enc_class = ScaledPositionalEncoding elif pos_enc_layer_type == "rel_pos": assert selfattention_layer_type == "rel_selfattn" pos_enc_class = RelPositionalEncoding elif pos_enc_layer_type == "legacy_rel_pos": assert selfattention_layer_type == "legacy_rel_selfattn" pos_enc_class = LegacyRelPositionalEncoding else: raise ValueError("unknown pos_enc_layer: " + pos_enc_layer_type) self.conv_subsampling_factor = 1 if input_layer == "linear": self.embed = torch.nn.Sequential( torch.nn.Linear(idim, attention_dim),
# Copyright 2020 Johns Hopkins University (Shinji Watanabe) # Northwestern Polytechnical University (Pengcheng Guo) # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) """Encoder definition.""" class Encoder(torch.nn.Module): """Conformer encoder module. Args: idim (int): Input dimension. attention_dim (int): Dimension of attention. attention_heads (int): The number of heads of multi head attention. linear_units (int): The number of units of position-wise feed forward. num_blocks (int): The number of decoder blocks. dropout_rate (float): Dropout rate. positional_dropout_rate (float): Dropout rate after adding positional encoding. attention_dropout_rate (float): Dropout rate in attention. input_layer (Union[str, torch.nn.Module]): Input layer type. normalize_before (bool): Whether to use layer_norm before the first block. concat_after (bool): Whether to concat attention layer's input and output. if True, additional linear will be applied. i.e. x -> x + linear(concat(x, att(x))) if False, no additional linear will be applied. i.e. x -> x + att(x) positionwise_layer_type (str): "linear", "conv1d", or "conv1d-linear". positionwise_conv_kernel_size (int): Kernel size of positionwise conv1d layer. macaron_style (bool): Whether to use macaron style for positionwise layer. pos_enc_layer_type (str): Encoder positional encoding layer type. selfattention_layer_type (str): Encoder attention layer type. activation_type (str): Encoder activation function type. use_cnn_module (bool): Whether to use convolution module. zero_triu (bool): Whether to zero the upper triangular part of attention matrix. cnn_module_kernel (int): Kernerl size of convolution module. padding_idx (int): Padding idx for input_layer=embed. stochastic_depth_rate (float): Maximum probability to skip the encoder layer. intermediate_layers (Union[List[int], None]): indices of intermediate CTC layer. indices start from 1. if not None, intermediate outputs are returned (which changes return type signature.) """ def __init__( self, idim, attention_dim=256, attention_heads=4, linear_units=2048, num_blocks=6, dropout_rate=0.1, positional_dropout_rate=0.1, attention_dropout_rate=0.0, input_layer="conv2d", normalize_before=True, concat_after=False, ffn_layer_type="linear", ffn_conv_kernel_size=1, macaron_style=False, pos_enc_layer_type="abs_pos", selfattention_layer_type="selfattn", activation_type="relu", use_cnn_module=True, zero_triu=False, cnn_module_kernel=31, padding_idx=-1, stochastic_depth_rate=0.0, intermediate_layers=None, ctc_softmax=None, conditioning_layer_dim=None, max_seq_len=100, enable_fusion=False, fusion_type="", ): """Construct an Encoder object.""" super(Encoder, self).__init__() self.max_seq_len = max_seq_len activation = get_activation(activation_type) if pos_enc_layer_type == "abs_pos": pos_enc_class = PositionalEncoding elif pos_enc_layer_type == "scaled_abs_pos": pos_enc_class = ScaledPositionalEncoding elif pos_enc_layer_type == "rel_pos": assert selfattention_layer_type == "rel_selfattn" pos_enc_class = RelPositionalEncoding elif pos_enc_layer_type == "legacy_rel_pos": assert selfattention_layer_type == "legacy_rel_selfattn" pos_enc_class = LegacyRelPositionalEncoding else: raise ValueError("unknown pos_enc_layer: " + pos_enc_layer_type) self.conv_subsampling_factor = 1 if input_layer == "linear": self.embed = torch.nn.Sequential( torch.nn.Linear(idim, attention_dim),
torch.nn.LayerNorm(attention_dim),
11
2023-11-25 02:38:32+00:00
16k
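Aside: the abs_pos branch in the record above selects a PositionalEncoding class; in encoders of this family that is conventionally a sinusoidal table added to the (batch, time, attention_dim) input. A minimal sketch of that computation, offered as background rather than the record's exact implementation:

import math
import torch

def sinusoidal_table(max_len: int, d_model: int) -> torch.Tensor:
    # pe[pos, 2i] = sin(pos / 10000^(2i/d_model)); pe[pos, 2i+1] = cos(...)
    position = torch.arange(max_len, dtype=torch.float32).unsqueeze(1)
    div_term = torch.exp(
        torch.arange(0, d_model, 2, dtype=torch.float32) * (-math.log(10000.0) / d_model)
    )
    pe = torch.zeros(max_len, d_model)
    pe[:, 0::2] = torch.sin(position * div_term)
    pe[:, 1::2] = torch.cos(position * div_term)
    return pe

x = torch.randn(2, 50, 256)            # (batch, time, attention_dim)
x = x + sinusoidal_table(50, 256)      # broadcasts over the batch dimension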
facebookresearch/ExPLORe
train_finetuning_pixels.py
[ { "identifier": "DrQLearner", "path": "rlpd/agents/drq/drq_learner.py", "snippet": "class DrQLearner(SACLearner):\n data_augmentation_fn: Callable = struct.field(pytree_node=False)\n\n @classmethod\n def create(\n cls,\n seed: int,\n observation_space: gym.Space,\n a...
import os import numpy as np import tqdm import wandb import matplotlib.pyplot as plt import pickle import roboverse import types import jax import jax.numpy as jnp from absl import app, flags from flax.core import FrozenDict from ml_collections import config_flags from flax.core import frozen_dict from flax.training import checkpoints from rlpd.agents import DrQLearner, PixelRND, PixelRM, PixelBCAgent from rlpd.data import MemoryEfficientReplayBuffer, ReplayBuffer from rlpd.evaluation import evaluate from rlpd.wrappers import wrap_pixels from rlpd.agents.drq.icvf import PixelICVF from rlpd import gc_dataset from gym.wrappers import TimeLimit, FilterObservation, RecordEpisodeStatistics from rlpd.data import Dataset from rlpd.data.cog_datasets import COGDataset from functools import partial
13,032
"File path to the training hyperparameter configuration.", lock_config=False, ) config_flags.DEFINE_config_file( "rm_config", "configs/pixel_rm_config.py", "File path to the training hyperparameter configuration.", lock_config=False, ) config_flags.DEFINE_config_file( "rnd_config", "configs/pixel_rnd_config.py", "File path to the training hyperparameter configuration.", lock_config=False, ) config_flags.DEFINE_config_file( "bc_config", "configs/pixel_bc_config.py", "File path to the training hyperparameter configuration", lock_config=False, ) flags.DEFINE_string( "offline_relabel_type", "gt", "Whether to use reward from the offline dataset. [gt/pred/min]", ) flags.DEFINE_boolean("use_rnd_offline", False, "Whether to use rnd offline.") flags.DEFINE_boolean("use_rnd_online", False, "Whether to use rnd online.") def combine(one_dict, other_dict): combined = {} for k, v in one_dict.items(): if isinstance(v, FrozenDict) or isinstance(v, dict): if len(v) == 0: combined[k] = v else: combined[k] = combine(v, other_dict[k]) else: tmp = np.empty( (v.shape[0] + other_dict[k].shape[0], *v.shape[1:]), dtype=v.dtype ) tmp[0::2] = v tmp[1::2] = other_dict[k] combined[k] = tmp return FrozenDict(combined) def add_prefix(prefix, dict): return {prefix + k: v for k, v in dict.items()} def main(_): wandb.init(project=FLAGS.project_name, mode="online") wandb.config.update(FLAGS) if FLAGS.save_dir is not None: log_dir = os.path.join( FLAGS.save_dir, f"{FLAGS.env_name}-s{FLAGS.seed}-icvf_{FLAGS.use_icvf}-ours_{FLAGS.use_rnd_offline}", ) print("logging to", log_dir) if FLAGS.checkpoint_model: chkpt_dir = os.path.join(log_dir, "checkpoints") os.makedirs(chkpt_dir, exist_ok=True) if FLAGS.checkpoint_buffer: buffer_dir = os.path.join(log_dir, "buffers") os.makedirs(buffer_dir, exist_ok=True) def wrap(env): return wrap_pixels( env, action_repeat=1, num_stack=1, camera_id=0, ) def render(env, *args, **kwargs): return env.render_obs() if FLAGS.env_name == "Widow250PickTray-v0": env_name_alt = "pickplace" cog_max_path_length = 40 elif FLAGS.env_name == "Widow250DoubleDrawerOpenGraspNeutral-v0": env_name_alt = "closeddrawer_small" cog_max_path_length = 50 elif FLAGS.env_name == "Widow250DoubleDrawerCloseOpenGraspNeutral-v0": env_name_alt = "blockeddrawer1_small" cog_max_path_length = 80 env = roboverse.make(FLAGS.env_name, transpose_image=False) env.render = types.MethodType(render, env) env = FilterObservation(env, ["image"]) env = TimeLimit(env, max_episode_steps=cog_max_path_length) # TODO env, pixel_keys = wrap(env) env = RecordEpisodeStatistics(env, deque_size=1) env.seed(FLAGS.seed) eval_env = roboverse.make(FLAGS.env_name, transpose_image=False) eval_env.render = types.MethodType(render, eval_env) eval_env = FilterObservation(eval_env, ["image"]) eval_env = TimeLimit(eval_env, max_episode_steps=cog_max_path_length) # TODO eval_env, _ = wrap(eval_env) eval_env.seed(FLAGS.seed + 42) dataset_path = os.path.join("data", env_name_alt) print("Data Path:", dataset_path) np_rng = np.random.default_rng(FLAGS.seed)
""" Modified from https://github.com/ikostrikov/rlpd/blob/main/rlpd/train_finetuning_pixels.py Original lincense information: MIT License Copyright (c) 2022 Ilya Kostrikov, Philip J. Ball, Laura Smith Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ #! /usr/bin/env python ### cog imports ### ### cog imports ### FLAGS = flags.FLAGS flags.DEFINE_string("project_name", "explore-cog", "wandb project name.") flags.DEFINE_string("env_name", "cheetah-run-v0", "Environment name.") flags.DEFINE_float( "dataset_subsample_ratio", 0.1, "Ratio of the dataset to subsample (done twice)" ) flags.DEFINE_bool("use_icvf", False, "Whether to use the icvf encoder") flags.DEFINE_float("offline_ratio", 0.5, "Offline ratio.") flags.DEFINE_integer("seed", 42, "Random seed.") flags.DEFINE_integer("eval_episodes", 100, "Number of episodes used for evaluation.") flags.DEFINE_integer("log_interval", 1000, "Logging interval.") flags.DEFINE_integer("eval_interval", 5000, "Eval interval.") flags.DEFINE_integer("batch_size", 256, "Mini batch size.") flags.DEFINE_integer("max_steps", 500000, "Number of training steps.") flags.DEFINE_integer( "start_training", 5000, "Number of training steps to start training." ) flags.DEFINE_boolean("tqdm", True, "Use tqdm progress bar.") flags.DEFINE_string("save_dir", "exp_data_cog", "Directory to save checkpoints.") flags.DEFINE_bool("checkpoint_model", False, "save model") flags.DEFINE_bool("checkpoint_buffer", False, "save replay buffer") flags.DEFINE_integer("utd_ratio", 1, "Update to data ratio.") flags.DEFINE_float("bc_pretrain_rollin", 0.0, "rollin coeff") flags.DEFINE_integer( "bc_pretrain_steps", 10000, "Pre-train BC policy for a number of steps on pure offline data", ) config_flags.DEFINE_config_file( "config", "configs/rlpd_pixels_config.py", "File path to the training hyperparameter configuration.", lock_config=False, ) config_flags.DEFINE_config_file( "rm_config", "configs/pixel_rm_config.py", "File path to the training hyperparameter configuration.", lock_config=False, ) config_flags.DEFINE_config_file( "rnd_config", "configs/pixel_rnd_config.py", "File path to the training hyperparameter configuration.", lock_config=False, ) config_flags.DEFINE_config_file( "bc_config", "configs/pixel_bc_config.py", "File path to the training hyperparameter configuration", lock_config=False, ) flags.DEFINE_string( "offline_relabel_type", "gt", "Whether to use reward from the offline dataset. 
[gt/pred/min]", ) flags.DEFINE_boolean("use_rnd_offline", False, "Whether to use rnd offline.") flags.DEFINE_boolean("use_rnd_online", False, "Whether to use rnd online.") def combine(one_dict, other_dict): combined = {} for k, v in one_dict.items(): if isinstance(v, FrozenDict) or isinstance(v, dict): if len(v) == 0: combined[k] = v else: combined[k] = combine(v, other_dict[k]) else: tmp = np.empty( (v.shape[0] + other_dict[k].shape[0], *v.shape[1:]), dtype=v.dtype ) tmp[0::2] = v tmp[1::2] = other_dict[k] combined[k] = tmp return FrozenDict(combined) def add_prefix(prefix, dict): return {prefix + k: v for k, v in dict.items()} def main(_): wandb.init(project=FLAGS.project_name, mode="online") wandb.config.update(FLAGS) if FLAGS.save_dir is not None: log_dir = os.path.join( FLAGS.save_dir, f"{FLAGS.env_name}-s{FLAGS.seed}-icvf_{FLAGS.use_icvf}-ours_{FLAGS.use_rnd_offline}", ) print("logging to", log_dir) if FLAGS.checkpoint_model: chkpt_dir = os.path.join(log_dir, "checkpoints") os.makedirs(chkpt_dir, exist_ok=True) if FLAGS.checkpoint_buffer: buffer_dir = os.path.join(log_dir, "buffers") os.makedirs(buffer_dir, exist_ok=True) def wrap(env): return wrap_pixels( env, action_repeat=1, num_stack=1, camera_id=0, ) def render(env, *args, **kwargs): return env.render_obs() if FLAGS.env_name == "Widow250PickTray-v0": env_name_alt = "pickplace" cog_max_path_length = 40 elif FLAGS.env_name == "Widow250DoubleDrawerOpenGraspNeutral-v0": env_name_alt = "closeddrawer_small" cog_max_path_length = 50 elif FLAGS.env_name == "Widow250DoubleDrawerCloseOpenGraspNeutral-v0": env_name_alt = "blockeddrawer1_small" cog_max_path_length = 80 env = roboverse.make(FLAGS.env_name, transpose_image=False) env.render = types.MethodType(render, env) env = FilterObservation(env, ["image"]) env = TimeLimit(env, max_episode_steps=cog_max_path_length) # TODO env, pixel_keys = wrap(env) env = RecordEpisodeStatistics(env, deque_size=1) env.seed(FLAGS.seed) eval_env = roboverse.make(FLAGS.env_name, transpose_image=False) eval_env.render = types.MethodType(render, eval_env) eval_env = FilterObservation(eval_env, ["image"]) eval_env = TimeLimit(eval_env, max_episode_steps=cog_max_path_length) # TODO eval_env, _ = wrap(eval_env) eval_env.seed(FLAGS.seed + 42) dataset_path = os.path.join("data", env_name_alt) print("Data Path:", dataset_path) np_rng = np.random.default_rng(FLAGS.seed)
ds = COGDataset(
11
2023-11-19 21:28:52+00:00
16k
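Aside: the combine helper in this record merges an offline and an online batch by strided interleaving rather than concatenation, so the merged rows alternate sources. The pattern in isolation (array values hypothetical):

import numpy as np

offline = np.array([[0.0], [2.0], [4.0]])
online = np.array([[1.0], [3.0], [5.0]])

tmp = np.empty((offline.shape[0] + online.shape[0], *offline.shape[1:]), dtype=offline.dtype)
tmp[0::2] = offline   # even rows come from the first dict
tmp[1::2] = online    # odd rows come from the second dict
print(tmp.ravel())    # [0. 1. 2. 3. 4. 5.]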
Luo-Z13/pointobb
PointOBB/mmdet/models/roi_heads/PointOBB_head.py
[ { "identifier": "HEADS", "path": "PointOBB/mmdet/models/builder.py", "snippet": "HEADS = MODELS" }, { "identifier": "MODELS", "path": "PointOBB/mmdet/models/builder.py", "snippet": "MODELS = Registry('models', parent=MMCV_MODELS)" }, { "identifier": "build_head", "path": "Poi...
import math import torch import torch.nn.functional as F import torch.nn as nn import copy import numpy as np import cv2 from mmdet.core import bbox2result, bbox2roi, rbbox2roi, build_assigner, build_sampler, multi_apply from ..builder import HEADS, MODELS, build_head, build_roi_extractor, build_loss from .standard_roi_head import StandardRoIHead from .cascade_roi_head import CascadeRoIHead from mmdet.core.bbox.iou_calculators import bbox_overlaps from .test_mixins import BBoxTestMixin, MaskTestMixin from mmdet.core.bbox import bbox_xyxy_to_cxcywh from mmdet.core.bbox.transforms import rbbox2result from mmcv.cnn import Scale, ConvModule from mmcv.ops import box_iou_rotated from typing import Any, List, Sequence, Tuple, Union from torch import Tensor from mmdet.models.utils.base_bbox_coder import BaseBBoxCoder from ..detectors.utils import obb2xyxy, regularize_boxes, reduce_mean, obb2poly_np
14,154
RangeType = Sequence[Tuple[int, int]] INF = 1e8 def meshgrid(x: Tensor, y: Tensor, row_major: bool = True) -> Tuple[Tensor, Tensor]: yy, xx = torch.meshgrid(y, x) if row_major: # warning .flatten() would cause error in ONNX exportingF # have to use reshape here return xx.reshape(-1), yy.reshape(-1) else: return yy.reshape(-1), xx.reshape(-1) def obb2cxcywh_le90(obboxes): """Convert oriented bounding boxes to horizontal bounding boxes. Args: obbs (torch.Tensor): [x_ctr,y_ctr,w,h,angle] Returns: hbbs (torch.Tensor): [x_lt,y_lt,x_rb,y_rb] """ center, w, h, theta = torch.split(obboxes, [2, 1, 1, 1], dim=-1) Cos, Sin = torch.cos(theta), torch.sin(theta) x_bias = torch.abs(w / 2 * Cos) + torch.abs(h / 2 * Sin) y_bias = torch.abs(w / 2 * Sin) + torch.abs(h / 2 * Cos) bias = torch.cat([x_bias, y_bias], dim=-1) wh = bias * 2 return torch.cat([center, wh, torch.zeros_like(theta)], dim=-1)
RangeType = Sequence[Tuple[int, int]] INF = 1e8 def meshgrid(x: Tensor, y: Tensor, row_major: bool = True) -> Tuple[Tensor, Tensor]: yy, xx = torch.meshgrid(y, x) if row_major: # warning .flatten() would cause error in ONNX exportingF # have to use reshape here return xx.reshape(-1), yy.reshape(-1) else: return yy.reshape(-1), xx.reshape(-1) def obb2cxcywh_le90(obboxes): """Convert oriented bounding boxes to horizontal bounding boxes. Args: obbs (torch.Tensor): [x_ctr,y_ctr,w,h,angle] Returns: hbbs (torch.Tensor): [x_lt,y_lt,x_rb,y_rb] """ center, w, h, theta = torch.split(obboxes, [2, 1, 1, 1], dim=-1) Cos, Sin = torch.cos(theta), torch.sin(theta) x_bias = torch.abs(w / 2 * Cos) + torch.abs(h / 2 * Sin) y_bias = torch.abs(w / 2 * Sin) + torch.abs(h / 2 * Cos) bias = torch.cat([x_bias, y_bias], dim=-1) wh = bias * 2 return torch.cat([center, wh, torch.zeros_like(theta)], dim=-1)
@HEADS.register_module()
0
2023-11-20 07:50:12+00:00
16k
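Aside: obb2cxcywh_le90 in the record above computes the axis-aligned enclosing box of an oriented box and keeps it in (cx, cy, w, h, angle=0) form. A self-contained restatement with a worked check (a 2x1 box rotated 90 degrees encloses as 1x2):

import math
import torch

def enclosing_cxcywh(obboxes: torch.Tensor) -> torch.Tensor:
    # Half-extents of the enclosing box are |w/2*cos| + |h/2*sin| in x
    # and |w/2*sin| + |h/2*cos| in y; the angle is zeroed.
    center, w, h, theta = torch.split(obboxes, [2, 1, 1, 1], dim=-1)
    cos, sin = torch.cos(theta), torch.sin(theta)
    x_bias = torch.abs(w / 2 * cos) + torch.abs(h / 2 * sin)
    y_bias = torch.abs(w / 2 * sin) + torch.abs(h / 2 * cos)
    wh = 2 * torch.cat([x_bias, y_bias], dim=-1)
    return torch.cat([center, wh, torch.zeros_like(theta)], dim=-1)

box = torch.tensor([[0.0, 0.0, 2.0, 1.0, math.pi / 2]])
print(enclosing_cxcywh(box))   # ~tensor([[0., 0., 1., 2., 0.]])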
wangermeng2021/llm-webui
main.py
[ { "identifier": "login_huggingface", "path": "src/utils/common.py", "snippet": "def login_huggingface(token,base_model_name_dropdown):\n if base_model_name_dropdown.lower().find(\"llama\") >= 0:\n if token:\n HUGGINGFACE_HUB_TOKEN = token\n print(\"d1:\",HUGGINGFACE_HUB_T...
import pandas as pd import math import numpy as np import gc import os,requests import subprocess,threading import time import gradio as gr import os import traceback import numpy as np import glob import shutil import torch import socket from src.utils.common import login_huggingface from src.finetune.huggingface_inference import HuggingfaceInference from src.finetune.llama_cpp_inference import LlamaCppInference from src.rag.qa_with_rag import QAWithRAG from src.utils.common import read_yaml,get_first_row_from_dataset,\ get_runs_model_names_from_dir,get_hg_model_names_from_dir,get_hg_model_names_and_gguf_from_dir,validate_model_path,get_runs_models from src.utils.chat_prompts import get_model_type,get_chat_history_prompt,get_model_prompt_template from transformers.training_args import OptimizerNames from huggingface_hub import hf_hub_download from src.utils import download_model from pathlib import Path from src.finetune.qlora_trainer import QloraTrainer from src.finetune.qlora_trainer import TRAINING_STATUS from src.utils.download_huggingface_repo import download_model_wrapper,download_dataset_wrapper
12,358
local_model_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "models") local_model_root_files = os.listdir(local_model_root_dir) local_model_list = [] for model_dir in local_model_root_files: if os.path.isdir(os.path.join(local_model_root_dir, model_dir)): local_model_list.append(model_dir) return local_model_list,local_model_root_dir def get_local_dataset_list(): local_dataset_list = [] local_dataset_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "datasets") matched_dataset_file_path_list = glob.glob(os.path.join(local_dataset_root_dir,"**","dataset_infos.json"),recursive=False) for matched_file_path in matched_dataset_file_path_list: matched_pos1 = matched_file_path.rfind("datasets") matched_pos2 = matched_file_path.rfind("dataset_infos.json") local_dataset_list.append(matched_file_path[matched_pos1 + 9:matched_pos2-1]) matched_dataset_file_path_list = glob.glob(os.path.join(local_dataset_root_dir,"**","dataset_dict.json"),recursive=False) for matched_file_path in matched_dataset_file_path_list: matched_pos1 = matched_file_path.rfind("datasets") matched_pos2 = matched_file_path.rfind("dataset_dict.json") local_dataset_list.append(matched_file_path[matched_pos1 + 9:matched_pos2-1]) return local_dataset_list,local_dataset_root_dir def start_tensorboard_server(): try: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((LOCAL_HOST_IP, 6006)) s.close() except Exception as e: tensorboard_cmd = f"tensorboard --logdir {os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs')} --reload_multifile True" tensorboard_proc = subprocess.Popen(tensorboard_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, close_fds=True) # bufsize=0, close_fds=True def init(): global config_dict,transformer_optimizer_list,model_context_window,init_train_file_path,init_val_file_path global INIT_PREFIX1,INIT_COL1_TEXT,INIT_PREFIX2,INIT_COL2_TEXT,INIT_PREFIX3,INIT_COL3_TEXT,INIT_PREFIX4,INIT_COL4_TEXT,col_names,DATASET_FIRST_ROW global local_model_list,local_model_root_dir global base_model_names,base_model_context_window,embedding_model_names,training_base_model_names global local_dataset_list, local_dataset_root_dir start_tensorboard_server() model_zoo_config = read_yaml(os.path.join(os.path.dirname(os.path.abspath(__file__)),"config","model_zoo.yaml")) transformer_optimizer_list = list(vars(OptimizerNames)["_value2member_map_"].keys()) #get dynamic context window from selected model model_context_window = [2048,1024,512] init_train_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "datasets", INIT_DATASET_NAME) DATASET_FIRST_ROW,split_list = get_first_row_from_dataset(init_train_file_path) col_names = list(DATASET_FIRST_ROW) col_names.insert(0,"") INIT_PREFIX1 = "<s>[INST] " INIT_PREFIX2 = "here are the inputs " INIT_PREFIX3 = " [/INST]" INIT_PREFIX4 = "</s>" INIT_COL1_TEXT = str(DATASET_FIRST_ROW[col_names[1]]) INIT_COL2_TEXT = str(DATASET_FIRST_ROW[col_names[2]]) INIT_COL3_TEXT = str(DATASET_FIRST_ROW[col_names[3]]) INIT_COL4_TEXT = "" local_model_list,local_model_root_dir = get_local_model_list() base_model_names = [model_name for model_name in model_zoo_config["model_list"]] training_base_model_names = [model_name for model_name in base_model_names if not model_name.endswith(".gguf")] # base_model_context_window = [model_name[1] for model_name in model_zoo_config["model_list"]] embedding_model_names = [model_name for model_name in model_zoo_config["embedding_model_list"]] local_dataset_list, local_dataset_root_dir = get_local_dataset_list() with gr.Blocks(title="FINETUNE",css="#vertical_center_align_markdown { position:absolute; top:30%;background-color:white;} .white_background {background-color: #ffffff} .none_border {border: none;border-collapse:collapse;}") as demo: init() local_model_root_dir_textbox = gr.Textbox(label="", value=local_model_root_dir, visible=False) local_dataset_root_dir_textbox = gr.Textbox(label="",value=local_dataset_root_dir, visible=False) local_embedding_model_root_dir_textbox = gr.Textbox(label="", value=os.path.join(os.path.dirname(os.path.abspath(__file__)), "rag", "embedding_models"), visible=False) local_chat_model_root_dir_textbox = gr.Textbox(label="", value=local_model_root_dir, visible=False) local_home_chat_model_root_dir_textbox = gr.Textbox(label="", value=local_model_root_dir, visible=False) session_state = gr.State(value={}) # html = gr.HTML("<p align='center';>llm-web-ui</p>",elem_id="header") with gr.Tab("Home"): with gr.Row(): # with gr.Column(scale=4, min_width=1): with gr.Group(): gr.Markdown("## &nbsp;ChatBot", elem_classes="white_background") with gr.Group(): gr.Markdown("### &nbsp;&nbsp;&nbsp;&nbsp;Chat Model", elem_classes="white_background") local_home_chat_model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "models") runs_model_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "runs") local_home_chat_model_names = get_hg_model_names_and_gguf_from_dir(local_home_chat_model_dir, runs_model_root_dir) home_chat_model_source_radio_choices = ["Download From Huggingface Hub", f"From Local Dir(hg format:{local_home_chat_model_dir})"] home_chat_model_source_radio = gr.Radio(home_chat_model_source_radio_choices, label="Chat Model source", show_label=False, value=home_chat_model_source_radio_choices[0], interactive=True) with gr.Row(): hub_home_chat_model_names_dropdown = gr.Dropdown(base_model_names, label=f"Chat Model", show_label=False, allow_custom_value=True, value=base_model_names[ 0] if base_model_names else None, interactive=True, scale=4, min_width=1) local_home_chat_model_names_dropdown = gr.Dropdown(local_home_chat_model_names, label=f"Chat Model", show_label=False, value=local_home_chat_model_names[ 0] if local_home_chat_model_names else None, interactive=True, scale=4, min_width=1, visible=False) download_hub_home_chat_model_names_btn = gr.Button("Download", scale=1) stop_download_hub_home_chat_model_names_btn = gr.Button("Stop", scale=1, visible=False) refresh_local_home_chat_model_names_btn = gr.Button("Refresh", scale=1, visible=False) load_home_chat_model_btn = gr.Button("Load Model", scale=1, visible=True) using_4bit_quantization_checkbox = gr.Checkbox(True, label="Using 4-bit quantization", interactive=True, visible=True, info="Less memory but slower", scale=1 )
# os.environ['HTTP_PROXY'] = 'http://127.0.0.1:8889' # os.environ['HTTPS_PROXY'] = 'http://127.0.0.1:8889' LOCAL_HOST_IP = "0.0.0.0" TENSORBOARD_URL = "http://" + LOCAL_HOST_IP + ":6006/" INIT_DATASET_NAME = "test_python_code_instructions_5000_rows" RAG_DATA_LIST_DROPDOWN = "" TEXT_SPLITTER_DROPDOWN = "" CHUNK_SIZE_SLIDER = 0 CHUNK_OVERLAP_SLIDER = -1 SEPARATORS_TEXTBOX = "" EMBEDDING_MODEL_SOURCE_RADIO = "" HUB_EMBEDDING_MODEL_NAMES_DROPDOWN = "" LOCAL_EMBEDDING_MODEL_NAMES_DROPDOWN = "" CHAT_MODEL_SOURCE_RADIO = "" HUB_CHAT_MODEL_NAMES_DROPDOWN = "" LOCAL_CHAT_MODEL_NAMES_DROPDOWN = "" SEARCH_TOP_K_SLIDER = "" SEARCH_SCORE_THRESHOLD_SLIDER = "" training_ret_val = -1 error_msg = "" current_running_model_name = "" infer_model = None stop_generation_status = False chatbot_history=[] chatbot_height = 500 rag_chatbot_history=[] rag_stop_generation_status = False qa_with_rag = QAWithRAG() train_param_config = {} train_param_config["dataset"]={} train_param_config["model"]={} train_param_config["training"]={} model_zoo_config = {} transformer_optimizer_list = [] model_context_window = 0 init_train_file_path = None init_val_file_path = None INIT_PREFIX1 = "" INIT_PREFIX2 = "" INIT_PREFIX3 = "" INIT_PREFIX4 = "" INIT_COL1_TEXT = "" INIT_COL2_TEXT = "" INIT_COL3_TEXT = "" INIT_COL4_TEXT = "" col_names = [] DATASET_FIRST_ROW = None local_model_list = "" local_model_root_dir = "" base_model_names = [] training_base_model_names = [] embedding_model_names = [] base_model_context_window = [] local_dataset_list = [] local_dataset_root_dir = "" def get_local_embedding_model_list(): local_model_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "rag", "embedding_models") local_model_root_files = os.listdir(local_model_root_dir) local_model_list = [] for model_dir in local_model_root_files: if os.path.isdir(os.path.join(local_model_root_dir, model_dir)): local_model_list.append(model_dir) return local_model_list,local_model_root_dir def get_local_model_list(): local_model_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "models") local_model_root_files = os.listdir(local_model_root_dir) local_model_list = [] for model_dir in local_model_root_files: if os.path.isdir(os.path.join(local_model_root_dir, model_dir)): local_model_list.append(model_dir) return local_model_list,local_model_root_dir def get_local_dataset_list(): local_dataset_list = [] local_dataset_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "datasets") matched_dataset_file_path_list = glob.glob(os.path.join(local_dataset_root_dir,"**","dataset_infos.json"),recursive=False) for matched_file_path in matched_dataset_file_path_list: matched_pos1 = matched_file_path.rfind("datasets") matched_pos2 = matched_file_path.rfind("dataset_infos.json") local_dataset_list.append(matched_file_path[matched_pos1 + 9:matched_pos2-1]) matched_dataset_file_path_list = glob.glob(os.path.join(local_dataset_root_dir,"**","dataset_dict.json"),recursive=False) for matched_file_path in matched_dataset_file_path_list: matched_pos1 = matched_file_path.rfind("datasets") matched_pos2 = matched_file_path.rfind("dataset_dict.json") local_dataset_list.append(matched_file_path[matched_pos1 + 9:matched_pos2-1]) return local_dataset_list,local_dataset_root_dir def start_tensorboard_server(): try: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((LOCAL_HOST_IP, 6006)) s.close() except Exception as e: tensorboard_cmd = f"tensorboard --logdir {os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs')} --reload_multifile True" tensorboard_proc = subprocess.Popen(tensorboard_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, close_fds=True) # bufsize=0, close_fds=True def init(): global config_dict,transformer_optimizer_list,model_context_window,init_train_file_path,init_val_file_path global INIT_PREFIX1,INIT_COL1_TEXT,INIT_PREFIX2,INIT_COL2_TEXT,INIT_PREFIX3,INIT_COL3_TEXT,INIT_PREFIX4,INIT_COL4_TEXT,col_names,DATASET_FIRST_ROW global local_model_list,local_model_root_dir global base_model_names,base_model_context_window,embedding_model_names,training_base_model_names global local_dataset_list, local_dataset_root_dir start_tensorboard_server() model_zoo_config = read_yaml(os.path.join(os.path.dirname(os.path.abspath(__file__)),"config","model_zoo.yaml")) transformer_optimizer_list = list(vars(OptimizerNames)["_value2member_map_"].keys()) #get dynamic context window from selected model model_context_window = [2048,1024,512] init_train_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "datasets", INIT_DATASET_NAME) DATASET_FIRST_ROW,split_list = get_first_row_from_dataset(init_train_file_path) col_names = list(DATASET_FIRST_ROW) col_names.insert(0,"") INIT_PREFIX1 = "<s>[INST] " INIT_PREFIX2 = "here are the inputs " INIT_PREFIX3 = " [/INST]" INIT_PREFIX4 = "</s>" INIT_COL1_TEXT = str(DATASET_FIRST_ROW[col_names[1]]) INIT_COL2_TEXT = str(DATASET_FIRST_ROW[col_names[2]]) INIT_COL3_TEXT = str(DATASET_FIRST_ROW[col_names[3]]) INIT_COL4_TEXT = "" local_model_list,local_model_root_dir = get_local_model_list() base_model_names = [model_name for model_name in model_zoo_config["model_list"]] training_base_model_names = [model_name for model_name in base_model_names if not model_name.endswith(".gguf")] # base_model_context_window = [model_name[1] for model_name in model_zoo_config["model_list"]] embedding_model_names = [model_name for model_name in model_zoo_config["embedding_model_list"]] local_dataset_list, local_dataset_root_dir = get_local_dataset_list() with gr.Blocks(title="FINETUNE",css="#vertical_center_align_markdown { position:absolute; top:30%;background-color:white;} .white_background {background-color: #ffffff} .none_border {border: none;border-collapse:collapse;}") as demo: init() local_model_root_dir_textbox = gr.Textbox(label="", value=local_model_root_dir, visible=False) local_dataset_root_dir_textbox = gr.Textbox(label="",value=local_dataset_root_dir, visible=False) local_embedding_model_root_dir_textbox = gr.Textbox(label="", value=os.path.join(os.path.dirname(os.path.abspath(__file__)), "rag", "embedding_models"), visible=False) local_chat_model_root_dir_textbox = gr.Textbox(label="", value=local_model_root_dir, visible=False) local_home_chat_model_root_dir_textbox = gr.Textbox(label="", value=local_model_root_dir, visible=False) session_state = gr.State(value={}) # html = gr.HTML("<p align='center';>llm-web-ui</p>",elem_id="header") with gr.Tab("Home"): with gr.Row(): # with gr.Column(scale=4, min_width=1): with gr.Group(): gr.Markdown("## &nbsp;ChatBot", elem_classes="white_background") with gr.Group(): gr.Markdown("### &nbsp;&nbsp;&nbsp;&nbsp;Chat Model", elem_classes="white_background") local_home_chat_model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "models") runs_model_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "runs") local_home_chat_model_names = get_hg_model_names_and_gguf_from_dir(local_home_chat_model_dir, runs_model_root_dir) home_chat_model_source_radio_choices = ["Download From Huggingface Hub", f"From Local Dir(hg format:{local_home_chat_model_dir})"] home_chat_model_source_radio = gr.Radio(home_chat_model_source_radio_choices, label="Chat Model source", show_label=False, value=home_chat_model_source_radio_choices[0], interactive=True) with gr.Row(): hub_home_chat_model_names_dropdown = gr.Dropdown(base_model_names, label=f"Chat Model", show_label=False, allow_custom_value=True, value=base_model_names[ 0] if base_model_names else None, interactive=True, scale=4, min_width=1) local_home_chat_model_names_dropdown = gr.Dropdown(local_home_chat_model_names, label=f"Chat Model", show_label=False, value=local_home_chat_model_names[ 0] if local_home_chat_model_names else None, interactive=True, scale=4, min_width=1, visible=False) download_hub_home_chat_model_names_btn = gr.Button("Download", scale=1) stop_download_hub_home_chat_model_names_btn = gr.Button("Stop", scale=1, visible=False) refresh_local_home_chat_model_names_btn = gr.Button("Refresh", scale=1, visible=False) load_home_chat_model_btn = gr.Button("Load Model", scale=1, visible=True) using_4bit_quantization_checkbox = gr.Checkbox(True, label="Using 4-bit quantization", interactive=True, visible=True, info="Less memory but slower", scale=1 )
if validate_model_path(base_model_names[0])[0]:
9
2023-11-25 12:37:21+00:00
16k
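Aside: start_tensorboard_server in the record above probes port 6006 with a raw socket and only spawns TensorBoard when the connect fails. The same check written with connect_ex, which returns an error code instead of raising (host, port, and the launched command are hypothetical):

import socket
import subprocess

def port_in_use(host: str, port: int) -> bool:
    # connect_ex returns 0 when something is already listening on (host, port).
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.settimeout(1.0)
        return s.connect_ex((host, port)) == 0

if not port_in_use("127.0.0.1", 6006):
    subprocess.Popen("tensorboard --logdir runs", shell=True)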
danilonumeroso/conar
models/vkc_reasoner.py
[ { "identifier": "AlgorithmReasoner", "path": "models/algorithm_reasoner.py", "snippet": "class AlgorithmReasoner(nn.Module):\n @staticmethod\n def prepare_batch(batch):\n batch = batch.clone()\n for name, tensor in batch.items():\n if not torch.is_tensor(tensor):\n ...
from collections import defaultdict from pprint import pprint from torch_geometric.loader import DataLoader from pytorch_lightning.trainer.supporters import CombinedLoader from models.algorithm_reasoner import AlgorithmReasoner, LitAlgorithmReasoner from utils_execution import get_number_of_nodes from hyperparameters import get_hyperparameters from datasets._configs import CONFIGS import torch import torch_geometric import torch_geometric.utils as tg_utils import torch_scatter import networkx as nx
13,380
class LitVKCReasoner(LitAlgorithmReasoner): def __init__(self, hidden_dim, algo_processor, dataset_class, dataset_root, dataset_kwargs, bias=True, transferring=False, learning_rate=get_hyperparameters()['lr'], double_process=False, **algo_reasoner_kwargs): super().__init__(hidden_dim, algo_processor, dataset_class, dataset_root, dataset_kwargs, bias=bias, transferring=transferring, learning_rate=learning_rate, **algo_reasoner_kwargs) self.algorithm_module = AlgorithmReasoner( self.dataset.spec, self.dataset[0], hidden_dim, algo_processor, bias=bias, transferring=transferring, timeit=self.timeit, double_process=double_process, **algo_reasoner_kwargs) self.double_process = double_process self.save_hyperparameters(ignore=['algo_processor']) def training_step(self, batch, batch_idx): ret = {'loss': 0, 'losses_dict': defaultdict(list), 'accuracies': defaultdict(list)} for bb in batch: ans = super().training_step(bb, batch_idx) ret['loss'] += ans['loss'] for name in ['losses_dict', 'accuracies']: for k, v in ans[name].items(): ret[name][k].append(v) ret['loss'] /= len(batch) for name in ['losses_dict', 'accuracies']: for k, v in ans[name].items(): ret[name][k] = torch.tensor(v).mean() return ret def get_VKC_metrics(self, batch, output_logits): selected_dense = torch_geometric.utils.to_dense_batch(output_logits['output']['selected'], batch=batch.batch)[0] selected_dense_topk = torch.sort(torch.topk(selected_dense.squeeze(-1), self.dataset.k, dim=-1).indices).values selected_topk = (selected_dense_topk+batch.ptr[:-1].unsqueeze(-1)).view(-1) selected_topk_gt = batch.selected.nonzero().squeeze(-1) selected_batch = batch.batch[selected_topk] acc_selected_topk = torch_scatter.scatter_mean((selected_topk == selected_topk_gt).float(), selected_batch).mean() G = tg_utils.to_networkx(batch, to_undirected=True, edge_attrs=['edge_attr']) mspl = nx.multi_source_dijkstra_path_length(G, sources=selected_topk.tolist(), weight='edge_attr') mspl = torch.tensor([mspl[i] for i in range(batch.num_nodes)]).to(selected_dense) farthest = torch_scatter.scatter_max(mspl, batch.batch)[0] assert (farthest + torch.finfo(torch.float32).eps >= batch.farthest).all() return { 'acc_topk': acc_selected_topk, 'farthest': farthest.mean(), 'farthest_gt': batch.farthest.mean(), 'farthest_relative_error': ((farthest-batch.farthest)/batch.farthest).mean(), } def get_metrics(self, batch, all_hint_logits, output_logits, all_masks_graph): accs_dict = super().get_metrics(batch, all_hint_logits, output_logits, all_masks_graph) accs_dict.update(**self.get_VKC_metrics(batch, output_logits)) return accs_dict def load_dataset(self, split, suffix=''): split = split+suffix nns = get_number_of_nodes(self.algorithm, split) for nn in nns: self.dataset_kwargs['split'] = split if (split, nn) not in self._datasets: self._datasets[(split, nn)] = self.dataset_class( self.dataset_root, nn,
class LitVKCReasoner(LitAlgorithmReasoner): def __init__(self, hidden_dim, algo_processor, dataset_class, dataset_root, dataset_kwargs, bias=True, transferring=False, learning_rate=get_hyperparameters()['lr'], double_process=False, **algo_reasoner_kwargs): super().__init__(hidden_dim, algo_processor, dataset_class, dataset_root, dataset_kwargs, bias=bias, transferring=transferring, learning_rate=learning_rate, **algo_reasoner_kwargs) self.algorithm_module = AlgorithmReasoner( self.dataset.spec, self.dataset[0], hidden_dim, algo_processor, bias=bias, transferring=transferring, timeit=self.timeit, double_process=double_process, **algo_reasoner_kwargs) self.double_process = double_process self.save_hyperparameters(ignore=['algo_processor']) def training_step(self, batch, batch_idx): ret = {'loss': 0, 'losses_dict': defaultdict(list), 'accuracies': defaultdict(list)} for bb in batch: ans = super().training_step(bb, batch_idx) ret['loss'] += ans['loss'] for name in ['losses_dict', 'accuracies']: for k, v in ans[name].items(): ret[name][k].append(v) ret['loss'] /= len(batch) for name in ['losses_dict', 'accuracies']: for k, v in ans[name].items(): ret[name][k] = torch.tensor(v).mean() return ret def get_VKC_metrics(self, batch, output_logits): selected_dense = torch_geometric.utils.to_dense_batch(output_logits['output']['selected'], batch=batch.batch)[0] selected_dense_topk = torch.sort(torch.topk(selected_dense.squeeze(-1), self.dataset.k, dim=-1).indices).values selected_topk = (selected_dense_topk+batch.ptr[:-1].unsqueeze(-1)).view(-1) selected_topk_gt = batch.selected.nonzero().squeeze(-1) selected_batch = batch.batch[selected_topk] acc_selected_topk = torch_scatter.scatter_mean((selected_topk == selected_topk_gt).float(), selected_batch).mean() G = tg_utils.to_networkx(batch, to_undirected=True, edge_attrs=['edge_attr']) mspl = nx.multi_source_dijkstra_path_length(G, sources=selected_topk.tolist(), weight='edge_attr') mspl = torch.tensor([mspl[i] for i in range(batch.num_nodes)]).to(selected_dense) farthest = torch_scatter.scatter_max(mspl, batch.batch)[0] assert (farthest + torch.finfo(torch.float32).eps >= batch.farthest).all() return { 'acc_topk': acc_selected_topk, 'farthest': farthest.mean(), 'farthest_gt': batch.farthest.mean(), 'farthest_relative_error': ((farthest-batch.farthest)/batch.farthest).mean(), } def get_metrics(self, batch, all_hint_logits, output_logits, all_masks_graph): accs_dict = super().get_metrics(batch, all_hint_logits, output_logits, all_masks_graph) accs_dict.update(**self.get_VKC_metrics(batch, output_logits)) return accs_dict def load_dataset(self, split, suffix=''): split = split+suffix nns = get_number_of_nodes(self.algorithm, split) for nn in nns: self.dataset_kwargs['split'] = split if (split, nn) not in self._datasets: self._datasets[(split, nn)] = self.dataset_class( self.dataset_root, nn,
CONFIGS[self.algorithm][split]['num_samples'],
4
2023-11-20 15:32:43+00:00
16k
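Aside: get_VKC_metrics in the record above scores a vertex k-center solution by running a multi-source Dijkstra from the selected nodes and taking the farthest resulting distance per graph. The core of that objective on a toy graph:

import networkx as nx

G = nx.path_graph(5)                           # nodes 0-4 in a line
nx.set_edge_attributes(G, 1.0, "edge_attr")    # unit edge weights
dist = nx.multi_source_dijkstra_path_length(G, sources=[0, 4], weight="edge_attr")
farthest = max(dist.values())                  # node 2 is 2.0 away from both centers
print(farthest)                                # 2.0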
harisankar95/pathfinding3D
test/test_path.py
[ { "identifier": "DiagonalMovement", "path": "pathfinding3d/core/diagonal_movement.py", "snippet": "class DiagonalMovement:\n always = 1\n never = 2\n if_at_most_one_obstacle = 3\n only_when_no_obstacle = 4" }, { "identifier": "Grid", "path": "pathfinding3d/core/grid.py", "sni...
import numpy as np import pytest from pathfinding3d.core.diagonal_movement import DiagonalMovement from pathfinding3d.core.grid import Grid from pathfinding3d.core.node import GridNode from pathfinding3d.finder.a_star import AStarFinder from pathfinding3d.finder.best_first import BestFirst from pathfinding3d.finder.bi_a_star import BiAStarFinder from pathfinding3d.finder.breadth_first import BreadthFirstFinder from pathfinding3d.finder.dijkstra import DijkstraFinder from pathfinding3d.finder.finder import ExecutionRunsException, ExecutionTimeException from pathfinding3d.finder.ida_star import IDAStarFinder from pathfinding3d.finder.msp import MinimumSpanningTree
11,009
finders = [ AStarFinder, BestFirst, BiAStarFinder, DijkstraFinder, IDAStarFinder, BreadthFirstFinder, MinimumSpanningTree, ] TIME_LIMIT = 10 # give it a 10 second limit. weighted_finders = [ AStarFinder, BiAStarFinder, DijkstraFinder, MinimumSpanningTree, ] SIMPLE_MATRIX = np.zeros((5, 5, 5)) SIMPLE_MATRIX[0, 0, 0] = 1 SIMPLE_MATRIX[0, 0, 1] = 1 SIMPLE_MATRIX[0, 0, 2] = 1 SIMPLE_MATRIX[0, 0, 3] = 1 SIMPLE_MATRIX[0, 0, 4] = 1 SIMPLE_MATRIX[1, :, :] = 1 SIMPLE_MATRIX[2, :, :] = 1 SIMPLE_MATRIX[3, :, :] = 1 SIMPLE_MATRIX[4, 0, 0] = 1 SIMPLE_MATRIX[4, 1, 0] = 1 SIMPLE_MATRIX[4, 2, 0] = 1 SIMPLE_MATRIX[4, 3, 0] = 1 SIMPLE_MATRIX[4, 4, 0] = 1 WEIGHTED_SIMPLE_MATRIX = np.copy(SIMPLE_MATRIX) WEIGHTED_SIMPLE_MATRIX[4, 1, 1] = 1 WEIGHTED_SIMPLE_MATRIX[4, 2, 1] = 1 WEIGHTED_SIMPLE_MATRIX[4, 3, 1] = 1 WEIGHTED_SIMPLE_MATRIX[4, 2, 0] = 99 WEIGHTED_SIMPLE_MATRIX[1, :, :] = 99 WEIGHTED_SIMPLE_MATRIX[2, :, :] = 99 WEIGHTED_SIMPLE_MATRIX[3, :, :] = 99 def test_path(): """ test if we can find a path """ grid = Grid(matrix=SIMPLE_MATRIX) start = grid.node(0, 0, 0) end = grid.node(4, 4, 0) for find in finders: grid.cleanup() finder = find(time_limit=TIME_LIMIT) path_, runs = finder.find_path(start, end, grid) path = [] for node in path_: if isinstance(node, GridNode): path.append((node.x, node.y, node.z)) elif isinstance(node, tuple): path.append((node[0], node[1], node[2])) print(find.__name__) print(f"path: {path}") print(f"length: {len(path)}, runs: {runs}") assert len(path) == 9 def test_weighted_path(): grid = Grid(matrix=WEIGHTED_SIMPLE_MATRIX) start = grid.node(0, 0, 0) end = grid.node(4, 4, 0) for find in weighted_finders: grid.cleanup() finder = find(time_limit=TIME_LIMIT) path_, runs = finder.find_path(start, end, grid) path = [] for node in path_: if isinstance(node, GridNode): path.append((node.x, node.y, node.z)) elif isinstance(node, tuple): path.append((node[0], node[1], node[2])) print(find.__name__) print(f"path: {path}") print(f"length: {len(path)}, runs: {runs}") assert len(path) == 11 def test_path_diagonal(): # test diagonal movement grid = Grid(matrix=SIMPLE_MATRIX) start = grid.node(0, 0, 0) end = grid.node(4, 4, 0) for find in finders: grid.cleanup()
finders = [ AStarFinder, BestFirst, BiAStarFinder, DijkstraFinder, IDAStarFinder, BreadthFirstFinder, MinimumSpanningTree, ] TIME_LIMIT = 10 # give it a 10 second limit. weighted_finders = [ AStarFinder, BiAStarFinder, DijkstraFinder, MinimumSpanningTree, ] SIMPLE_MATRIX = np.zeros((5, 5, 5)) SIMPLE_MATRIX[0, 0, 0] = 1 SIMPLE_MATRIX[0, 0, 1] = 1 SIMPLE_MATRIX[0, 0, 2] = 1 SIMPLE_MATRIX[0, 0, 3] = 1 SIMPLE_MATRIX[0, 0, 4] = 1 SIMPLE_MATRIX[1, :, :] = 1 SIMPLE_MATRIX[2, :, :] = 1 SIMPLE_MATRIX[3, :, :] = 1 SIMPLE_MATRIX[4, 0, 0] = 1 SIMPLE_MATRIX[4, 1, 0] = 1 SIMPLE_MATRIX[4, 2, 0] = 1 SIMPLE_MATRIX[4, 3, 0] = 1 SIMPLE_MATRIX[4, 4, 0] = 1 WEIGHTED_SIMPLE_MATRIX = np.copy(SIMPLE_MATRIX) WEIGHTED_SIMPLE_MATRIX[4, 1, 1] = 1 WEIGHTED_SIMPLE_MATRIX[4, 2, 1] = 1 WEIGHTED_SIMPLE_MATRIX[4, 3, 1] = 1 WEIGHTED_SIMPLE_MATRIX[4, 2, 0] = 99 WEIGHTED_SIMPLE_MATRIX[1, :, :] = 99 WEIGHTED_SIMPLE_MATRIX[2, :, :] = 99 WEIGHTED_SIMPLE_MATRIX[3, :, :] = 99 def test_path(): """ test if we can find a path """ grid = Grid(matrix=SIMPLE_MATRIX) start = grid.node(0, 0, 0) end = grid.node(4, 4, 0) for find in finders: grid.cleanup() finder = find(time_limit=TIME_LIMIT) path_, runs = finder.find_path(start, end, grid) path = [] for node in path_: if isinstance(node, GridNode): path.append((node.x, node.y, node.z)) elif isinstance(node, tuple): path.append((node[0], node[1], node[2])) print(find.__name__) print(f"path: {path}") print(f"length: {len(path)}, runs: {runs}") assert len(path) == 9 def test_weighted_path(): grid = Grid(matrix=WEIGHTED_SIMPLE_MATRIX) start = grid.node(0, 0, 0) end = grid.node(4, 4, 0) for find in weighted_finders: grid.cleanup() finder = find(time_limit=TIME_LIMIT) path_, runs = finder.find_path(start, end, grid) path = [] for node in path_: if isinstance(node, GridNode): path.append((node.x, node.y, node.z)) elif isinstance(node, tuple): path.append((node[0], node[1], node[2])) print(find.__name__) print(f"path: {path}") print(f"length: {len(path)}, runs: {runs}") assert len(path) == 11 def test_path_diagonal(): # test diagonal movement grid = Grid(matrix=SIMPLE_MATRIX) start = grid.node(0, 0, 0) end = grid.node(4, 4, 0) for find in finders: grid.cleanup()
finder = find(diagonal_movement=DiagonalMovement.always, time_limit=TIME_LIMIT)
0
2023-11-21 10:14:12+00:00
16k
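Aside: the tests in the record above exercise a small, uniform API; reassembled into a minimal run (grid contents hypothetical, and constructor defaults assumed beyond what the tests show):

import numpy as np
from pathfinding3d.core.grid import Grid
from pathfinding3d.finder.a_star import AStarFinder

matrix = np.ones((3, 3, 3))               # 1 = walkable everywhere
grid = Grid(matrix=matrix)
start, end = grid.node(0, 0, 0), grid.node(2, 2, 2)
finder = AStarFinder(time_limit=10)
path, runs = finder.find_path(start, end, grid)
print(len(path), runs)                    # path includes the start and end nodes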
yuukawahiroshi/ddb-tools
mixins_ddb.py
[ { "identifier": "DDIModel", "path": "utils/ddi_utils.py", "snippet": "class DDIModel:\n def __init__(self, ddi_bytes: bytes) -> None:\n self.ddi_bytes = ddi_bytes\n self.ddi_data = None\n self.phdc_data = {}\n self.tdb_data = {}\n self.sta_data = {}\n self.ar...
from typing import TypedDict from utils.ddi_utils import DDIModel, str_to_bytes, str_to_data, stream_reverse_search import argparse import io import re import os import os.path import struct
11,937
ddb_snd_offset = output_stream.tell() mixins_ddb_stream.seek(snd_offset) snd_bytes = mixins_ddb_stream.read(snd_len) hed = snd_bytes[0:4].decode() if hed != "SND ": raise Exception("Mixins DDB file is broken") output_stream.write(snd_bytes) vqm_meta_list.append({ "idx": vqm_idx, "epr": epr_list, "snd_id": snd_id, "snd": ddb_snd_offset, "fs": vqm_info["fs"], "unknown1": vqm_info["unknown1"], "pitch1": vqm_info["pitch1"], "pitch2": vqm_info["pitch2"], "unknown2": vqm_info["unknown2"], "unknown3": vqm_info["unknown3"], "dynamics": vqm_info["dynamics"], }) # Create new DDI vqm_stream = _create_vqm_stream(vqm_meta_list) ddi_vqm_bytes = vqm_stream.getvalue() if "vqm" in src_ddi_model.ddi_data_dict: ddi_vqm_pos = src_ddi_model.offset_map["vqm"][0] ddi_vqm_end_pos = src_ddi_model.offset_map["vqm"][1] else: ddi_vqm_pos = src_ddi_bytes.find(ddi_footer) ddi_vqm_end_pos = ddi_vqm_pos # Bump dbv_len dbv_len_post = src_ddi_model.offset_map["dbv"][0] + 0x18 src_ddi_stream.seek(dbv_len_post) src_ddi_dbv_len = int.from_bytes(src_ddi_stream.read(4), byteorder='little') src_ddi_dbv_len += 1 src_ddi_stream.seek(dbv_len_post) src_ddi_stream.write(src_ddi_dbv_len.to_bytes(4, byteorder='little')) src_ddi_bytes = src_ddi_stream.getvalue() dst_ddi_bytes = byte_replace(src_ddi_bytes, ddi_vqm_pos, ddi_vqm_end_pos - ddi_vqm_pos, ddi_vqm_bytes) return dst_ddi_bytes def mixins_sta2vqm(src_ddi_bytes: bytes, output_stream: io.BufferedWriter, mixins_ddi_model: DDIModel, mixins_ddb_stream: io.BufferedReader, sta2vqm_phoneme: str): mixins_ddi_stream = mixins_ddi_model.ddi_data print("Reading source DDI...") src_ddi_model = DDIModel(src_ddi_bytes) src_ddi_model.read() src_ddi_stream = src_ddi_model.ddi_data if "vqm" in src_ddi_model.ddi_data_dict: print("Source DDI already has vqm stream, continue will replace it and won't remove vqm stream from ddb file.") print("Continue? (Y/n)", end=" ") choice = input().strip().lower() if choice != "y" or choice != "": return # Find stationary in mixins mixins_sta_items = None for _, sta_items in mixins_ddi_model.sta_data.items(): if sta_items["phoneme"] == sta2vqm_phoneme: mixins_sta_items = sta_items break if mixins_sta_items is None: raise Exception("Mixins DDI doesn't have stationary entry for phoneme \"%s\"" % sta2vqm_phoneme) vqm_meta_list: list[VQMMeta] = [] vqm_idx = 0 for sta_idx, sta_item in mixins_sta_items["stap"].items(): output_epr_list = [] # EpR epr_list = sta_item["epr"] if len(epr_list) < 100: print(f"Warning: EpR count is less than 100, EpR count: {len(epr_list)}") continue epr_list = epr_list[0:100] for epr_info in epr_list: epr_offset = epr_info.split("=") ddb_epr_offset = output_stream.tell() epr_offset = int(epr_offset[1], 16) mixins_ddb_stream.seek(epr_offset) hed = mixins_ddb_stream.read(4) if hed != b"FRM2": raise Exception("Mixins DDB file is broken") frm_len = int.from_bytes(mixins_ddb_stream.read(4), byteorder='little') epr_cutoff = epr_offset + frm_len mixins_ddb_stream.seek(epr_offset) frm_bytes = mixins_ddb_stream.read(frm_len) output_stream.write(frm_bytes) output_epr_list.append(ddb_epr_offset) # SND ddi_snd_pos, snd_name = sta_item["snd"].split("=") snd_offset, snd_id = snd_name.split("_") ddi_snd_pos = int(ddi_snd_pos, 16) snd_offset = int(snd_offset, 16) snd_id = int(snd_id, 16)
#!/bin/env python3 # I thought what I'd do was, I'd pretend I was one of those deaf-mutes. from __future__ import annotations ddi_footer = b'\x05\x00\x00\x00' + "voice".encode() class SmartFormatter(argparse.HelpFormatter): def _split_lines(self, text, width): if text.startswith('R|'): return text[2:].splitlines() # this is the RawTextHelpFormatter._split_lines return argparse.HelpFormatter._split_lines(self, text, width) class VQMMeta(TypedDict): idx: str epr: list[int] snd_id: int snd: int fs: int unknown1: str pitch1: float pitch2: float unknown2: float unknown3: float dynamics: float def byte_replace(src_bytes: bytes, offset: int, override_len: int, replace_bytes: bytes): return src_bytes[:offset] + replace_bytes + src_bytes[offset + override_len:] def parse_args(args=None): # : list[str] # initialize parser parser = argparse.ArgumentParser(formatter_class=SmartFormatter) parser.add_argument('--src_path', required=True, help='source ddi file path') parser.add_argument('--mixins_path', help='the mixins ddi file path. default to be same as src_path') parser.add_argument('--dst_path', help='output folder, ' 'default to be "./[singer name]/mixins"') parser.add_argument('--mixins_item', choices=['vqm', 'sta2vqm'], default='vqm', help='R|mixins item, ' 'default to be "vqm"\n' 'select from: \n' ' vqm: growl\n' ' sta2vqm: convert stationary entry to growl\n') parser.add_argument('--sta2vqm_phoneme', default="Grw", help='phoneme for sta2vqm, will use this phoneme to generate growl, default to be "Grw"') # parse args args = parser.parse_args(args) src_ddi_path: str = os.path.normpath(args.src_path) if not os.path.exists(src_ddi_path): raise Exception("ddi file not exists") src_path = os.path.dirname(src_ddi_path) src_singer_name = os.path.splitext(os.path.basename(src_ddi_path))[0] mixins_ddi_path = args.mixins_path or src_ddi_path mixins_ddi_path: str = os.path.normpath(mixins_ddi_path) mixins_path = os.path.dirname(mixins_ddi_path) mixins_singer_name = os.path.splitext(os.path.basename(mixins_ddi_path))[0] dst_path: str = args.dst_path if dst_path is None: dst_path = os.path.join(src_path, "mixins") dst_path: str = os.path.normpath(dst_path) # make dirs if not os.path.exists(dst_path): os.makedirs(dst_path) mixins_item = args.mixins_item return src_path, src_singer_name, mixins_path, mixins_singer_name, dst_path, mixins_item, args def _create_vqm_stream(vqm_meta_list: list[VQMMeta]): # Create VQM struct vqm_stream = io.BytesIO() vqm_stream.write(b'\xFF'*8) vqm_stream.write(b'VQM ') vqm_stream.write((0).to_bytes(4, byteorder='little')) vqm_stream.write((1).to_bytes(4, byteorder='little')) vqm_stream.write((0).to_bytes(4, byteorder='little')) vqm_stream.write((1).to_bytes(4, byteorder='little')) vqm_stream.write(b'\xFF'*8) vqm_stream.write(b'VQMu') vqm_stream.write((0).to_bytes(4, byteorder='little')) vqm_stream.write((1).to_bytes(4, byteorder='little')) vqm_stream.write((0).to_bytes(4, byteorder='little')) vqm_stream.write(len(vqm_meta_list).to_bytes(4, byteorder='little')) vqm_stream.write(len(vqm_meta_list).to_bytes(4, byteorder='little')) for vqm_meta in vqm_meta_list: vqm_stream.write(b'\xFF'*8) vqm_stream.write(b"VQMp") vqm_stream.write((0).to_bytes(4, byteorder='little')) vqm_stream.write((0).to_bytes(4, byteorder='little')) vqm_stream.write((1).to_bytes(4, byteorder='little')) vqm_stream.write(str_to_bytes(vqm_meta["unknown1"])) vqm_stream.write(struct.pack("<f", 224.0)) # Unknown vqm_stream.write(struct.pack("<f", vqm_meta["pitch2"])) vqm_stream.write(struct.pack("<f", vqm_meta["unknown2"])) vqm_stream.write(struct.pack("<f", vqm_meta["dynamics"])) vqm_stream.write(struct.pack("<f", vqm_meta["unknown3"])) vqm_stream.write((0).to_bytes(4, byteorder='little')) # EpR vqm_stream.write(b'\xFF'*4) vqm_stream.write(len(vqm_meta["epr"]).to_bytes(4, byteorder='little')) for epr_offset in vqm_meta["epr"]: vqm_stream.write(epr_offset.to_bytes(8, byteorder='little')) # SND vqm_stream.write(vqm_meta["fs"].to_bytes(4, byteorder='little')) vqm_stream.write(b'\x01\x00') vqm_stream.write(vqm_meta["snd_id"].to_bytes(4, byteorder='little')) vqm_stream.write(vqm_meta["snd"].to_bytes(8, byteorder='little')) vqm_stream.write(b'\xFF'*0x10) vqm_stream.write(str_to_data(vqm_meta["idx"])) vqm_stream.write(str_to_data("GROWL")) vqm_stream.write(str_to_data("vqm")) return vqm_stream def mixins_vqm(src_ddi_bytes: bytes, output_stream: io.BufferedWriter, mixins_ddi_model: DDIModel, mixins_ddb_stream: io.BufferedReader): mixins_ddi_stream = mixins_ddi_model.ddi_data if "vqm" not in mixins_ddi_model.ddi_data_dict: raise Exception("Mixins DDI doesn't have vqm stream.") print("Reading source DDI...") src_ddi_model = DDIModel(src_ddi_bytes) src_ddi_model.read() src_ddi_stream = src_ddi_model.ddi_data if "vqm" in src_ddi_model.ddi_data_dict: print("Source DDI already has vqm stream, continue will replace it and won't remove vqm stream from ddb file.") print("Continue? (Y/n)", end=" ") choice = input().strip().lower() if choice != "y" or choice != "": return vqm_meta_list: list[VQMMeta] = [] for vqm_idx, vqm_info in mixins_ddi_model.vqm_data.items(): epr_list = [] for epr_info in vqm_info["epr"]: ddi_epr_pos, epr_offset = epr_info.split("=") ddb_epr_offset = output_stream.tell() ddi_epr_pos = int(ddi_epr_pos, 16) epr_offset = int(epr_offset, 16) mixins_ddb_stream.seek(epr_offset) hed = mixins_ddb_stream.read(4).decode() if hed != "FRM2": raise Exception("Mixins DDB file is broken") frm_len = int.from_bytes(mixins_ddb_stream.read(4), byteorder='little') mixins_ddb_stream.seek(epr_offset) frm_bytes = mixins_ddb_stream.read(frm_len) output_stream.write(frm_bytes) epr_list.append(ddb_epr_offset) ddi_snd_pos, snd_name = vqm_info["snd"].split("=") snd_offset, snd_id = snd_name.split("_") ddi_snd_pos = int(ddi_snd_pos, 16) snd_offset = int(snd_offset, 16) snd_id = int(snd_id, 16) mixins_ddb_stream.seek(snd_offset) hed = mixins_ddb_stream.read(4).decode() if hed != "SND ": raise Exception("Mixins DDB file is broken") snd_len = int.from_bytes(mixins_ddb_stream.read(4), byteorder='little') ddb_snd_offset = output_stream.tell() mixins_ddb_stream.seek(snd_offset) snd_bytes = mixins_ddb_stream.read(snd_len) hed = snd_bytes[0:4].decode() if hed != "SND ": raise Exception("Mixins DDB file is broken") output_stream.write(snd_bytes) vqm_meta_list.append({ "idx": vqm_idx, "epr": epr_list, "snd_id": snd_id, "snd": ddb_snd_offset, "fs": vqm_info["fs"], "unknown1": vqm_info["unknown1"], "pitch1": vqm_info["pitch1"], "pitch2": vqm_info["pitch2"], "unknown2": vqm_info["unknown2"], "unknown3": vqm_info["unknown3"], "dynamics": vqm_info["dynamics"], }) # Create new DDI vqm_stream = _create_vqm_stream(vqm_meta_list) ddi_vqm_bytes = vqm_stream.getvalue() if "vqm" in src_ddi_model.ddi_data_dict: ddi_vqm_pos = src_ddi_model.offset_map["vqm"][0] ddi_vqm_end_pos = src_ddi_model.offset_map["vqm"][1] else: ddi_vqm_pos = src_ddi_bytes.find(ddi_footer) ddi_vqm_end_pos = ddi_vqm_pos # Bump dbv_len dbv_len_post = src_ddi_model.offset_map["dbv"][0] + 0x18 src_ddi_stream.seek(dbv_len_post) src_ddi_dbv_len = int.from_bytes(src_ddi_stream.read(4), byteorder='little') src_ddi_dbv_len += 1 src_ddi_stream.seek(dbv_len_post) src_ddi_stream.write(src_ddi_dbv_len.to_bytes(4, byteorder='little')) src_ddi_bytes = src_ddi_stream.getvalue() dst_ddi_bytes = byte_replace(src_ddi_bytes, ddi_vqm_pos, ddi_vqm_end_pos - ddi_vqm_pos, ddi_vqm_bytes) return dst_ddi_bytes def mixins_sta2vqm(src_ddi_bytes: bytes, output_stream: io.BufferedWriter, mixins_ddi_model: DDIModel, mixins_ddb_stream: io.BufferedReader, sta2vqm_phoneme: str): mixins_ddi_stream = mixins_ddi_model.ddi_data print("Reading source DDI...") src_ddi_model = DDIModel(src_ddi_bytes) src_ddi_model.read() src_ddi_stream = src_ddi_model.ddi_data if "vqm" in src_ddi_model.ddi_data_dict: print("Source DDI already has vqm stream, continue will replace it and won't remove vqm stream from ddb file.") print("Continue? (Y/n)", end=" ") choice = input().strip().lower() if choice != "y" or choice != "": return # Find stationary in mixins mixins_sta_items = None for _, sta_items in mixins_ddi_model.sta_data.items(): if sta_items["phoneme"] == sta2vqm_phoneme: mixins_sta_items = sta_items break if mixins_sta_items is None: raise Exception("Mixins DDI doesn't have stationary entry for phoneme \"%s\"" % sta2vqm_phoneme) vqm_meta_list: list[VQMMeta] = [] vqm_idx = 0 for sta_idx, sta_item in mixins_sta_items["stap"].items(): output_epr_list = [] # EpR epr_list = sta_item["epr"] if len(epr_list) < 100: print(f"Warning: EpR count is less than 100, EpR count: {len(epr_list)}") continue epr_list = epr_list[0:100] for epr_info in epr_list: epr_offset = epr_info.split("=") ddb_epr_offset = output_stream.tell() epr_offset = int(epr_offset[1], 16) mixins_ddb_stream.seek(epr_offset) hed = mixins_ddb_stream.read(4) if hed != b"FRM2": raise Exception("Mixins DDB file is broken") frm_len = int.from_bytes(mixins_ddb_stream.read(4), byteorder='little') epr_cutoff = epr_offset + frm_len mixins_ddb_stream.seek(epr_offset) frm_bytes = mixins_ddb_stream.read(frm_len) output_stream.write(frm_bytes) output_epr_list.append(ddb_epr_offset) # SND ddi_snd_pos, snd_name = sta_item["snd"].split("=") snd_offset, snd_id = snd_name.split("_") ddi_snd_pos = int(ddi_snd_pos, 16) snd_offset = int(snd_offset, 16) snd_id = int(snd_id, 16)
real_snd_offset = stream_reverse_search(mixins_ddb_stream, b"SND ", snd_offset)
3
2023-11-20 11:37:46+00:00
16k
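Aside: byte_replace in the record above is the basic splice primitive used to patch a region of a binary blob; its behaviour on a small buffer (buffer contents hypothetical):

def byte_replace(src_bytes: bytes, offset: int, override_len: int, replace_bytes: bytes) -> bytes:
    # Drop override_len bytes starting at offset and splice in replace_bytes.
    return src_bytes[:offset] + replace_bytes + src_bytes[offset + override_len:]

data = b"SND old-payload TAIL"
print(byte_replace(data, 4, 11, b"new"))   # b'SND new TAIL'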
shercoo/RGDiffSR
ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n ...
import datetime import math import cv2 import torch import torch.nn as nn import numpy as np import pytorch_lightning as pl import pygame from collections import OrderedDict from matplotlib import pyplot as plt from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager from functools import partial from torchvision import transforms from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities.distributed import rank_zero_only from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from ldm.modules.ema import LitEma from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like from ldm.models.diffusion.ddim import DDIMSampler from text_super_resolution.model.VisionLAN.utils import Attention_AR_counter from text_super_resolution.model.tps_spatial_transformer import TPSSpatialTransformer from text_super_resolution.model.stn_head import STNHead from text_super_resolution.model.VisionLAN.VisionLAN import VisionLAN from utils.render_standard_text import * from text_super_resolution.loss.semantic_loss import SemanticLoss from text_super_resolution.utils import ssim_psnr from pygame import freetype from utils.metrics import *
12,312
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} sem_loss = SemanticLoss() def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., ): super().__init__() assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else:
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} sem_loss = SemanticLoss() def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., ): super().__init__() assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else:
betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end,
14
2023-11-20 06:34:21+00:00
16k
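The DDPM record above registers its noise schedule through make_beta_schedule. As a point of reference, the following sketch re-derives the main schedule quantities for the default linear schedule; it is an illustration written against the formulas visible in the record (including the v_posterior interpolation noted in the constructor comment), not the ldm implementation itself, and the helper name is ours.

import numpy as np

# "linear" here interpolates sqrt(beta) linearly and then squares (assumption
# based on the linear_start/linear_end defaults shown in the record above).
def linear_beta_schedule(timesteps=1000, linear_start=1e-4, linear_end=2e-2):
    return np.linspace(linear_start ** 0.5, linear_end ** 0.5, timesteps) ** 2

betas = linear_beta_schedule()
alphas = 1.0 - betas
alphas_cumprod = np.cumprod(alphas)                        # \bar{alpha}_t
alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1])  # \bar{alpha}_{t-1}

# Posterior variance with the interpolation described in the constructor comment:
# sigma^2 = (1 - v) * beta_tilde + v * beta
v_posterior = 0.0
beta_tilde = betas * (1.0 - alphas_cumprod_prev) / (1.0 - alphas_cumprod)
posterior_variance = (1 - v_posterior) * beta_tilde + v_posterior * betas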
mjavadpur/mj_ONNX_SadTalker
inference_onnx.py
[ { "identifier": "AnimateFromCoeff", "path": "src/facerender/animate_onnx.py", "snippet": "class AnimateFromCoeff():\n\n def __init__(self, sadtalker_path, device):\n\n with open(sadtalker_path['facerender_yaml']) as f:\n config = yaml.safe_load(f)\n\n generator = OcclusionAwa...
from glob import glob from time import strftime from argparse import ArgumentParser from src.facerender.animate_onnx import AnimateFromCoeff from src.generate_batch import get_data from src.generate_facerender_batch import get_facerender_data from src.utils.init_path import init_path from src.utils.preprocess import CropAndExtract from src.test_audio2coeff import Audio2Coeff from src.generate_batch import get_data from src.generate_facerender_batch import get_facerender_data from src.utils.init_path import init_path from src.face3d.visualize import gen_composed_video import shutil import torch import os, sys, time import base64
13,128
# from src.facerender.animate import AnimateFromCoeff def main(args): #torch.backends.cudnn.enabled = False # tts_service = os.getenv("TTS_SERVER") facerender_batch_size = 10 startInference = time.time() pic_path = args.source_image audio_path = args.driven_audio save_dir = os.path.join(args.result_dir, strftime("%Y_%m_%d_%H.%M.%S")) os.makedirs(save_dir, exist_ok=True) pose_style = args.pose_style device = args.device batch_size = args.batch_size input_yaw_list = args.input_yaw input_pitch_list = args.input_pitch input_roll_list = args.input_roll ref_eyeblink = args.ref_eyeblink ref_pose = args.ref_pose current_root_path = os.path.split(sys.argv[0])[0] sadtalker_paths = init_path(args.checkpoint_dir, os.path.join(current_root_path, 'src/config'), args.size, args.old_version, args.preprocess) #init model preprocess_model = CropAndExtract(sadtalker_paths, device) audio_to_coeff = Audio2Coeff(sadtalker_paths, device) animate_from_coeff = AnimateFromCoeff(sadtalker_paths, device) #crop image and extract 3dmm from image first_frame_dir = os.path.join(save_dir, 'first_frame_dir') os.makedirs(first_frame_dir, exist_ok=True) print('3DMM Extraction for source image') first_coeff_path, crop_pic_path, crop_info = preprocess_model.generate(pic_path, first_frame_dir, args.preprocess,\ source_image_flag=True, pic_size=args.size) if first_coeff_path is None: print("Can't get the coeffs of the input") return if ref_eyeblink is not None: ref_eyeblink_videoname = os.path.splitext(os.path.split(ref_eyeblink)[-1])[0] ref_eyeblink_frame_dir = os.path.join(save_dir, ref_eyeblink_videoname) os.makedirs(ref_eyeblink_frame_dir, exist_ok=True) print('3DMM Extraction for the reference video providing eye blinking') ref_eyeblink_coeff_path, _, _ = preprocess_model.generate(ref_eyeblink, ref_eyeblink_frame_dir, args.preprocess, source_image_flag=False) else: ref_eyeblink_coeff_path=None if ref_pose is not None: if ref_pose == ref_eyeblink: ref_pose_coeff_path = ref_eyeblink_coeff_path else: ref_pose_videoname = os.path.splitext(os.path.split(ref_pose)[-1])[0] ref_pose_frame_dir = os.path.join(save_dir, ref_pose_videoname) os.makedirs(ref_pose_frame_dir, exist_ok=True) print('3DMM Extraction for the reference video providing pose') ref_pose_coeff_path, _, _ = preprocess_model.generate(ref_pose, ref_pose_frame_dir, args.preprocess, source_image_flag=False) else: ref_pose_coeff_path=None #audio2coeff
# from src.facerender.animate import AnimateFromCoeff def main(args): #torch.backends.cudnn.enabled = False # tts_service = os.getenv("TTS_SERVER") facerender_batch_size = 10 startInference = time.time() pic_path = args.source_image audio_path = args.driven_audio save_dir = os.path.join(args.result_dir, strftime("%Y_%m_%d_%H.%M.%S")) os.makedirs(save_dir, exist_ok=True) pose_style = args.pose_style device = args.device batch_size = args.batch_size input_yaw_list = args.input_yaw input_pitch_list = args.input_pitch input_roll_list = args.input_roll ref_eyeblink = args.ref_eyeblink ref_pose = args.ref_pose current_root_path = os.path.split(sys.argv[0])[0] sadtalker_paths = init_path(args.checkpoint_dir, os.path.join(current_root_path, 'src/config'), args.size, args.old_version, args.preprocess) #init model preprocess_model = CropAndExtract(sadtalker_paths, device) audio_to_coeff = Audio2Coeff(sadtalker_paths, device) animate_from_coeff = AnimateFromCoeff(sadtalker_paths, device) #crop image and extract 3dmm from image first_frame_dir = os.path.join(save_dir, 'first_frame_dir') os.makedirs(first_frame_dir, exist_ok=True) print('3DMM Extraction for source image') first_coeff_path, crop_pic_path, crop_info = preprocess_model.generate(pic_path, first_frame_dir, args.preprocess,\ source_image_flag=True, pic_size=args.size) if first_coeff_path is None: print("Can't get the coeffs of the input") return if ref_eyeblink is not None: ref_eyeblink_videoname = os.path.splitext(os.path.split(ref_eyeblink)[-1])[0] ref_eyeblink_frame_dir = os.path.join(save_dir, ref_eyeblink_videoname) os.makedirs(ref_eyeblink_frame_dir, exist_ok=True) print('3DMM Extraction for the reference video providing eye blinking') ref_eyeblink_coeff_path, _, _ = preprocess_model.generate(ref_eyeblink, ref_eyeblink_frame_dir, args.preprocess, source_image_flag=False) else: ref_eyeblink_coeff_path=None if ref_pose is not None: if ref_pose == ref_eyeblink: ref_pose_coeff_path = ref_eyeblink_coeff_path else: ref_pose_videoname = os.path.splitext(os.path.split(ref_pose)[-1])[0] ref_pose_frame_dir = os.path.join(save_dir, ref_pose_videoname) os.makedirs(ref_pose_frame_dir, exist_ok=True) print('3DMM Extraction for the reference video providing pose') ref_pose_coeff_path, _, _ = preprocess_model.generate(ref_pose, ref_pose_frame_dir, args.preprocess, source_image_flag=False) else: ref_pose_coeff_path=None #audio2coeff
batch = get_data(first_coeff_path, audio_path, device, ref_eyeblink_coeff_path, still=args.still)
6
2023-11-25 06:53:12+00:00
16k
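Condensed, the inference pipeline in the record above runs in three stages: 3DMM preprocessing of the source image, audio-to-coefficient mapping, and face rendering. A minimal usage sketch, restricted to the calls whose signatures actually appear in the record; the input paths are hypothetical, and the later generate() calls are only noted in comments because the record does not show them.

import os
from src.utils.init_path import init_path
from src.utils.preprocess import CropAndExtract
from src.generate_batch import get_data

device = "cuda"
# Arguments mirror main(): checkpoint dir, config dir, size, old_version, preprocess.
paths = init_path("./checkpoints", "./src/config", 256, False, "crop")

preprocess_model = CropAndExtract(paths, device)
os.makedirs("results/first_frame_dir", exist_ok=True)
coeff_path, crop_pic_path, crop_info = preprocess_model.generate(
    "examples/face.png", "results/first_frame_dir", "crop",  # hypothetical inputs
    source_image_flag=True, pic_size=256)

# The record's gold line then builds the audio batch (None = no eye-blink reference):
batch = get_data(coeff_path, "examples/voice.wav", device, None, still=False)
# Audio2Coeff and AnimateFromCoeff consume `batch` next; their generate()
# signatures are not shown in this record, so they are omitted here.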
microsoft/Project-BayesDAG
src/causica/preprocessing/data_processor.py
[ { "identifier": "CausalDataset", "path": "src/causica/datasets/dataset.py", "snippet": "class CausalDataset(Dataset):\n \"\"\"\n Class to store the np.ndarray adjacency matrix and samples\n from the intervention distributions as attributes of the Dataset object.\n \"\"\"\n\n def __init__...
import logging import warnings import numpy as np import torch from typing import Iterable, List, Optional, Tuple, TypeVar, Union from scipy import sparse from scipy.sparse import csr_matrix, issparse from sklearn.exceptions import NotFittedError from sklearn.preprocessing import OneHotEncoder, StandardScaler from sklearn.utils.validation import check_is_fitted from tqdm import tqdm from ..datasets.dataset import CausalDataset, Dataset, SparseDataset from ..datasets.intervention_data import InterventionData from ..datasets.variables import Variables from .transforms import IdentityTransform, UnitScaler
12,549
self._txt_unproc_cols, self._txt_proc_cols = [], [] self._num_processed_cols = sum(var.processed_dim for var in self._variables) def process_data_and_masks( self, data: csr_matrix, data_mask: csr_matrix, *extra_masks: csr_matrix, batch_size: int = 1000, ) -> Tuple[csr_matrix, ...]: """ Process and validate data, data mask and optionally any number of additional masks. These masks will all be applied to the data when performing data range validation, in case of e.g. dummy zero data that is masked out by an additional obs_mask. Args: data: Unprocessed data array data_mask: Data indicating which values in `data` are observed. Can be any dtype provided all values are either 0 or 1. extra_masks: Additional masks to be processed, if any. Can be any dtype provided all values are either 0 or 1. batch_size: Batch size used during data preprocessing for sparse matrices. Returns: processed_data: Data with categorical variables expanded to a one-hot encoding, and features normalised. processed_data_mask: Boolean mask with categorical variables expanded to a one-hot encoding. processed_extra_masks: Any additional boolean masks with categorical variables expanded to a one-hot encoding. """ if not issparse(data): ( proc_data, proc_data_mask, *proc_extra_masks, ) = self._process_and_check_dense(data, data_mask, *extra_masks) else: # Break sparse data into smaller batches and preprocess each as a dense array. Somewhat inefficient but # allows us to reuse our preprocessing functions and keeps memory usage manageable. proc_data_list: List[csr_matrix] = [] proc_data_mask_list: List[csr_matrix] = [] proc_extra_masks_lists: Tuple[List[csr_matrix], ...] = tuple([] for mask in extra_masks) num_rows = data.shape[0] for start_idx in tqdm(range(0, num_rows, batch_size), desc="Data preprocessing"): stop_idx = min(start_idx + batch_size, num_rows) data_batch = data[start_idx:stop_idx].toarray() data_mask_batch = data_mask[start_idx:stop_idx].toarray() extra_masks_batch = tuple(mask[start_idx:stop_idx].toarray() for mask in extra_masks) # TODO: we will currently lose sparsity for rescaled continuous data here, since 0 will be mapped to # another value. We could multiply by the mask to zero out unobserved data but we need to make sure this # doesn't have any unintended consequences for cases with more complex masking, e.g. active learning ( proc_data_batch, proc_data_mask_batch, *proc_extra_masks_batch, ) = self._process_and_check_dense(data_batch, data_mask_batch, *extra_masks_batch) proc_data_list.append(csr_matrix(proc_data_batch)) proc_data_mask_list.append(csr_matrix(proc_data_mask_batch)) for mask_list, mask in zip(proc_extra_masks_lists, proc_extra_masks_batch): mask_list.append(csr_matrix(mask)) proc_data = sparse.vstack(proc_data_list, format="csr") proc_data_mask = sparse.vstack(proc_data_mask_list, format="csr") proc_extra_masks = tuple( sparse.vstack(proc_mask_list, format="csr") for proc_mask_list in proc_extra_masks_lists ) return (proc_data, proc_data_mask, *proc_extra_masks) def _process_and_check_dense(self, data: np.ndarray, data_mask: np.ndarray, *extra_masks: np.ndarray): """ Check validity of dense data and masks and process them. 
""" combined_mask = data_mask for mask in extra_masks: combined_mask = combined_mask * mask self.check_data(data, combined_mask) self.check_mask(data_mask) for mask in extra_masks: self.check_mask(mask) proc_data = self.process_data(data) proc_data_mask = self.process_mask(data_mask) proc_extra_masks = tuple(self.process_mask(mask) for mask in extra_masks) return (proc_data, proc_data_mask, *proc_extra_masks) def process_intervention_data( self, intervention_data: Union[InterventionData, Iterable[InterventionData]] ) -> List[InterventionData]: """Preprocesses data in the InterventionData format and returns a list of processed InterventionData objects. Args: intervention_data (Union[InterventionData, Iterable[InterventionData]]): InterventionData object or list of InterventionData objects to be processed. Returns: List[InterventionData]: List of processed InterventionData objects. """ if isinstance(intervention_data, InterventionData): intervention_data = [intervention_data] proc_intervention = [ InterventionData( i.intervention_idxs, self.process_data_subset_by_group(i.intervention_values, i.intervention_idxs), self.process_data(i.test_data), i.conditioning_idxs, self.process_data_subset_by_group(i.conditioning_values, i.conditioning_idxs), i.effect_idxs, self.process_data_subset_by_group(i.intervention_reference, i.intervention_idxs), self.process_data(i.reference_data) if i.reference_data is not None else None, ) for i in intervention_data ] return proc_intervention def process_dataset(
EPSILON = 1e-5 logger = logging.getLogger(__name__) V = TypeVar("V", np.ndarray, torch.Tensor) # pylint: disable=protected-access class DataProcessor: def __init__( self, variables: Variables, unit_scale_continuous: bool = True, standardize_data_mean: bool = False, standardize_data_std: bool = False, ): """ Args: variables (Variables): Information about variables/features used by this model. unit_scale_continuous (bool): Scale continuous variables to the range of [0, 1]. standardize_data_mean (bool): Standardize continuous variables to mean=0 standardize_data_std (bool): Standardize continuous variables to std=1 """ if unit_scale_continuous and (standardize_data_mean or standardize_data_std): raise ValueError("Cannot unit scale and standardize variables simultaneously.") self._variables = variables # Call unprocessed columns unproc_cols, processed columns proc_cols unproc_cols_by_type = self._variables.unprocessed_cols_by_type proc_cols_by_type = self._variables.processed_cols_by_type def flatten(lists): # Flatten proc_cols for continuous and binary unproc_cols, since they will be of form [[1], [2], ...] return [i for sublist in lists for i in sublist] if "binary" in unproc_cols_by_type: self._bin_unproc_cols = unproc_cols_by_type["binary"] self._bin_proc_cols = flatten(proc_cols_by_type["binary"]) # Save contiguous regions containing binary features to allow for more efficient processing via slicing self._bin_unproc_regions = self.split_contiguous_sublists(self._bin_unproc_cols) self._bin_proc_regions = self.split_contiguous_sublists(self._bin_proc_cols) assert len(self._bin_unproc_regions) == len(self._bin_proc_regions) else: self._bin_unproc_cols, self._bin_proc_cols = [], [] if "continuous" in unproc_cols_by_type: self._cts_unproc_cols = unproc_cols_by_type["continuous"] self._cts_proc_cols = flatten(proc_cols_by_type["continuous"]) # Save contiguous regions containing continuous features to allow for more efficient processing via slicing if all(x.overwrite_processed_dim is None for x in self._variables): self._cts_unproc_regions = self.split_contiguous_sublists(self._cts_unproc_cols) self._cts_proc_regions = self.split_contiguous_sublists(self._cts_proc_cols) else: # For VAEM, we can only take single variable as region # to allow for processing/reverting mask self._cts_unproc_regions = [[col_id] for col_id in unproc_cols_by_type["continuous"]] self._cts_proc_regions = proc_cols_by_type["continuous"] assert len(self._cts_unproc_regions) == len(self._cts_proc_regions) if unit_scale_continuous: self._cts_normalizers = [ UnitScaler(variables[i] for i in unproc_region) for unproc_region in self._cts_unproc_regions ] elif standardize_data_mean or standardize_data_std: self._cts_normalizers = [ StandardScaler(with_mean=standardize_data_mean, with_std=standardize_data_std) for _ in self._cts_unproc_regions ] else: self._cts_normalizers = [IdentityTransform()] * len(self._cts_unproc_regions) else: self._cts_unproc_cols, self._cts_proc_cols, self._cts_normalizers = [], [], [] if "categorical" in unproc_cols_by_type: self._cat_unproc_cols = unproc_cols_by_type["categorical"] self._cat_proc_cols = flatten(proc_cols_by_type["categorical"]) self._cat_proc_cols_grouped = proc_cols_by_type["categorical"] def get_lower(idx): return self._variables[idx].lower def get_upper(idx): return self._variables[idx].upper var_categories = [ np.arange(int(get_lower(var_idx)), int(get_upper(var_idx)) + 1) for var_idx in self._cat_unproc_cols ] self._one_hot_encoder = OneHotEncoder(categories=var_categories, 
sparse=False, handle_unknown="ignore") # Fit on dummy data due to an issue in sklearn where the encoder needs to be fitted to data even if the # categories are specified upon creation. self._one_hot_encoder.fit(np.array([categories[0] for categories in var_categories]).reshape(1, -1)) else: self._cat_unproc_cols, self._cat_proc_cols = [], [] self._txt_unproc_cols, self._txt_proc_cols = [], [] self._num_processed_cols = sum(var.processed_dim for var in self._variables) def process_data_and_masks( self, data: csr_matrix, data_mask: csr_matrix, *extra_masks: csr_matrix, batch_size: int = 1000, ) -> Tuple[csr_matrix, ...]: """ Process and validate data, data mask and optionally any number of additional masks. These masks will all be applied to the data when performing data range validation, in case of e.g. dummy zero data that is masked out by an additional obs_mask. Args: data: Unprocessed data array data_mask: Data indicating which values in `data` are observed. Can be any dtype provided all values are either 0 or 1. extra_masks: Additional masks to be processed, if any. Can be any dtype provided all values are either 0 or 1. batch_size: Batch size used during data preprocessing for sparse matrices. Returns: processed_data: Data with categorical variables expanded to a one-hot encoding, and features normalised. processed_data_mask: Boolean mask with categorical variables expanded to a one-hot encoding. processed_extra_masks: Any additional boolean masks with categorical variables expanded to a one-hot encoding. """ if not issparse(data): ( proc_data, proc_data_mask, *proc_extra_masks, ) = self._process_and_check_dense(data, data_mask, *extra_masks) else: # Break sparse data into smaller batches and preprocess each as a dense array. Somewhat inefficient but # allows us to reuse our preprocessing functions and keeps memory usage manageable. proc_data_list: List[csr_matrix] = [] proc_data_mask_list: List[csr_matrix] = [] proc_extra_masks_lists: Tuple[List[csr_matrix], ...] = tuple([] for mask in extra_masks) num_rows = data.shape[0] for start_idx in tqdm(range(0, num_rows, batch_size), desc="Data preprocessing"): stop_idx = min(start_idx + batch_size, num_rows) data_batch = data[start_idx:stop_idx].toarray() data_mask_batch = data_mask[start_idx:stop_idx].toarray() extra_masks_batch = tuple(mask[start_idx:stop_idx].toarray() for mask in extra_masks) # TODO: we will currently lose sparsity for rescaled continuous data here, since 0 will be mapped to # another value. We could multiply by the mask to zero out unobserved data but we need to make sure this # doesn't have any unintended consequences for cases with more complex masking, e.g. active learning ( proc_data_batch, proc_data_mask_batch, *proc_extra_masks_batch, ) = self._process_and_check_dense(data_batch, data_mask_batch, *extra_masks_batch) proc_data_list.append(csr_matrix(proc_data_batch)) proc_data_mask_list.append(csr_matrix(proc_data_mask_batch)) for mask_list, mask in zip(proc_extra_masks_lists, proc_extra_masks_batch): mask_list.append(csr_matrix(mask)) proc_data = sparse.vstack(proc_data_list, format="csr") proc_data_mask = sparse.vstack(proc_data_mask_list, format="csr") proc_extra_masks = tuple( sparse.vstack(proc_mask_list, format="csr") for proc_mask_list in proc_extra_masks_lists ) return (proc_data, proc_data_mask, *proc_extra_masks) def _process_and_check_dense(self, data: np.ndarray, data_mask: np.ndarray, *extra_masks: np.ndarray): """ Check validity of dense data and masks and process them. 
""" combined_mask = data_mask for mask in extra_masks: combined_mask = combined_mask * mask self.check_data(data, combined_mask) self.check_mask(data_mask) for mask in extra_masks: self.check_mask(mask) proc_data = self.process_data(data) proc_data_mask = self.process_mask(data_mask) proc_extra_masks = tuple(self.process_mask(mask) for mask in extra_masks) return (proc_data, proc_data_mask, *proc_extra_masks) def process_intervention_data( self, intervention_data: Union[InterventionData, Iterable[InterventionData]] ) -> List[InterventionData]: """Preprocesses data in the InterventionData format and returns a list of processed InterventionData objects. Args: intervention_data (Union[InterventionData, Iterable[InterventionData]]): InterventionData object or list of InterventionData objects to be processed. Returns: List[InterventionData]: List of processed InterventionData objects. """ if isinstance(intervention_data, InterventionData): intervention_data = [intervention_data] proc_intervention = [ InterventionData( i.intervention_idxs, self.process_data_subset_by_group(i.intervention_values, i.intervention_idxs), self.process_data(i.test_data), i.conditioning_idxs, self.process_data_subset_by_group(i.conditioning_values, i.conditioning_idxs), i.effect_idxs, self.process_data_subset_by_group(i.intervention_reference, i.intervention_idxs), self.process_data(i.reference_data) if i.reference_data is not None else None, ) for i in intervention_data ] return proc_intervention def process_dataset(
self, dataset: Union[Dataset, CausalDataset, SparseDataset]
0
2023-11-21 12:55:08+00:00
16k
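The core trick in process_data_and_masks above is to densify a sparse matrix one row-batch at a time, run the dense preprocessing on each window, and re-stack the results as CSR so peak memory stays bounded. A standalone sketch of that pattern; the doubling transform is a stand-in for the class's real _process_and_check_dense, and, as the record's TODO notes, a rescaling transform like this can destroy sparsity.

import numpy as np
from scipy import sparse
from scipy.sparse import csr_matrix

def process_sparse_in_batches(data: csr_matrix, batch_size: int = 1000) -> csr_matrix:
    chunks = []
    for start in range(0, data.shape[0], batch_size):
        stop = min(start + batch_size, data.shape[0])
        dense = data[start:stop].toarray()  # only a small window is dense at once
        dense = dense * 2.0                 # stand-in for the real per-batch processing
        chunks.append(csr_matrix(dense))
    return sparse.vstack(chunks, format="csr")

x = csr_matrix(np.eye(5))
assert process_sparse_in_batches(x, batch_size=2).shape == (5, 5)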
ChenyangGao/python-epub3
epub3/epub.py
[ { "identifier": "File", "path": "epub3/util/file.py", "snippet": "class File:\n __slots__ = (\"path\", \"fs\", \"open\", \"open_modes\", \"_getattr\")\n ALL_MODES = frozenset(\"rwxab+\")\n\n def __init__(\n self, \n /, \n path=None, \n fs=None, \n open_modes=N...
import errno import io import os import os.path as ospath import posixpath from copy import deepcopy from datetime import datetime from fnmatch import translate as wildcard_translate from functools import cached_property, partial from inspect import getfullargspec, isclass from io import IOBase, TextIOWrapper from operator import methodcaller from os import fsdecode, remove, stat, stat_result, PathLike from pathlib import PurePosixPath from posixpath import join as joinpath, normpath from pprint import pformat from re import compile as re_compile, escape as re_escape, Pattern from shutil import copy, copyfileobj from typing import cast, Any, Callable, Container, Mapping, MutableMapping, Optional from types import MappingProxyType from uuid import uuid4 from warnings import warn from weakref import WeakKeyDictionary, WeakValueDictionary from urllib.parse import quote, unquote from zipfile import ZipFile, ZIP_STORED from .util.file import File, RootFS, TemporaryFS, OPEN_MODES from .util.helper import guess_media_type, values, items, sup from .util.proxy import proxy_property, ElementAttribProxy, ElementProxy, NAMESPACES from .util.remap import remap_links from .util.stream import PyLinq from .util.xml import el_add, el_del, el_iterfind, el_set from .util.undefined import undefined, UndefinedType from lxml.etree import fromstring, tostring, _Element as Element, _ElementTree as ElementTree # type: ignore from xml.etree.ElementTree import fromstring, tostring, Element, ElementTree # type: ignore
13,861
self._proxy.merge(id_or_attrib) return self def update(self, id_or_attrib=None, /, **attrs): if attrs: if isinstance(id_or_attrib, Item): item = id_or_attrib if item not in self: raise LookupError(f"no such item: {item!r}") item.update(attrib=attrs) elif isinstance(id_or_attrib, str): id = id_or_attrib item = super().get(id) if item is None: if "href" in attrs: href = attrs.pop("href") self.add(href, id=id, attrib=attrs) else: raise LookupError(f"no such item: {id!r}") else: item.update(attrs) else: self._proxy.update(id_or_attrib, **attrs) elif isinstance(id_or_attrib, Mapping): self._proxy.update(id_or_attrib) return self #################### SubElement Methods #################### @PyLinq.streamify def filter(self, /, predicate=None): if not callable(predicate): return iter(self.values()) return filter(predicate, self.values()) @PyLinq.streamify def filter_by_attr(self, predicate=None, attr="media-type", /): def activate_predicate(predicate): if predicate is None: return None if callable(predicate): return predicate elif isinstance(predicate, Pattern): return predicate.search elif isinstance(predicate, str): use_false = False if predicate.startswith(r"!"): use_false = True predicate = predicate[1:] predicate_startswith = predicate.startswith if predicate_startswith(r"="): predicate = predicate[1:].__eq__ elif predicate_startswith(r"~"): predicate = methodcaller("__contains__", predicate[1:]) elif predicate_startswith(r"^"): predicate = methodcaller("startswith", predicate[1:]) elif predicate_startswith(r"$"): predicate = methodcaller("endswith", predicate[1:]) elif predicate_startswith(r";"): predicate = lambda s, needle=predicate[1:]: needle in s.split() elif predicate_startswith(r","): predicate = lambda s, needle=predicate[1:]: needle in s.split(",") elif predicate_startswith(r"<"): predicate = re_compile(r"\b"+re_escape(predicate[1:])).search elif predicate_startswith(r">"): predicate = re_compile(re_escape(predicate[1:])+r"\b").search elif predicate_startswith(r"|"): predicate = re_compile(r"\b"+re_escape(predicate[1:])+r"\b").search elif predicate_startswith(r"*"): predicate = re_compile(wildcard_translate(predicate[1:])).fullmatch elif predicate_startswith(r"/"): predicate = re_compile(predicate[1:]).search elif predicate_startswith(r"%"): predicate = re_compile(predicate[1:]).fullmatch else: predicate = predicate.__eq__ if use_false: predicate = lambda s, _pred=predicate: not _pred(s) return predicate elif type(predicate) in (tuple, list): preds = tuple(pred for p in predicate if (pred:=activate_predicate(p)) is not None) if not preds: return None if type(predicate) is tuple: return lambda s, _preds=preds: any(p(s) for p in preds) else: return lambda s, _preds=preds: all(p(s) for p in preds) elif isinstance(predicate, Container): return predicate.__contains__ predicate = activate_predicate(predicate) if predicate is None: return filter(lambda item: attr in item, self.values()) return filter(lambda item: attr in item and predicate(item[attr]), self.values()) @PyLinq.streamify def iter(self, /): root = self._root for el in root.iterfind("*"): if not (el.tag == "item" or el.tag.endswith("}item")): yield ElementProxy(el) continue id = el.attrib.get("id") href = el.attrib.get("href") if not href: if id is None or not super().__contains__(id): try: root.remove(el) warn(f"removed a dangling item element: {el!r}") except: pass else: item = super().__getitem__(id) if item._root is not el: raise RuntimeError(f"different item elements {el!r} and {item._root!r} share the same id {id!r}") 
else: self.pop(id, None) warn(f"removed an item because of missing href attribute: {item!r}") continue href = unquote(href) if not el.attrib.get("media-type"):
#!/usr/bin/env python # coding: utf-8 __author__ = "ChenyangGao <https://chenyanggao.github.io>" __version__ = (0, 0, 1) __all__ = ["ePub", "Metadata", "DCTerm", "Meta", "Link", "Manifest", "Item", "Spine", "Itemref"] class DCTerm(ElementProxy): pass class Meta(ElementProxy): __protected_keys__ = ("property",) __optional_keys__ = ("dir", "id", "refines", "scheme", "xml:lang") class Link(ElementAttribProxy): __protected_keys__ = ("href", "rel") __optional_keys__ = ("hreflang", "id", "media-type", "properties", "refines") class Item(ElementAttribProxy): __const_keys__ = ("id",) __protected_keys__ = ("href", "media-type") __optional_keys__ = ("fallback", "media-overlay", "properties") __cache_get_state__ = lambda _, manifest: manifest def __init__(self, root: Element, manifest, /): super().__init__(root) self._manifest = manifest def __eq__(self, other, /): if type(self) is not type(other): return NotImplemented return self._manifest is other._manifest and self._attrib["href"] == other._attrib["href"] def __fspath__(self, /): return unquote(self._attrib["href"]) def __hash__(self, /): return hash((self._root, id(self._manifest))) def __setitem__(self, key, value, /): if key == "href": if value is None: raise ValueError("can't set href to None") self.rename(value) else: super().__setitem__(key, value) return self @property def filename(self, /): return PurePosixPath(joinpath(self.home, self)) @property def home(self, /): return PurePosixPath(self._manifest._epub._opf_dir) @property def name(self, /): return self.path.name @property def path(self, /): return PurePosixPath(self) @property def _parent(self, /): return posixpath.dirname(unquote(self._attrib["href"])) @property def parent(self, /): return self.path.parent @property def parents(self, /): return self.path.parents @property def parts(self, /): return self.path.parts @property def stem(self, /): return self.path.stem @property def suffix(self, /): return self.path.suffix @property def suffixes(self, /): return self.path.suffixes def update(self, attrib=None, /, **attrs): if attrib: attrib = dict(attrib) if attrs: attrib.update(attrs) else: attrib = attrs href = attrib.pop("href", None) if href: self.rename(href) if attrib: super().update(attrib) return self def is_relative_to(self, /, *other): return self.path.is_relative_to(*other) def joinpath(self, /, *others): return PurePosixPath(normpath(joinpath(self._parent, *others))) __truediv__ = joinpath def relpath(self, other, /): return PurePosixPath(posixpath.relpath(other, self._parent)) def relative_to(self, /, *other): return self.path.relative_to(*other) def with_name(self, /, name): return self.path.with_name(str(name)) def with_stem(self, /, stem): return self.path.with_stem(str(stem)) def with_suffix(self, /, suffix): return self.path.with_suffix(str(suffix)) def exists(self, /): return self._manifest.exists(self) def is_file(self, /): return self.exists() def is_dir(self, /): return False def is_symlink(self, /): return False def glob(self, /, pattern="*", ignore_case=False): return self._manifest.glob(pattern, self, ignore_case=ignore_case) def rglob(self, /, pattern="", ignore_case=False): return self._manifest.rglob(pattern, self, ignore_case=ignore_case) def iterdir(self, /): return self._manifest.iterdir(self) def match(self, /, path_pattern, ignore_case=False): path_pattern = path_pattern.strip("/") if not path_pattern: return False pattern = joinpath(*posix_glob_translate_iter(path_pattern)) if ignore_case: pattern = "(?i:%s)" % pattern 
return re_compile(pattern).fullmatch(self._attrib["href"]) is not None def open( self, /, mode="r", buffering=-1, encoding=None, errors=None, newline=None, ): return self._manifest.open( self, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, ) def read(self, /, buffering=0): return self._manifest.read(self, buffering=buffering) read_bytes = read def read_text(self, /, encoding=None): return self._manifest.read_text(self, encoding=encoding) def remove(self, /): self._manifest.remove(self) return self def rename(self, dest_href, /, repair=False): return self._manifest.rename(self, dest_href, repair=repair) def batch_rename(self, mapper, /, predicate=None, repair=False): return self._manifest.batch_rename(self, mapper, predicate=predicate, repair=repair) def replace(self, href, /): self._manifest.replace(self, href) return self def stat(self, /) -> Optional[stat_result]: return self._manifest.stat(self) def touch(self, /): self._manifest.touch(self) return self unlink = remove def write(self, /, data): return self._manifest.write(self, data) write_bytes = write def write_text(self, /, text, encoding=None, errors=None, newline=None): return self._manifest.write_text(self, text, encoding=encoding, errors=errors, newline=newline) class Itemref(ElementAttribProxy): __const_keys__ = ("idref",) __optional_keys__ = ("id", "linear", "properties") @property def linear(self, /): return "no" if self._attrib.get("linear") == "no" else "yes" @linear.setter def linear(self, value, /): self._attrib["linear"] = "no" if value == "no" else "yes" class Metadata(ElementProxy): __wrap_class_map__ = {"{*}meta": Meta, "{*}": Link, "dc:*": DCTerm} def __repr__(self, /): return f"{super().__repr__()}\n{pformat(self.iter().list())}" @property def info(self, /): return tuple(meta.info for meta in self.iter()) def add( self, name: str = "meta", /, attrib: Optional[Mapping] = None, text: Optional[str] = None, tail: Any = undefined, **_disregards, ): return super().add(name, attrib=attrib, text=text) def dc( self, name: str, text_value: UndefinedType | Optional[str] = undefined, /, find_attrib: Optional[Mapping] = None, attrib: Optional[Mapping] = None, text: Optional[str] = None, merge: bool = False, delete: bool = False, auto_add: bool = False, ): if text_value is not undefined: if find_attrib: find_attrib = {**find_attrib, "": text_value} else: find_attrib = {"": text_value} return self.setfind( "dc:%s" % name, find_attrib=find_attrib, attrib=attrib, text=text, merge=merge, delete=delete, auto_add=auto_add, ) def meta( self, preds: str = "", /, find_attrib: Optional[Mapping] = None, attrib: Optional[Mapping] = None, text: Optional[str] = None, merge: bool = False, delete: bool = False, auto_add: bool = False, ): return self.setfind( "{*}meta%s" % preds, find_attrib=find_attrib, attrib=attrib, text=text, merge=merge, delete=delete, auto_add=auto_add, ) def name_meta( self, name, content: Optional[str] = None, /, find_attrib: Optional[Mapping] = None, attrib: Optional[Mapping] = None, text: Optional[str] = None, merge: bool = False, delete: bool = False, auto_add: bool = False, ): if find_attrib: find_attrib = {**find_attrib, "name": name} else: find_attrib = {"name": name} if content is not None: find_attrib["content"] = content return self.meta( find_attrib=find_attrib, attrib=attrib, text=text, merge=merge, delete=delete, auto_add=auto_add, ) def property_meta( self, property, text_value: UndefinedType | Optional[str] = undefined, /, find_attrib: Optional[Mapping] = None, attrib: 
Optional[Mapping] = None, text: Optional[str] = None, merge: bool = False, delete: bool = False, auto_add: bool = False, ): if find_attrib: find_attrib = {**find_attrib, "property": property} else: find_attrib = {"property": property} if text_value is not undefined: find_attrib[""] = text_value return self.meta( find_attrib=find_attrib, attrib=attrib, text=text, merge=merge, delete=delete, auto_add=auto_add, ) class ManifestProxy(ElementAttribProxy): __optional_keys__ = ("id",) class Manifest(dict[str, Item]): def __init__(self, /, root: Element, epub): self._root = root self._attrib = root.attrib self._epub = epub self._proxy = ManifestProxy(root) self._href_to_id: dict[str, str] = {} self._href_to_file: dict[str, File] = {} if len(root): href_to_id = self._href_to_id dangling_items = [] for item in root.iterfind("{*}item"): id = item.attrib.get("id") href = item.attrib.get("href") if id is None or not href: dangling_items.append(item) continue id = cast(str, id) href = cast(str, unquote(href)) super().__setitem__(id, Item(item, self)) href_to_id[href] = id if dangling_items: for item in reversed(dangling_items): root.remove(item) warn(f"removed a dangling item element: {item!r}") zfile = epub.__dict__.get("_zfile") opf_dir = epub._opf_dir if zfile: href_to_file = self._href_to_file for href in href_to_id: zpath = joinpath(opf_dir, href) zinfo = zfile.NameToInfo.get(zpath) if not zinfo or zinfo.is_dir(): warn(f"missing file in original epub: {href!r}") href_to_file[href] = File(str(uuid4()), self._workfs) else: href_to_file[href] = File(zpath, zfile, open_modes="r") def __init_subclass__(self, /, **kwargs): raise TypeError("subclassing is not allowed") def __call__(self, href, /): if isinstance(href, Item): if href not in self: raise LookupError(f"no such item: {href!r}") return href if isinstance(href, (bytes, PathLike)): href = fsdecode(href) else: href = str(href) assert (href := href.strip("/")), "empty href" try: id = self._href_to_id[href] except LookupError as e: raise FileNotFoundError(errno.ENOENT, f"no such file: {href!r}") from e return super().__getitem__(id) def __contains__(self, other, /): if isinstance(other, Item): return other._manifest is self and super().__contains__(other["id"]) return super().__contains__(other) def __delitem__(self, key, /): pop = self.pop if isinstance(key, int): el = self._root[key] try: id = el.attrib["id"] except AttributeError: try: self._root.remove(el) except: pass else: pop(id) elif isinstance(key, slice): root = self._root for el in root[key]: try: id = el.attrib["id"] except AttributeError: try: root.remove(el) except: pass else: pop(id, None) elif isinstance(key, Item): if key not in self: raise LookupError(f"no such item: {key!r}") pop(key["id"]) elif isinstance(key, str): pop(key) else: raise TypeError("`key` only accepts: `str`, `int`, `slice`, `Item`") return self def __getitem__(self, key, /): def wrap(el): try: if el.tag == "item" or el.tag.endswith("}item"): return Item(el, self) return ElementProxy(el) except AttributeError: return el if isinstance(key, int): return wrap(self._root[key]) elif isinstance(key, slice): return list(map(wrap, self._root[key])) elif isinstance(key, Item): if key not in self: raise LookupError(f"no such item: {key!r}") return key elif isinstance(key, str): return super().__getitem__(key) else: raise TypeError("`key` only accepts: `str`, `int`, `slice`, `Item`") def __setitem__(self, id, value, /): if id not in self: raise LookupError(f"no such item: {id!r}") if isinstance(id, Item): item = id else: item 
= super().__getitem__(id) href = unquote(item._attrib["href"]) if isinstance(value, str): self.rename(href, value) elif isinstance(value, bytes): self.write(href, value) elif isinstance(value, Mapping): if "open" in value and callable(value["open"]): self._href_to_file[href] = File(value, open_modes="rb") else: item.update(value) else: self._href_to_file[href] = File(value, open_modes="rb") return self @cached_property def _workfs(self, /): if self._epub._maketemp: return TemporaryFS(self._epub._workroot) else: return RootFS(self._epub._workroot) @cached_property def href_to_id(self, /): return MappingProxyType(self._href_to_id) @cached_property def href_to_file(self, /): return MappingProxyType(self._href_to_file) @property def home(self, /): return self._epub._opf_dir @property def attrib(self, /): return self._attrib @property def proxy(self, /): return self._proxy @property def info(self, /): return tuple(item.info for item in self.values()) delete = __delitem__ def clear(self, /): self._root.clear() self._href_to_file.clear() self._href_to_id.clear() super().clear() return self def pop(self, id, /, default=undefined): if id not in self: if default is undefined: raise LookupError(f"no such item: {id!r}") return default if isinstance(id, Item): id = id["id"] item = super().pop(id) try: self._root.remove(item._root) except: pass href = unquote(item._attrib["href"]) self._href_to_id.pop(href, None) file = self._href_to_file.pop(href, None) if file is not None and file.check_open_mode("w"): try: file.remove() except: pass return item def popitem(self, /): id, item = super().popitem() try: self._root.remove(item._root) except: pass href = unquote(item._attrib["href"]) self._href_to_id.pop(href, None) file = self._href_to_file.pop(href, None) if file is not None and file.check_open_mode("w"): try: file.remove() except: pass return id, item def set(self, id, value, /): if isinstance(id, Item): if id not in self: raise LookupError(f"no such item: {id!r}") item = id else: item = super().get(id) if item is None: if isinstance(value, str): item = self.add(value, id=id) elif isinstance(value, Mapping) and "href" in value: if "open" in value and callable(value["open"]): item = self.add(value["href"], value, id=id) else: item = self.add(value["href"], id=id, attrib=value) else: raise LookupError(f"no such item: {id!r}") else: href = unquote(item._attrib["href"]) if isinstance(value, str): self.rename(href, value) elif isinstance(value, bytes): self.write(href, value) elif isinstance(value, Mapping): if "open" in value and callable(value["open"]): self._href_to_file[href] = File(value, open_modes="rb") else: item.update(value) else: self._href_to_file[href] = File(value, open_modes="rb") return item def setdefault(self, id, value, /): if isinstance(id, Item): if id not in self: raise LookupError(f"no such item: {id!r}") item = id else: item = super().get(id) if item is None: if isinstance(value, str): item = self.add(value, id=id) elif isinstance(value, Mapping) and "href" in value: if "open" in value and callable(value["open"]): item = self.add(value["href"], value, id=id) else: item = self.add(value["href"], id=id, attrib=value) else: raise LookupError(f"no such item: {id!r}") else: if isinstance(value, Mapping) and not ("open" in value and callable(value["open"])): item.merge(value) return item def merge(self, id_or_attrib=None, /, **attrs): if attrs: if isinstance(id_or_attrib, Item): item = id_or_attrib if item not in self: raise LookupError(f"no such item: {item!r}") item.merge(attrib=attrs) 
elif isinstance(id_or_attrib, str): id = id_or_attrib item = super().get(id) if item is None: if "href" in attrs: href = attrs.pop("href") self.add(href, id=id, attrib=attrs) else: raise LookupError(f"no such item: {id!r}") else: item.merge(attrs) else: self._proxy.merge(id_or_attrib, **attrs) elif isinstance(id_or_attrib, Mapping): self._proxy.merge(id_or_attrib) return self def update(self, id_or_attrib=None, /, **attrs): if attrs: if isinstance(id_or_attrib, Item): item = id_or_attrib if item not in self: raise LookupError(f"no such item: {item!r}") item.update(attrib=attrs) elif isinstance(id_or_attrib, str): id = id_or_attrib item = super().get(id) if item is None: if "href" in attrs: href = attrs.pop("href") self.add(href, id=id, attrib=attrs) else: raise LookupError(f"no such item: {id!r}") else: item.update(attrs) else: self._proxy.update(id_or_attrib, **attrs) elif isinstance(id_or_attrib, Mapping): self._proxy.update(id_or_attrib) return self #################### SubElement Methods #################### @PyLinq.streamify def filter(self, /, predicate=None): if not callable(predicate): return iter(self.values()) return filter(predicate, self.values()) @PyLinq.streamify def filter_by_attr(self, predicate=None, attr="media-type", /): def activate_predicate(predicate): if predicate is None: return None if callable(predicate): return predicate elif isinstance(predicate, Pattern): return predicate.search elif isinstance(predicate, str): use_false = False if predicate.startswith(r"!"): use_false = True predicate = predicate[1:] predicate_startswith = predicate.startswith if predicate_startswith(r"="): predicate = predicate[1:].__eq__ elif predicate_startswith(r"~"): predicate = methodcaller("__contains__", predicate[1:]) elif predicate_startswith(r"^"): predicate = methodcaller("startswith", predicate[1:]) elif predicate_startswith(r"$"): predicate = methodcaller("endswith", predicate[1:]) elif predicate_startswith(r";"): predicate = lambda s, needle=predicate[1:]: needle in s.split() elif predicate_startswith(r","): predicate = lambda s, needle=predicate[1:]: needle in s.split(",") elif predicate_startswith(r"<"): predicate = re_compile(r"\b"+re_escape(predicate[1:])).search elif predicate_startswith(r">"): predicate = re_compile(re_escape(predicate[1:])+r"\b").search elif predicate_startswith(r"|"): predicate = re_compile(r"\b"+re_escape(predicate[1:])+r"\b").search elif predicate_startswith(r"*"): predicate = re_compile(wildcard_translate(predicate[1:])).fullmatch elif predicate_startswith(r"/"): predicate = re_compile(predicate[1:]).search elif predicate_startswith(r"%"): predicate = re_compile(predicate[1:]).fullmatch else: predicate = predicate.__eq__ if use_false: predicate = lambda s, _pred=predicate: not _pred(s) return predicate elif type(predicate) in (tuple, list): preds = tuple(pred for p in predicate if (pred:=activate_predicate(p)) is not None) if not preds: return None if type(predicate) is tuple: return lambda s, _preds=preds: any(p(s) for p in preds) else: return lambda s, _preds=preds: all(p(s) for p in preds) elif isinstance(predicate, Container): return predicate.__contains__ predicate = activate_predicate(predicate) if predicate is None: return filter(lambda item: attr in item, self.values()) return filter(lambda item: attr in item and predicate(item[attr]), self.values()) @PyLinq.streamify def iter(self, /): root = self._root for el in root.iterfind("*"): if not (el.tag == "item" or el.tag.endswith("}item")): yield ElementProxy(el) continue id = el.attrib.get("id") 
href = el.attrib.get("href") if not href: if id is None or not super().__contains__(id): try: root.remove(el) warn(f"removed a dangling item element: {el!r}") except: pass else: item = super().__getitem__(id) if item._root is not el: raise RuntimeError(f"different item elements {el!r} and {item._root!r} share the same id {id!r}") else: self.pop(id, None) warn(f"removed an item because of missing href attribute: {item!r}") continue href = unquote(href) if not el.attrib.get("media-type"):
el.attrib["media-type"] = guess_media_type(href)
4
2023-11-20 14:46:41+00:00
16k
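Manifest.filter_by_attr in the record above compiles a small string prefix language into predicates: a leading "!" negates, "=" means exact equality, "~" substring, "^"/"$" prefix/suffix, "*" glob, "/" regex search, and so on. A trimmed, standalone re-implementation of a few of these prefixes, for illustration only; the real dispatcher handles more prefixes plus tuples, lists, and arbitrary containers, and the function name here is ours.

import re
from fnmatch import translate as wildcard_translate
from operator import methodcaller

def make_predicate(spec: str):
    negate = spec.startswith("!")
    if negate:
        spec = spec[1:]
    if spec.startswith("="):
        pred = spec[1:].__eq__                          # exact match
    elif spec.startswith("~"):
        pred = methodcaller("__contains__", spec[1:])   # substring
    elif spec.startswith("^"):
        pred = methodcaller("startswith", spec[1:])     # prefix
    elif spec.startswith("$"):
        pred = methodcaller("endswith", spec[1:])       # suffix
    elif spec.startswith("*"):
        pred = re.compile(wildcard_translate(spec[1:])).fullmatch  # glob
    elif spec.startswith("/"):
        pred = re.compile(spec[1:]).search              # regex search
    else:
        pred = spec.__eq__
    if negate:
        return lambda s, _p=pred: not _p(s)
    return lambda s, _p=pred: bool(_p(s))

assert make_predicate("^image/")("image/png")
assert make_predicate("!~xml")("text/css")
assert make_predicate("*application/*+zip")("application/epub+zip")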
ymp5078/AI-SAM
segment_anything/automatic_mask_generator.py
[ { "identifier": "Sam", "path": "segment_anything/modeling/sam.py", "snippet": "class Sam(nn.Module):\n mask_threshold: float = 0.0\n image_format: str = \"RGB\"\n\n def __init__(\n self,\n image_encoder: ImageEncoderViT,\n prompt_encoder: PromptEncoder,\n mask_decode...
import numpy as np import torch import cv2 # type: ignore # noqa: F401 from torchvision.ops.boxes import batched_nms, box_area # type: ignore from typing import Any, Dict, List, Optional, Tuple from .modeling import Sam from .predictor import SamPredictor from .utils.amg import ( MaskData, area_from_rle, batch_iterator, batched_mask_to_box, box_xyxy_to_xywh, build_all_layer_point_grids, calculate_stability_score, coco_encode_rle, generate_crop_boxes, is_box_near_crop_edge, mask_to_rle_pytorch, remove_small_regions, rle_to_mask, uncrop_boxes_xyxy, uncrop_masks, uncrop_points, ) from pycocotools import mask as mask_utils # type: ignore # noqa: F401
11,146
for crop_box, layer_idx in zip(crop_boxes, layer_idxs): crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) data.cat(crop_data) # Remove duplicate masks between crops if len(crop_boxes) > 1: # Prefer masks from smaller crops scores = 1 / box_area(data["crop_boxes"]) scores = scores.to(data["boxes"].device) keep_by_nms = batched_nms( data["boxes"].float(), scores, torch.zeros_like(data["boxes"][:, 0]), # categories iou_threshold=self.crop_nms_thresh, ) data.filter(keep_by_nms) data.to_numpy() return data def _process_crop( self, image: np.ndarray, crop_box: List[int], crop_layer_idx: int, orig_size: Tuple[int, ...], ) -> MaskData: # Crop the image and calculate embeddings x0, y0, x1, y1 = crop_box cropped_im = image[y0:y1, x0:x1, :] cropped_im_size = cropped_im.shape[:2] self.predictor.set_image(cropped_im) # Get points for this crop points_scale = np.array(cropped_im_size)[None, ::-1] points_for_image = self.point_grids[crop_layer_idx] * points_scale # Generate masks for this crop in batches data = MaskData() for (points,) in batch_iterator(self.points_per_batch, points_for_image): batch_data = self._process_batch( points, cropped_im_size, crop_box, orig_size ) data.cat(batch_data) del batch_data self.predictor.reset_image() # Remove duplicates within this crop. keep_by_nms = batched_nms( data["boxes"].float(), data["iou_preds"], torch.zeros_like(data["boxes"][:, 0]), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Return to the original image frame data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box) data["points"] = uncrop_points(data["points"], crop_box) data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))]) return data def _process_batch( self, points: np.ndarray, im_size: Tuple[int, ...], crop_box: List[int], orig_size: Tuple[int, ...], ) -> MaskData: orig_h, orig_w = orig_size # Run model on this batch transformed_points = self.predictor.transform.apply_coords(points, im_size) in_points = torch.as_tensor(transformed_points, device=self.predictor.device) in_labels = torch.ones( in_points.shape[0], dtype=torch.int, device=in_points.device ) masks, iou_preds, _ = self.predictor.predict_torch( in_points[:, None, :], in_labels[:, None], multimask_output=True, return_logits=True, ) # Serialize predictions and store in MaskData data = MaskData( masks=masks.flatten(0, 1), iou_preds=iou_preds.flatten(0, 1), points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), ) del masks # Filter by predicted IoU if self.pred_iou_thresh > 0.0: keep_mask = data["iou_preds"] > self.pred_iou_thresh data.filter(keep_mask) # Calculate stability score data["stability_score"] = calculate_stability_score( data["masks"], self.predictor.model.mask_threshold, self.stability_score_offset, ) if self.stability_score_thresh > 0.0: keep_mask = data["stability_score"] >= self.stability_score_thresh data.filter(keep_mask) # Threshold masks and calculate boxes data["masks"] = data["masks"] > self.predictor.model.mask_threshold data["boxes"] = batched_mask_to_box(data["masks"]) # Filter boxes that touch crop boundaries keep_mask = ~is_box_near_crop_edge( data["boxes"], crop_box, [0, 0, orig_w, orig_h] ) if not torch.all(keep_mask): data.filter(keep_mask) # Compress to RLE
# -*- coding: utf-8 -*- # Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. class SamAutomaticMaskGenerator: def __init__( self, model: Sam, points_per_side: Optional[int] = 32, points_per_batch: int = 64, pred_iou_thresh: float = 0.88, stability_score_thresh: float = 0.95, stability_score_offset: float = 1.0, box_nms_thresh: float = 0.7, crop_n_layers: int = 0, crop_nms_thresh: float = 0.7, crop_overlap_ratio: float = 512 / 1500, crop_n_points_downscale_factor: int = 1, point_grids: Optional[List[np.ndarray]] = None, min_mask_region_area: int = 0, output_mode: str = "binary_mask", ) -> None: """ Using a SAM model, generates masks for the entire image. Generates a grid of point prompts over the image, then filters low quality and duplicate masks. The default settings are chosen for SAM with a ViT-H backbone. Arguments: model (Sam): The SAM model to use for mask prediction. points_per_side (int or None): The number of points to be sampled along one side of the image. The total number of points is points_per_side**2. If None, 'point_grids' must provide explicit point sampling. points_per_batch (int): Sets the number of points run simultaneously by the model. Higher numbers may be faster but use more GPU memory. pred_iou_thresh (float): A filtering threshold in [0,1], using the model's predicted mask quality. stability_score_thresh (float): A filtering threshold in [0,1], using the stability of the mask under changes to the cutoff used to binarize the model's mask predictions. stability_score_offset (float): The amount to shift the cutoff when calculating the stability score. box_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks. crop_n_layers (int): If >0, mask prediction will be run again on crops of the image. Sets the number of layers to run, where each layer has 2**i_layer number of image crops. crop_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks between different crops. crop_overlap_ratio (float): Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the image length. Later layers with more crops scale down this overlap. crop_n_points_downscale_factor (int): The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n. point_grids (list(np.ndarray) or None): A list over explicit grids of points used for sampling, normalized to [0,1]. The nth grid in the list is used in the nth crop layer. Exclusive with points_per_side. min_mask_region_area (int): If >0, postprocessing will be applied to remove disconnected regions and holes in masks with area smaller than min_mask_region_area. Requires opencv. output_mode (str): The form masks are returned in. Can be 'binary_mask', 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools. For large resolutions, 'binary_mask' may consume large amounts of memory. """ assert (points_per_side is None) != ( point_grids is None ), "Exactly one of points_per_side or point_grid must be provided." 
if points_per_side is not None: self.point_grids = build_all_layer_point_grids( points_per_side, crop_n_layers, crop_n_points_downscale_factor, ) elif point_grids is not None: self.point_grids = point_grids else: raise ValueError("Can't have both points_per_side and point_grid be None.") assert output_mode in [ "binary_mask", "uncompressed_rle", "coco_rle", ], f"Unknown output_mode {output_mode}." self.predictor = SamPredictor(model) self.points_per_batch = points_per_batch self.pred_iou_thresh = pred_iou_thresh self.stability_score_thresh = stability_score_thresh self.stability_score_offset = stability_score_offset self.box_nms_thresh = box_nms_thresh self.crop_n_layers = crop_n_layers self.crop_nms_thresh = crop_nms_thresh self.crop_overlap_ratio = crop_overlap_ratio self.crop_n_points_downscale_factor = crop_n_points_downscale_factor self.min_mask_region_area = min_mask_region_area self.output_mode = output_mode @torch.no_grad() def generate(self, image: np.ndarray) -> List[Dict[str, Any]]: """ Generates masks for the given image. Arguments: image (np.ndarray): The image to generate masks for, in HWC uint8 format. Returns: list(dict(str, any)): A list over records for masks. Each record is a dict containing the following keys: segmentation (dict(str, any) or np.ndarray): The mask. If output_mode='binary_mask', is an array of shape HW. Otherwise, is a dictionary containing the RLE. bbox (list(float)): The box around the mask, in XYWH format. area (int): The area in pixels of the mask. predicted_iou (float): The model's own prediction of the mask's quality. This is filtered by the pred_iou_thresh parameter. point_coords (list(list(float))): The point coordinates input to the model to generate this mask. stability_score (float): A measure of the mask's quality. This is filtered on using the stability_score_thresh parameter. crop_box (list(float)): The crop of the image used to generate the mask, given in XYWH format. 
""" # Generate masks mask_data = self._generate_masks(image) # Filter small disconnected regions and holes in masks if self.min_mask_region_area > 0: mask_data = self.postprocess_small_regions( mask_data, self.min_mask_region_area, max(self.box_nms_thresh, self.crop_nms_thresh), ) # Encode masks if self.output_mode == "coco_rle": mask_data["segmentations"] = [ coco_encode_rle(rle) for rle in mask_data["rles"] ] elif self.output_mode == "binary_mask": mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]] else: mask_data["segmentations"] = mask_data["rles"] # Write mask records curr_anns = [] for idx in range(len(mask_data["segmentations"])): ann = { "segmentation": mask_data["segmentations"][idx], "area": area_from_rle(mask_data["rles"][idx]), "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(), "predicted_iou": mask_data["iou_preds"][idx].item(), "point_coords": [mask_data["points"][idx].tolist()], "stability_score": mask_data["stability_score"][idx].item(), "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(), } curr_anns.append(ann) return curr_anns def _generate_masks(self, image: np.ndarray) -> MaskData: orig_size = image.shape[:2] crop_boxes, layer_idxs = generate_crop_boxes( orig_size, self.crop_n_layers, self.crop_overlap_ratio ) # Iterate over image crops data = MaskData() for crop_box, layer_idx in zip(crop_boxes, layer_idxs): crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) data.cat(crop_data) # Remove duplicate masks between crops if len(crop_boxes) > 1: # Prefer masks from smaller crops scores = 1 / box_area(data["crop_boxes"]) scores = scores.to(data["boxes"].device) keep_by_nms = batched_nms( data["boxes"].float(), scores, torch.zeros_like(data["boxes"][:, 0]), # categories iou_threshold=self.crop_nms_thresh, ) data.filter(keep_by_nms) data.to_numpy() return data def _process_crop( self, image: np.ndarray, crop_box: List[int], crop_layer_idx: int, orig_size: Tuple[int, ...], ) -> MaskData: # Crop the image and calculate embeddings x0, y0, x1, y1 = crop_box cropped_im = image[y0:y1, x0:x1, :] cropped_im_size = cropped_im.shape[:2] self.predictor.set_image(cropped_im) # Get points for this crop points_scale = np.array(cropped_im_size)[None, ::-1] points_for_image = self.point_grids[crop_layer_idx] * points_scale # Generate masks for this crop in batches data = MaskData() for (points,) in batch_iterator(self.points_per_batch, points_for_image): batch_data = self._process_batch( points, cropped_im_size, crop_box, orig_size ) data.cat(batch_data) del batch_data self.predictor.reset_image() # Remove duplicates within this crop. 
keep_by_nms = batched_nms( data["boxes"].float(), data["iou_preds"], torch.zeros_like(data["boxes"][:, 0]), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Return to the original image frame data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box) data["points"] = uncrop_points(data["points"], crop_box) data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))]) return data def _process_batch( self, points: np.ndarray, im_size: Tuple[int, ...], crop_box: List[int], orig_size: Tuple[int, ...], ) -> MaskData: orig_h, orig_w = orig_size # Run model on this batch transformed_points = self.predictor.transform.apply_coords(points, im_size) in_points = torch.as_tensor(transformed_points, device=self.predictor.device) in_labels = torch.ones( in_points.shape[0], dtype=torch.int, device=in_points.device ) masks, iou_preds, _ = self.predictor.predict_torch( in_points[:, None, :], in_labels[:, None], multimask_output=True, return_logits=True, ) # Serialize predictions and store in MaskData data = MaskData( masks=masks.flatten(0, 1), iou_preds=iou_preds.flatten(0, 1), points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), ) del masks # Filter by predicted IoU if self.pred_iou_thresh > 0.0: keep_mask = data["iou_preds"] > self.pred_iou_thresh data.filter(keep_mask) # Calculate stability score data["stability_score"] = calculate_stability_score( data["masks"], self.predictor.model.mask_threshold, self.stability_score_offset, ) if self.stability_score_thresh > 0.0: keep_mask = data["stability_score"] >= self.stability_score_thresh data.filter(keep_mask) # Threshold masks and calculate boxes data["masks"] = data["masks"] > self.predictor.model.mask_threshold data["boxes"] = batched_mask_to_box(data["masks"]) # Filter boxes that touch crop boundaries keep_mask = ~is_box_near_crop_edge( data["boxes"], crop_box, [0, 0, orig_w, orig_h] ) if not torch.all(keep_mask): data.filter(keep_mask) # Compress to RLE
data["masks"] = uncrop_masks(data["masks"], crop_box, orig_h, orig_w)
16
2023-11-26 23:42:53+00:00
16k
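A note on the segment-anything record above: the docstring documents the generator fully, but the dump never shows the public entry point end to end. A minimal usage sketch, assuming the standard segment_anything package layout; the checkpoint filename and image path are placeholders, not part of the record:

import cv2
from segment_anything import sam_model_registry, SamAutomaticMaskGenerator

# "vit_h" matches the ViT-H defaults the docstring above is tuned for.
# The checkpoint path is a placeholder -- download it from the SAM release first.
sam = sam_model_registry["vit_h"](checkpoint="sam_vit_h_4b8939.pth")
sam.to(device="cuda")

mask_generator = SamAutomaticMaskGenerator(sam)  # default thresholds as documented

image = cv2.cvtColor(cv2.imread("example.jpg"), cv2.COLOR_BGR2RGB)  # HWC uint8 RGB
masks = mask_generator.generate(image)

# Each record carries the documented keys: segmentation, bbox (XYWH), area,
# predicted_iou, point_coords, stability_score, crop_box.
print(len(masks), masks[0]["bbox"])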
sophiaalthammer/alforrankers
matchmaker/utils/input_pipeline.py
[ { "identifier": "ConditionalQueryGenerationInferenceReader", "path": "matchmaker/dataloaders/query_generation_inference_loader.py", "snippet": "class ConditionalQueryGenerationInferenceReader(DatasetReader):\n \"\"\"\n Read a tsv file containing a passage collection.\n \n Expected format for...
import torch import numpy import random import torch.multiprocessing as mp from allennlp.data.samplers import BucketBatchSampler, MaxTokensBatchSampler from allennlp.data.vocabulary import Vocabulary from allennlp.data.data_loaders import MultiProcessDataLoader from transformers import T5Tokenizer from allennlp.data.token_indexers import PretrainedTransformerIndexer from allennlp.data.tokenizers import PretrainedTransformerTokenizer from matchmaker.dataloaders.concatenated_reranking_loader import * from matchmaker.dataloaders.concatenated_training_loader import * from matchmaker.dataloaders.independent_reranking_loader import * from matchmaker.dataloaders.independent_training_loader import * from matchmaker.dataloaders.id_sequence_loader import * from matchmaker.dataloaders.mlm_masked_sequence_loader import * from matchmaker.dataloaders.query_generation_inference_loader import ConditionalQueryGenerationInferenceReader from matchmaker.dataloaders.tas_balanced_training_loader import * from matchmaker.dataloaders.pseudo_label_training_loader import PseudoLabelDatasetLoader, PseudoLabelTextDatasetLoader from matchmaker.dataloaders.triple_id_training_loader import TripleIdDatasetLoader from transformers import AutoTokenizer from matchmaker.dataloaders.bling_fire_tokenizer import BlingFireTokenizer from matchmaker.dataloaders.transformer_tokenizer import FastTransformerTokenizer from matchmaker.modules.bert_embedding_token_embedder import PretrainedBertIndexerNoSpecialTokens from typing import Dict, Tuple, List
11,844
# loader = IrDynamicTripleDatasetLoader(query_file=run_config["dynamic_query_file"], collection_file=run_config["dynamic_collection_file"], # qrels_file=run_config["dynamic_qrels_file"], candidate_file=run_config["dynamic_candidate_file"], # batch_size=int(run_config["batch_size_train"]), queries_per_batch=run_config["dynamic_queries_per_batch"], tokenizer=_tokenizer, token_indexers=_token_indexers, # max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], # min_doc_length=run_config["min_doc_length"], min_query_length=run_config["min_query_length"], # data_augment=run_config["train_data_augment"], vocab=_vocab) if run_config["dynamic_sampler_type"] == "tas_balanced": loader = TASBalancedDatasetLoader(query_file=run_config["dynamic_query_file"], collection_file=run_config["dynamic_collection_file"], pairs_with_teacher_scores=run_config["dynamic_pairs_with_teacher_scores"], query_cluster_file=run_config["dynamic_query_cluster_file"], batch_size=int(run_config["batch_size_train"]), clusters_per_batch=run_config["dynamic_clusters_per_batch"], tokenizer=_tokenizer, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], pair_balancing_strategy=run_config["tas_balanced_pair_strategy"],random_seed =run_config["random_seed"]) elif run_config["dynamic_sampler_type"] == "pseudo_label": loader = PseudoLabelDatasetLoader(query_file=run_config["dynamic_query_file"], collection_file=run_config["dynamic_collection_file"], rankings_with_teacher_scores=run_config["dynamic_rankings_with_teacher_scores"], selection_type=run_config["pseudo_label_selection_type"],min_pos_score=run_config["pseudo_label_min_pos_score"], max_diff_to_be_pos=run_config["pseudo_label_max_diff_to_be_pos"],min_diff_to_neg=run_config["pseudo_label_min_diff_to_neg"], batch_size=int(run_config["batch_size_train"]), tokenizer=_tokenizer, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], random_seed =run_config["random_seed"],concatenate_sequences = model_config.get("model_input_type", "") == "concatenated") elif run_config["dynamic_sampler_type"] == "pseudo_labeltext": loader = PseudoLabelTextDatasetLoader(rankings_with_teacher_scores=run_config["dynamic_rankings_with_teacher_scores"], batch_size=int(run_config["batch_size_train"]), tokenizer=_tokenizer, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], random_seed =run_config["random_seed"],concatenate_sequences = model_config.get("model_input_type", "") == "concatenated") elif run_config["dynamic_sampler_type"] == "triple_ids": loader = TripleIdDatasetLoader(query_file=run_config["dynamic_query_file"], collection_file=run_config["dynamic_collection_file"], triples_with_teacher_scores=run_config["dynamic_triples_with_teacher_scores"], batch_size=int(run_config["batch_size_train"]), tokenizer=_tokenizer, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], random_seed =run_config["random_seed"],concatenate_sequences = model_config.get("model_input_type", "") == "concatenated") elif run_config["dynamic_sampler_type"] == "mlm_pretrain": loader = MLMDatasetLoader(collection_file=run_config["train_tsv"], batch_size=int(run_config["batch_size_train"]), tokenizer=_tokenizer, max_doc_length=run_config["max_doc_length"], random_seed=run_config["random_seed"], min_doc_length=-1, mlm_mask_whole_words=True, mask_probability=run_config["mask_probability"], 
mlm_mask_replace_probability=run_config["mlm_mask_replace_probability"], mlm_mask_random_probability=run_config["mlm_mask_random_probability"], whole_word_masking=run_config["whole_word_masking"], random_spans=run_config["random_spans"], tasb=run_config["tasb"], tasb_cluster_file=run_config["tasb_cluster_file"], tasb_weight=run_config["tasb_weight"], grad_acc=run_config["gradient_accumulation_steps"], cached_chunk_size=int(run_config["batch_size_train"])/int(run_config["cache_chunk_size"])) else: raise ConfigurationError("dynamic sampler type not supported") return loader def allennlp_reranking_inference_loader(model_config, run_config, _input_file): ''' Load examples from a .tsv file in the reranking candidate file format: q_id<tab>d_id<tab>q_text<tab>d_text (Using allennlp's v2 multiprocess loader) ''' _tokenizer, _token_indexers, _vocab = _get_indexer(model_config, max(run_config["max_doc_length"], run_config["max_query_length"])) if model_config.get("model_input_type", "") == "concatenated" or model_config["token_embedder_type"] == "bert_cat": reader = ConcatenatedReRankingDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], min_doc_length=run_config["min_doc_length"], min_query_length=run_config["min_query_length"], train_qa_spans=run_config["train_qa_spans"]) else: reader = IndependentReRankingDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], min_doc_length=run_config.get("min_doc_length",-1), min_query_length=run_config.get("min_query_length",-1), query_augment_mask_number=run_config.get("query_augment_mask_number",-1), train_qa_spans=run_config.get("train_qa_spans",False)) loader = MultiProcessDataLoader(reader, data_path=_input_file, num_workers=run_config["dataloader_num_workers"], max_instances_in_memory=int(run_config["batch_size_eval"])*25, quiet=True, start_method="fork" if "fork" in mp.get_all_start_methods() else "spawn", batch_sampler=MaxTokensBatchSampler(max_tokens=int(run_config["batch_size_eval"])*run_config["max_doc_length"], sorting_keys=["doc_tokens"], padding_noise=0)) loader.index_with(_vocab) return loader def allennlp_query_gen_train_loader(model_config, run_config, _input_file): ''' Load examples from a .tsv file in the reranking candidate file format: q_id<tab>d_id<tab>q_text<tab>d_text (Using allennlp's v2 multiprocess loader) ''' _tokenizer, _token_indexers, _vocab = _get_indexer(model_config, max(run_config["max_doc_length"], run_config["max_query_length"])) reader = IndependentReRankingDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], min_doc_length=run_config.get("min_doc_length",-1), min_query_length=run_config.get("min_query_length",-1), query_augment_mask_number=run_config.get("query_augment_mask_number",-1), train_qa_spans=run_config.get("train_qa_spans",False)) loader = MultiProcessDataLoader(reader, data_path=_input_file, num_workers=run_config["dataloader_num_workers"], max_instances_in_memory=int(run_config["batch_size_train"])*25, quiet=True, start_method="fork" if "fork" in mp.get_all_start_methods() else "spawn", batch_size=run_config["batch_size_train"]) loader.index_with(_vocab) return loader def allennlp_query_gen_inference_loader(model_config, run_config, _input_file,): ''' Load examples from a .tsv file in the 
single sequence format: id<tab>text and augment it with conditional query codes (Using allennlp's v2 multiprocess loader) ''' _tokenizer, _token_indexers, _vocab = _get_indexer(model_config, run_config["max_doc_length"]) max_length = model_config["max_doc_length"] batch_size = run_config["collection_batch_size"]
#from tokenizers import ByteLevelBPETokenizer,CharBPETokenizer #from matchmaker.dataloaders.transformer_tokenizer import CustomTransformerTokenizer,CustomTransformerIndexer mp.set_sharing_strategy("file_system") # VERY MUCH needed for linux !! makes everything faster, but tends to break stuff def allennlp_single_sequence_loader(model_config, run_config, _input_file, sequence_type, force_exact_batch_size=False): ''' Load examples from a .tsv file in the single sequence format: id<tab>text (Using allennlp's v2 multiprocess loader) ''' if model_config.get("model_input_type", "") == "mlm": sequence_type = "single_mlm" if sequence_type == "query": max_length = run_config.get("overwrite_max_query_length", model_config["max_query_length"]) min_length = model_config.get("min_query_length",-1) batch_size = run_config["query_batch_size"] split_document=False split_document_window_size=-1 elif sequence_type == "single_mlm": max_length = run_config.get("overwrite_max_doc_length", model_config["max_doc_length"]) min_length = model_config.get("min_doc_length", -1) batch_size = run_config.get("collection_batch_size", run_config["batch_size_train"]) make_multiple_of=run_config.get("make_multiple_of",8) mask_probability=run_config.get("mask_probability",0.1) mlm_mask_replace_probability=run_config.get("mlm_mask_replace_probability",0.5) mlm_mask_random_probability=run_config.get("mlm_mask_random_probability",0.5) else: # doc max_length = run_config.get("overwrite_max_doc_length", model_config["max_doc_length"]) min_length = model_config.get("min_doc_length",-1) batch_size = run_config["collection_batch_size"] split_document=run_config.get("split_document",False) split_document_window_size=run_config.get("split_document_window_size",-1) _tokenizer, _token_indexers, _vocab = _get_indexer(model_config, max_length) #if model_config.get("model_input_type", "") == "mlm": # reader = MLMMaskedSequenceDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, # max_doc_length=max_length, min_doc_length=min_length, # mask_probability=mask_probability, # mlm_mask_replace_probability=mlm_mask_replace_probability, # mlm_mask_random_probability=mlm_mask_random_probability, # make_multiple_of=make_multiple_of) reader = IdSequenceDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, split_document=split_document,split_document_window_size=split_document_window_size, max_seq_length=max_length, min_seq_length=min_length, sequence_type=sequence_type) if force_exact_batch_size: loader = MultiProcessDataLoader(reader, data_path=_input_file, num_workers=run_config["dataloader_num_workers"], max_instances_in_memory=int(batch_size)*25, quiet=True, start_method="fork" if "fork" in mp.get_all_start_methods() else "spawn", batch_size=int(batch_size)) else: loader = MultiProcessDataLoader(reader, data_path=_input_file, num_workers=run_config["dataloader_num_workers"], max_instances_in_memory=int(batch_size)*25, quiet=True, start_method="fork" if "fork" in mp.get_all_start_methods() else "spawn", batch_sampler=MaxTokensBatchSampler(max_tokens=int(batch_size)*max_length, sorting_keys=["seq_tokens"], padding_noise=0)) loader.index_with(_vocab) return loader def allennlp_triple_training_loader(model_config, run_config, _input_file,add_text_to_batch=False): ''' Load training examples (either in the re-ranking text file format or a dynamic loader) (Using allennlp's v2 multiprocess loader) ''' _tokenizer, _token_indexers, _vocab = _get_indexer(model_config, max(run_config["max_doc_length"],
run_config["max_query_length"])) if run_config.get("dynamic_sampler", False) == False: if model_config.get("model_input_type", "") == "concatenated" or model_config["token_embedder_type"] == "bert_cat": reader = ConcatenatedTrainingDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], min_doc_length=run_config["min_doc_length"], min_query_length=run_config["min_query_length"], data_augment=run_config["train_data_augment"], train_pairwise_distillation=run_config["train_pairwise_distillation"], train_qa_spans=run_config["train_qa_spans"],add_text_to_batch=add_text_to_batch) else: reader = IndependentTrainingDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], min_doc_length=run_config["min_doc_length"], min_query_length=run_config["min_query_length"], data_augment=run_config["train_data_augment"], train_pairwise_distillation=run_config["train_pairwise_distillation"], query_augment_mask_number=run_config["query_augment_mask_number"], train_qa_spans=run_config["train_qa_spans"],add_text_to_batch=add_text_to_batch) loader = MultiProcessDataLoader(reader, data_path=_input_file, num_workers=run_config["dataloader_num_workers"], max_instances_in_memory=int(run_config["batch_size_train"])*25, quiet=True, start_method="fork" if "fork" in mp.get_all_start_methods() else "spawn", batch_size=run_config["batch_size_train"]) loader.index_with(_vocab) else: #if run_config["dynamic_sampler_type"] == "list": # loader = IrDynamicTripleDatasetLoader(query_file=run_config["dynamic_query_file"], collection_file=run_config["dynamic_collection_file"], # qrels_file=run_config["dynamic_qrels_file"], candidate_file=run_config["dynamic_candidate_file"], # batch_size=int(run_config["batch_size_train"]), queries_per_batch=run_config["dynamic_queries_per_batch"], tokenizer=_tokenizer, token_indexers=_token_indexers, # max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], # min_doc_length=run_config["min_doc_length"], min_query_length=run_config["min_query_length"], # data_augment=run_config["train_data_augment"], vocab=_vocab) if run_config["dynamic_sampler_type"] == "tas_balanced": loader = TASBalancedDatasetLoader(query_file=run_config["dynamic_query_file"], collection_file=run_config["dynamic_collection_file"], pairs_with_teacher_scores=run_config["dynamic_pairs_with_teacher_scores"], query_cluster_file=run_config["dynamic_query_cluster_file"], batch_size=int(run_config["batch_size_train"]), clusters_per_batch=run_config["dynamic_clusters_per_batch"], tokenizer=_tokenizer, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], pair_balancing_strategy=run_config["tas_balanced_pair_strategy"],random_seed =run_config["random_seed"]) elif run_config["dynamic_sampler_type"] == "pseudo_label": loader = PseudoLabelDatasetLoader(query_file=run_config["dynamic_query_file"], collection_file=run_config["dynamic_collection_file"], rankings_with_teacher_scores=run_config["dynamic_rankings_with_teacher_scores"], selection_type=run_config["pseudo_label_selection_type"],min_pos_score=run_config["pseudo_label_min_pos_score"], max_diff_to_be_pos=run_config["pseudo_label_max_diff_to_be_pos"],min_diff_to_neg=run_config["pseudo_label_min_diff_to_neg"], batch_size=int(run_config["batch_size_train"]), tokenizer=_tokenizer, 
max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], random_seed =run_config["random_seed"],concatenate_sequences = model_config.get("model_input_type", "") == "concatenated") elif run_config["dynamic_sampler_type"] == "pseudo_labeltext": loader = PseudoLabelTextDatasetLoader(rankings_with_teacher_scores=run_config["dynamic_rankings_with_teacher_scores"], batch_size=int(run_config["batch_size_train"]), tokenizer=_tokenizer, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], random_seed =run_config["random_seed"],concatenate_sequences = model_config.get("model_input_type", "") == "concatenated") elif run_config["dynamic_sampler_type"] == "triple_ids": loader = TripleIdDatasetLoader(query_file=run_config["dynamic_query_file"], collection_file=run_config["dynamic_collection_file"], triples_with_teacher_scores=run_config["dynamic_triples_with_teacher_scores"], batch_size=int(run_config["batch_size_train"]), tokenizer=_tokenizer, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], random_seed =run_config["random_seed"],concatenate_sequences = model_config.get("model_input_type", "") == "concatenated") elif run_config["dynamic_sampler_type"] == "mlm_pretrain": loader = MLMDatasetLoader(collection_file=run_config["train_tsv"], batch_size=int(run_config["batch_size_train"]), tokenizer=_tokenizer, max_doc_length=run_config["max_doc_length"], random_seed=run_config["random_seed"], min_doc_length=-1, mlm_mask_whole_words=True, mask_probability=run_config["mask_probability"], mlm_mask_replace_probability=run_config["mlm_mask_replace_probability"], mlm_mask_random_probability=run_config["mlm_mask_random_probability"], whole_word_masking=run_config["whole_word_masking"], random_spans=run_config["random_spans"], tasb=run_config["tasb"], tasb_cluster_file=run_config["tasb_cluster_file"], tasb_weight=run_config["tasb_weight"], grad_acc=run_config["gradient_accumulation_steps"], cached_chunk_size=int(run_config["batch_size_train"])/int(run_config["cache_chunk_size"])) else: raise ConfigurationError("dynamic sampler type not supported") return loader def allennlp_reranking_inference_loader(model_config, run_config, _input_file): ''' Load examples from a .tsv file in the reranking candidate file format: q_id<tab>d_id<tab>q_text<tab>d_text (Using allennlp's v2 multiprocess loader) ''' _tokenizer, _token_indexers, _vocab = _get_indexer(model_config, max(run_config["max_doc_length"], run_config["max_query_length"])) if model_config.get("model_input_type", "") == "concatenated" or model_config["token_embedder_type"] == "bert_cat": reader = ConcatenatedReRankingDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], min_doc_length=run_config["min_doc_length"], min_query_length=run_config["min_query_length"], train_qa_spans=run_config["train_qa_spans"]) else: reader = IndependentReRankingDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], min_doc_length=run_config.get("min_doc_length",-1), min_query_length=run_config.get("min_query_length",-1), query_augment_mask_number=run_config.get("query_augment_mask_number",-1), train_qa_spans=run_config.get("train_qa_spans",False)) loader = MultiProcessDataLoader(reader, data_path=_input_file, 
num_workers=run_config["dataloader_num_workers"], max_instances_in_memory=int(run_config["batch_size_eval"])*25, quiet=True, start_method="fork" if "fork" in mp.get_all_start_methods() else "spawn", batch_sampler=MaxTokensBatchSampler(max_tokens=int(run_config["batch_size_eval"])*run_config["max_doc_length"], sorting_keys=["doc_tokens"], padding_noise=0)) loader.index_with(_vocab) return loader def allennlp_query_gen_train_loader(model_config, run_config, _input_file): ''' Load examples from a .tsv file in the reranking candidate file format: q_id<tab>d_id<tab>q_text<tab>d_text (Using allennlp's v2 multiprocess loader) ''' _tokenizer, _token_indexers, _vocab = _get_indexer(model_config, max(run_config["max_doc_length"], run_config["max_query_length"])) reader = IndependentReRankingDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], min_doc_length=run_config.get("min_doc_length",-1), min_query_length=run_config.get("min_query_length",-1), query_augment_mask_number=run_config.get("query_augment_mask_number",-1), train_qa_spans=run_config.get("train_qa_spans",False)) loader = MultiProcessDataLoader(reader, data_path=_input_file, num_workers=run_config["dataloader_num_workers"], max_instances_in_memory=int(run_config["batch_size_train"])*25, quiet=True, start_method="fork" if "fork" in mp.get_all_start_methods() else "spawn", batch_size=run_config["batch_size_train"]) loader.index_with(_vocab) return loader def allennlp_query_gen_inference_loader(model_config, run_config, _input_file,): ''' Load examples from a .tsv file in the single sequence format: id<tab>text and augment it with conditional query codes (Using allennlp's v2 multiprocess loader) ''' _tokenizer, _token_indexers, _vocab = _get_indexer(model_config, run_config["max_doc_length"]) max_length = model_config["max_doc_length"] batch_size = run_config["collection_batch_size"]
reader = ConditionalQueryGenerationInferenceReader(tokenizer=_tokenizer, token_indexers=_token_indexers,
0
2023-11-21 10:38:22+00:00
16k
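Every loader factory in the matchmaker record above has the same shape: pull keys out of run_config, pick a reader for the sampler type, wrap it in a MultiProcessDataLoader, and fail loudly on unknown types. A stripped-down sketch of that dispatch pattern, with a hypothetical registry standing in for the hard-coded if/elif chain (none of these names exist in the repo):

from typing import Any, Callable, Dict

# Hypothetical registry; the real input_pipeline.py spells out each branch inline.
SAMPLER_FACTORIES: Dict[str, Callable[..., Any]] = {}

def register_sampler(name: str):
    def wrap(factory):
        SAMPLER_FACTORIES[name] = factory
        return factory
    return wrap

@register_sampler("tas_balanced")
def make_tas_balanced(run_config: Dict[str, Any]):
    # Stand-in for TASBalancedDatasetLoader(...) and its config-driven kwargs.
    return ("tas_balanced", int(run_config["batch_size_train"]))

def build_dynamic_loader(run_config: Dict[str, Any]):
    sampler_type = run_config["dynamic_sampler_type"]
    try:
        factory = SAMPLER_FACTORIES[sampler_type]
    except KeyError:
        # Mirrors the ConfigurationError raised at the end of each chain.
        raise ValueError(f"dynamic sampler type not supported: {sampler_type}")
    return factory(run_config)

print(build_dynamic_loader({"dynamic_sampler_type": "tas_balanced", "batch_size_train": "32"}))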
MICLab-Unicamp/medpseg
medpseg/poly_pipeline.py
[ { "identifier": "PolySeg2DModule", "path": "medpseg/poly_seg_2d_module.py", "snippet": "class PolySeg2DModule(pl.LightningModule):\n '''\n Regarding of the name, also works with 3D networks\n '''\n def __init__(self, hparams):\n '''\n Check starter.py for description of all hpa...
import os import torch import numpy as np import cc3d import SimpleITK as sitk from medpseg.poly_seg_2d_module import PolySeg2DModule from medpseg.eval_2d_utils import E2DStackDataset, argon_cpu_count from torch.nn import functional as F from tqdm import tqdm from collections import defaultdict from operator import itemgetter from typing import Dict, Optional from multiprocessing import Queue
11,678
label_count.sort(key=itemgetter(1), reverse=True) label_count.pop(0) # remove largest which should be background if verbose: print(f"Label count: {label_count}") filtered = None if return_largest > 0: for i in range(return_largest): try: id_max = label_count[i][0] if filtered is None: filtered = (labels_out == id_max) else: filtered += (labels_out == id_max) except IndexError: # We want more components than what is in the image, stop break volume = filtered * volume labels_out = filtered * labels_out return volume, label_count, labels_out class PrintInterface(): def __init__(self, tqdm_iter): self.tqdm_iter = tqdm_iter self.rot90 = False def write(self, x): self.tqdm_iter.put(("write", x)) def progress(self, x): self.tqdm_iter.put(("iterbar", x)) def image_to_front_end(self, x): if self.rot90: x = np.rot90(x, k=2, axes=(0, 1)) self.tqdm_iter.put(("slice", x)) def icon(self): self.tqdm_iter.put(("icon", '')) def poly_stack_predict(model: torch.nn.Module, volume: torch.Tensor, batch_size: int, device=torch.device("cuda:0"), info_q: Optional[Queue] = None, uncertainty: Optional[int] = None): ''' DEVING uncertainty: epistemic uncertainty, predict n times and return the mean and std prediction ''' e2d_stack_dataloader = E2DStackDataset(volume, extended_2d=1).get_dataloader(batch_size=batch_size, pin_memory=False, num_workers=argon_cpu_count()) outs = defaultdict(list) np_outs = {} np_means = {} np_stds = {} uncertainty_means = defaultdict(list) uncertainty_stds = defaultdict(list) for input_slice in tqdm(e2d_stack_dataloader, desc=f"Slicing with batch size {batch_size}."): if info_q is not None: package = input_slice[0].numpy().transpose(1, 2, 0).copy() # Not necessary anymore with current pre processing: September 2023 # package = (package + 1024) / (600 + 1024) info_q.image_to_front_end(package) if uncertainty is None: out = model(input_slice.to(device), stacking=True) for key, y_hat in out.items(): outs[key].append(y_hat.cpu()) else: raise DeprecationWarning("Uncertainty deprecated for now, needs update") # Save outputs for each branch in buffer uncertainty_buffer = defaultdict(list) model.train() for _ in tqdm(range(uncertainty)): # 8 equivalent to (0, 1, 2) flips out = model(input_slice.to(device), stacking=True) for key, y_hat in out.items(): uncertainty_buffer[key].append(y_hat.cpu()) model.eval() # Collect buffer items into means and STDs for each branch for key, buffer in uncertainty_buffer.items(): # use stack to keep batch dimension separate from accumulation dimension # statistics will take that dimension out buffer = torch.stack(buffer, dim=0) uncertainty_means[key].append(buffer.mean(dim=0)) uncertainty_stds[key].append(buffer.std(dim=0)) # Certain prediction volumes. Will not run if in uncertain mode.
for key, y_hat in outs.items(): np_outs[key] = torch.cat(y_hat).unsqueeze(0).permute(0, 2, 1, 3, 4) # Compute final volume for uncertainty mean and uncertainty itself (STD) if uncertainty is not None: raise DeprecationWarning("Uncertainty deprecated for now, needs update") for key, y_hat in uncertainty_means.items(): np_means[key] = torch.cat(y_hat).unsqueeze(0).permute(0, 2, 1, 3, 4) for key, y_hat in uncertainty_stds.items(): np_stds[f"{key}_uncertainty"] = torch.cat(y_hat).unsqueeze(0).permute(0, 2, 1, 3, 4) if uncertainty is None: return np_outs else: return np_means, np_stds class PolySegmentationPipeline(): ''' This pipeline does all targets in a single weight ''' def __init__(self, weight="/home/diedre/diedre_phd/phd/models/medseg_25d_a100_long_silver_gold_gdl-epoch=22-step=76176-val_loss_3d=0.25-healthy_dice_3d=0.93-unhealthy_dice_3d=0.71-ggo_dice_3d=0.71-con_dice_3d=0.62-airway_dice_3d=0.90-vessel_dice_3d=0.87.ckpt", batch_size=1, # increase with high memory gpus cpu=False, output_dir=None, post=False): self.version = 'silver_gold_gdl' self.batch_size = batch_size self.device = torch.device("cpu") if cpu else torch.device("cuda:0")
''' Copyright (c) Diedre Carmo, Medical Imaging Computing Lab (MICLab) https://miclab.fee.unicamp.br/ https://github.com/MICLab-Unicamp/medpseg All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. Independent script Updated pipeline using a single weight ''' def get_connected_components(volume, return_largest=2, verbose=False): ''' volume: input volume return_largest: how many of the largest labels to return. If 0, nothing is changed in input volume verbose: prints label_count returns: filtered_volume, label_count, labeled_volume ''' labels_out = cc3d.connected_components(volume.astype(np.int32)) label_count = np.unique(labels_out, return_counts=True)[1] # Indicate which was the original label and sort by count label_count = [(label, count) for label, count in enumerate(label_count)] label_count.sort(key=itemgetter(1), reverse=True) label_count.pop(0) # remove largest which should be background if verbose: print(f"Label count: {label_count}") filtered = None if return_largest > 0: for i in range(return_largest): try: id_max = label_count[i][0] if filtered is None: filtered = (labels_out == id_max) else: filtered += (labels_out == id_max) except IndexError: # We want more components than what is in the image, stop break volume = filtered * volume labels_out = filtered * labels_out return volume, label_count, labels_out class PrintInterface(): def __init__(self, tqdm_iter): self.tqdm_iter = tqdm_iter self.rot90 = False def write(self, x): self.tqdm_iter.put(("write", x)) def progress(self, x): self.tqdm_iter.put(("iterbar", x)) def image_to_front_end(self, x): if self.rot90: x = np.rot90(x, k=2, axes=(0, 1)) self.tqdm_iter.put(("slice", x)) def icon(self): self.tqdm_iter.put(("icon", '')) def poly_stack_predict(model: torch.nn.Module, volume: torch.Tensor, batch_size: int, device=torch.device("cuda:0"), info_q: Optional[Queue] = None, uncertainty: Optional[int] = None): ''' DEVING uncertainty: epistemic uncertainty, predict n times and return the mean and std prediction ''' e2d_stack_dataloader = E2DStackDataset(volume, extended_2d=1).get_dataloader(batch_size=batch_size, pin_memory=False, num_workers=argon_cpu_count()) outs = defaultdict(list) np_outs = {} np_means = {} np_stds = {} uncertainty_means = defaultdict(list) uncertainty_stds = defaultdict(list) for input_slice in tqdm(e2d_stack_dataloader, desc=f"Slicing with batch size {batch_size}."): if info_q is not None: package = input_slice[0].numpy().transpose(1, 2, 0).copy() # Not necessary anymore with current pre processing: September 2023 # package = (package + 1024) / (600 + 1024) info_q.image_to_front_end(package) if uncertainty is None: out = model(input_slice.to(device), stacking=True) for key, y_hat in out.items(): outs[key].append(y_hat.cpu()) else: raise DeprecationWarning("Uncertainty deprecated for now, needs update") # Save outputs for each branch in buffer uncertainty_buffer = defaultdict(list) model.train() for _ in tqdm(range(uncertainty)): # 8 equivalent to (0, 1, 2) flips out = model(input_slice.to(device), stacking=True) for key, y_hat in out.items(): uncertainty_buffer[key].append(y_hat.cpu()) model.eval() # Collect buffer items into means and STDs for each branch for key, buffer in uncertainty_buffer.items(): # use stack to keep batch dimension separate from accumulation dimension # statistics will take that dimension out buffer = torch.stack(buffer, dim=0) uncertainty_means[key].append(buffer.mean(dim=0))
uncertainty_stds[key].append(buffer.std(dim=0)) # Certain prediction volumes. Will not run if in uncertain mode. for key, y_hat in outs.items(): np_outs[key] = torch.cat(y_hat).unsqueeze(0).permute(0, 2, 1, 3, 4) # Compute final volume for uncertainty mean and uncertainty itself (STD) if uncertainty is not None: raise DeprecationWarning("Uncertainty deprecated for now, needs update") for key, y_hat in uncertainty_means.items(): np_means[key] = torch.cat(y_hat).unsqueeze(0).permute(0, 2, 1, 3, 4) for key, y_hat in uncertainty_stds.items(): np_stds[f"{key}_uncertainty"] = torch.cat(y_hat).unsqueeze(0).permute(0, 2, 1, 3, 4) if uncertainty is None: return np_outs else: return np_means, np_stds class PolySegmentationPipeline(): ''' This pipeline does all targets in a single weight ''' def __init__(self, weight="/home/diedre/diedre_phd/phd/models/medseg_25d_a100_long_silver_gold_gdl-epoch=22-step=76176-val_loss_3d=0.25-healthy_dice_3d=0.93-unhealthy_dice_3d=0.71-ggo_dice_3d=0.71-con_dice_3d=0.62-airway_dice_3d=0.90-vessel_dice_3d=0.87.ckpt", batch_size=1, # increase with high memory gpus cpu=False, output_dir=None, post=False): self.version = 'silver_gold_gdl' self.batch_size = batch_size self.device = torch.device("cpu") if cpu else torch.device("cuda:0")
self.model = PolySeg2DModule.load_from_checkpoint(weight, map_location="cpu").eval()
0
2023-11-21 20:03:33+00:00
16k
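get_connected_components is the load-bearing helper in the medpseg record above. A short usage sketch on a synthetic binary volume, assuming the connected-components-3d package (cc3d) the module imports; the shapes and blob sizes are made up for illustration:

import numpy as np
import cc3d  # pip install connected-components-3d

# Two blobs of different sizes on a zero background.
volume = np.zeros((32, 32, 32), dtype=np.uint8)
volume[2:10, 2:10, 2:10] = 1      # 512-voxel component
volume[20:24, 20:24, 20:24] = 1   # 64-voxel component

labels = cc3d.connected_components(volume.astype(np.int32))
counts = np.unique(labels, return_counts=True)[1]
# cc3d labels components 0..N contiguously, so enumerate(counts) pairs each
# label with its voxel count. Sorting by size and dropping the first entry
# discards the background, the largest component in mostly-empty volumes --
# the same assumption get_connected_components makes.
pairs = sorted(enumerate(counts), key=lambda lc: lc[1], reverse=True)[1:]
largest = pairs[0][0]
filtered = (labels == largest) * volume
print(pairs, int(filtered.sum()))  # keeps only the 512-voxel blob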
DLYuanGod/TinyGPT-V
minigpt4/datasets/builders/image_text_pair_builder.py
[ { "identifier": "registry", "path": "minigpt4/common/registry.py", "snippet": "class Registry:\n def register_builder(cls, name):\n def wrap(builder_cls):\n def register_task(cls, name):\n def wrap(task_cls):\n def register_model(cls, name):\n def wrap(model_cls):\n def ...
import os import logging import warnings from minigpt4.common.registry import registry from minigpt4.datasets.builders.base_dataset_builder import BaseDatasetBuilder from minigpt4.datasets.datasets.laion_dataset import LaionDataset from minigpt4.datasets.datasets.cc_sbu_dataset import CCSBUDataset, CCSBUAlignDataset from minigpt4.datasets.datasets.text_caps import TextCapDataset from minigpt4.datasets.datasets.llava_dataset import LlavaDetailDataset, LlavaReasonDataset, LlavaConversationDataset from minigpt4.datasets.datasets.unnatural_instruction import UnnaturalDataset from minigpt4.datasets.datasets.multitask_conversation import MultiTaskConversationDataset from minigpt4.datasets.datasets.flickr import GroundedDetailDataset,CaptionToObjectDataset,PhraseToObjectDataset from minigpt4.datasets.datasets.vg_dataset import ReferVisualGenomeDataset from minigpt4.datasets.datasets.coco_dataset import ReferCOCODataset, InvReferCOCODataset from minigpt4.datasets.datasets.gqa_datasets import GQADataset from minigpt4.datasets.datasets.aok_vqa_datasets import AOKVQADataset from minigpt4.datasets.datasets.coco_vqa_datasets import COCOVQADataset from minigpt4.datasets.datasets.ocrvqa_dataset import OCRVQADataset from minigpt4.datasets.datasets.coco_caption import COCOCapDataset
11,462
self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets @registry.register_builder("llava_conversation") class LlavaReasonBuilder(BaseDatasetBuilder): train_dataset_cls = LlavaConversationDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/llava/conversation.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets class AllRefCOCOBuilder(BaseDatasetBuilder): def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info image_path = build_info.image_path ann_path = build_info.ann_path datasets = dict() if not os.path.exists(image_path): warnings.warn("image path {} does not exist.".format(image_path)) if not os.path.exists(ann_path): warnings.warn("ann path {} does not exist.".format(ann_path)) # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=ann_path, vis_root=image_path, dataset=build_info.dataset, splitBy=build_info.splitBy ) return datasets @registry.register_builder("refcoco") class RefCOCOBuilder(AllRefCOCOBuilder): train_dataset_cls = ReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/refcoco.yaml", } @registry.register_builder("refcocop") class RefCOCOPBuilder(AllRefCOCOBuilder): train_dataset_cls = ReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/refcocop.yaml", } @registry.register_builder("refcocog") class RefCOCOGBuilder(AllRefCOCOBuilder): train_dataset_cls = ReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/refcocog.yaml", } @registry.register_builder("invrefcoco") class RefCOCOBuilder(AllRefCOCOBuilder): train_dataset_cls = InvReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/invrefcoco.yaml", } @registry.register_builder("invrefcocop") class RefCOCOPBuilder(AllRefCOCOBuilder): train_dataset_cls = InvReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/invrefcocop.yaml", } @registry.register_builder("invrefcocog") class RefCOCOGBuilder(AllRefCOCOBuilder): train_dataset_cls = InvReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/invrefcocog.yaml", } @registry.register_builder("refvg") class RefVisualGenomeBuilder(BaseDatasetBuilder):
@registry.register_builder("multitask_conversation") class MultitaskConversationBuilder(BaseDatasetBuilder): train_dataset_cls = MultiTaskConversationDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/multitask_conversation/default.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets @registry.register_builder("unnatural_instruction") class UnnaturalInstructionBuilder(BaseDatasetBuilder): train_dataset_cls = UnnaturalDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/nlp/unnatural_instruction.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( text_processor=self.text_processors["train"], ann_path=build_info.ann_path, ) return datasets @registry.register_builder("llava_detail") class LlavaDetailBuilder(BaseDatasetBuilder): train_dataset_cls = LlavaDetailDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/llava/detail.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets @registry.register_builder("llava_reason") class LlavaReasonBuilder(BaseDatasetBuilder): train_dataset_cls = LlavaReasonDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/llava/reason.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets @registry.register_builder("llava_conversation") class LlavaReasonBuilder(BaseDatasetBuilder): train_dataset_cls = LlavaConversationDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/llava/conversation.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. 
logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets class AllRefCOCOBuilder(BaseDatasetBuilder): def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info image_path = build_info.image_path ann_path = build_info.ann_path datasets = dict() if not os.path.exists(image_path): warnings.warn("image path {} does not exist.".format(image_path)) if not os.path.exists(ann_path): warnings.warn("ann path {} does not exist.".format(ann_path)) # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=ann_path, vis_root=image_path, dataset=build_info.dataset, splitBy=build_info.splitBy ) return datasets @registry.register_builder("refcoco") class RefCOCOBuilder(AllRefCOCOBuilder): train_dataset_cls = ReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/refcoco.yaml", } @registry.register_builder("refcocop") class RefCOCOPBuilder(AllRefCOCOBuilder): train_dataset_cls = ReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/refcocop.yaml", } @registry.register_builder("refcocog") class RefCOCOGBuilder(AllRefCOCOBuilder): train_dataset_cls = ReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/refcocog.yaml", } @registry.register_builder("invrefcoco") class RefCOCOBuilder(AllRefCOCOBuilder): train_dataset_cls = InvReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/invrefcoco.yaml", } @registry.register_builder("invrefcocop") class RefCOCOPBuilder(AllRefCOCOBuilder): train_dataset_cls = InvReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/invrefcocop.yaml", } @registry.register_builder("invrefcocog") class RefCOCOGBuilder(AllRefCOCOBuilder): train_dataset_cls = InvReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/invrefcocog.yaml", } @registry.register_builder("refvg") class RefVisualGenomeBuilder(BaseDatasetBuilder):
train_dataset_cls = ReferVisualGenomeDataset
14
2023-12-28 05:47:18+00:00
16k
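Every builder in the TinyGPT-V record above follows one recipe: a @registry.register_builder decorator, a train_dataset_cls, a DATASET_CONFIG_DICT, and a build_datasets that wires processors and annotation/image paths into the dataset class. The registry key, not the Python class name, identifies a builder, which is why the repeated class names in the record (LlavaReasonBuilder, RefCOCOBuilder) still register distinct entries. A toy version of the decorator indirection, illustrative only, not minigpt4.common.registry itself:

class Registry:
    mapping = {"builder_name_mapping": {}}

    @classmethod
    def register_builder(cls, name):
        def wrap(builder_cls):
            # The string key is the stable identity; the class object is the value.
            cls.mapping["builder_name_mapping"][name] = builder_cls
            return builder_cls
        return wrap

    @classmethod
    def get_builder_class(cls, name):
        return cls.mapping["builder_name_mapping"].get(name)

registry = Registry  # module-level alias, as the real code exposes one

@registry.register_builder("toy_caption")
class ToyCaptionBuilder:
    DATASET_CONFIG_DICT = {"default": "configs/datasets/toy/defaults.yaml"}

    def build_datasets(self):
        # Real builders build processors and load build_info paths here.
        return {"train": ["sample_0", "sample_1"]}

builder_cls = Registry.get_builder_class("toy_caption")
print(builder_cls().build_datasets()["train"])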
jiawei-ren/dreamgaussian4d
diffusers/src/diffusers/pipelines/pipeline_flax_utils.py
[ { "identifier": "ConfigMixin", "path": "diffusers/src/diffusers/configuration_utils.py", "snippet": "class ConfigMixin:\n r\"\"\"\n Base class for all configuration classes. All configuration parameters are stored under `self.config`. Also\n provides the [`~ConfigMixin.from_config`] and [`~Conf...
import importlib import inspect import os import flax import numpy as np import PIL.Image from typing import Any, Dict, List, Optional, Union from flax.core.frozen_dict import FrozenDict from huggingface_hub import create_repo, snapshot_download from PIL import Image from tqdm.auto import tqdm from ..configuration_utils import ConfigMixin from ..models.modeling_flax_utils import FLAX_WEIGHTS_NAME, FlaxModelMixin from ..schedulers.scheduling_utils_flax import SCHEDULER_CONFIG_NAME, FlaxSchedulerMixin from ..utils import ( CONFIG_NAME, DIFFUSERS_CACHE, BaseOutput, PushToHubMixin, http_user_agent, is_transformers_available, logging, ) from transformers import FlaxPreTrainedModel from diffusers import pipelines from diffusers import pipelines
11,032
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. if is_transformers_available(): INDEX_FILE = "diffusion_flax_model.bin" logger = logging.get_logger(__name__) LOADABLE_CLASSES = { "diffusers": { "FlaxModelMixin": ["save_pretrained", "from_pretrained"], "FlaxSchedulerMixin": ["save_pretrained", "from_pretrained"], "FlaxDiffusionPipeline": ["save_pretrained", "from_pretrained"], }, "transformers": { "PreTrainedTokenizer": ["save_pretrained", "from_pretrained"], "PreTrainedTokenizerFast": ["save_pretrained", "from_pretrained"], "FlaxPreTrainedModel": ["save_pretrained", "from_pretrained"], "FeatureExtractionMixin": ["save_pretrained", "from_pretrained"], "ProcessorMixin": ["save_pretrained", "from_pretrained"], "ImageProcessingMixin": ["save_pretrained", "from_pretrained"], }, } ALL_IMPORTABLE_CLASSES = {} for library in LOADABLE_CLASSES: ALL_IMPORTABLE_CLASSES.update(LOADABLE_CLASSES[library]) def import_flax_or_no_model(module, class_name): try: # 1. First make sure that if a Flax object is present, import this one class_obj = getattr(module, "Flax" + class_name) except AttributeError: # 2. If this doesn't work, it's not a model and we don't append "Flax" class_obj = getattr(module, class_name) except AttributeError: raise ValueError(f"Neither Flax{class_name} nor {class_name} exist in {module}") return class_obj @flax.struct.dataclass class FlaxImagePipelineOutput(BaseOutput): """ Output class for image pipelines. Args: images (`List[PIL.Image.Image]` or `np.ndarray`) List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, num_channels)`. """ images: Union[List[PIL.Image.Image], np.ndarray]
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. if is_transformers_available(): INDEX_FILE = "diffusion_flax_model.bin" logger = logging.get_logger(__name__) LOADABLE_CLASSES = { "diffusers": { "FlaxModelMixin": ["save_pretrained", "from_pretrained"], "FlaxSchedulerMixin": ["save_pretrained", "from_pretrained"], "FlaxDiffusionPipeline": ["save_pretrained", "from_pretrained"], }, "transformers": { "PreTrainedTokenizer": ["save_pretrained", "from_pretrained"], "PreTrainedTokenizerFast": ["save_pretrained", "from_pretrained"], "FlaxPreTrainedModel": ["save_pretrained", "from_pretrained"], "FeatureExtractionMixin": ["save_pretrained", "from_pretrained"], "ProcessorMixin": ["save_pretrained", "from_pretrained"], "ImageProcessingMixin": ["save_pretrained", "from_pretrained"], }, } ALL_IMPORTABLE_CLASSES = {} for library in LOADABLE_CLASSES: ALL_IMPORTABLE_CLASSES.update(LOADABLE_CLASSES[library]) def import_flax_or_no_model(module, class_name): try: # 1. First make sure that if a Flax object is present, import this one class_obj = getattr(module, "Flax" + class_name) except AttributeError: # 2. If this doesn't work, it's not a model and we don't append "Flax" class_obj = getattr(module, class_name) except AttributeError: raise ValueError(f"Neither Flax{class_name} nor {class_name} exist in {module}") return class_obj @flax.struct.dataclass class FlaxImagePipelineOutput(BaseOutput): """ Output class for image pipelines. Args: images (`List[PIL.Image.Image]` or `np.ndarray`) List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, num_channels)`. """ images: Union[List[PIL.Image.Image], np.ndarray]
class FlaxDiffusionPipeline(ConfigMixin, PushToHubMixin):
7
2023-12-28 08:17:40+00:00
16k
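One detail worth flagging in import_flax_or_no_model above: a single try block cannot dispatch to two except AttributeError clauses, only the first matching handler ever runs, so the ValueError branch is unreachable as written. A nested-fallback sketch of the intended behavior under the same getattr contract; the function name here is my own, not the diffusers API:

import importlib

def load_flax_or_plain(library_name: str, class_name: str):
    """Prefer the Flax-prefixed class, fall back to the plain name, and only
    raise once both lookups have failed."""
    module = importlib.import_module(library_name)
    try:
        # 1. If a Flax object is present, import this one.
        return getattr(module, "Flax" + class_name)
    except AttributeError:
        try:
            # 2. Otherwise it's not a Flax model; try the plain name.
            return getattr(module, class_name)
        except AttributeError:
            raise ValueError(
                f"Neither Flax{class_name} nor {class_name} exist in {module}"
            ) from None

# e.g. load_flax_or_plain("transformers", "CLIPTextModel") should resolve to
# FlaxCLIPTextModel when the Flax extras are installed.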
FoundationVision/UniRef
detectron2/evaluation/coco_evaluation.py
[ { "identifier": "CfgNode", "path": "detectron2/config/config.py", "snippet": "class CfgNode(_CfgNode):\n \"\"\"\n The same as `fvcore.common.config.CfgNode`, but different in:\n\n 1. Use unsafe yaml loading by default.\n Note that this may lead to arbitrary code execution: you must not\n ...
import contextlib import copy import io import itertools import json import logging import numpy as np import os import pickle import pycocotools.mask as mask_util import torch import detectron2.utils.comm as comm from collections import OrderedDict from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval from tabulate import tabulate from detectron2.config import CfgNode from detectron2.data import MetadataCatalog from detectron2.data.datasets.coco import convert_to_coco_json from detectron2.structures import Boxes, BoxMode, pairwise_iou from detectron2.utils.file_io import PathManager from detectron2.utils.logger import create_small_table from .evaluator import DatasetEvaluator from detectron2.evaluation.fast_eval_api import COCOeval_opt from detectron2.evaluation.refcocoeval import RefCOCOeval
13,051
try:
except ImportError:
    COCOeval_opt = COCOeval


class COCOEvaluator(DatasetEvaluator):
    """
    Evaluate AR for object proposals, AP for instance detection/segmentation, AP
    for keypoint detection outputs using COCO's metrics.
    See http://cocodataset.org/#detection-eval and
    http://cocodataset.org/#keypoints-eval to understand its metrics.
    The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means
    the metric cannot be computed (e.g. due to no predictions made).

    In addition to COCO, this evaluator is able to support any bounding box detection,
    instance segmentation, or keypoint detection dataset.
    """

    def __init__(
        self,
        dataset_name,
        tasks=None,
        distributed=True,
        output_dir=None,
        *,
        max_dets_per_image=None,
        use_fast_impl=True,
        kpt_oks_sigmas=(),
        allow_cached_coco=True,
        force_tasks=None,
        refcoco=False
    ):
        """
        Args:
            dataset_name (str): name of the dataset to be evaluated.
                It must have either the following corresponding metadata:
                    "json_file": the path to the COCO format annotation
                Or it must be in detectron2's standard dataset format
                so it can be converted to COCO format automatically.
            tasks (tuple[str]): tasks that can be evaluated under the given
                configuration. A task is one of "bbox", "segm", "keypoints".
                By default, will infer this automatically from predictions.
            distributed (True): if True, will collect results from all ranks and run evaluation
                in the main process.
                Otherwise, will only evaluate the results in the current process.
            output_dir (str): optional, an output directory to dump all
                results predicted on the dataset. The dump contains two files:
                1. "instances_predictions.pth" a file that can be loaded with `torch.load` and
                   contains all the results in the format they are produced by the model.
                2. "coco_instances_results.json" a json file in COCO's result format.
            max_dets_per_image (int): limit on the maximum number of detections per image.
                By default in COCO, this limit is to 100, but this can be customized
                to be greater, as is needed in evaluation metrics AP fixed and AP pool
                (see https://arxiv.org/pdf/2102.01066.pdf)
                This doesn't affect keypoint evaluation.
            use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP.
                Although the results should be very close to the official implementation in COCO
                API, it is still recommended to compute results with the official API for use in
                papers. The faster implementation also uses more RAM.
            kpt_oks_sigmas (list[float]): The sigmas used to calculate keypoint OKS.
                See http://cocodataset.org/#keypoints-eval
                When empty, it will use the defaults in COCO.
                Otherwise it should be the same length as ROI_KEYPOINT_HEAD.NUM_KEYPOINTS.
            allow_cached_coco (bool): Whether to use cached coco json from previous validation
                runs. You should set this to False if you need to use different validation data.
                Defaults to True.
        """
        self.dataset_name = dataset_name
        self._logger = logging.getLogger(__name__)
        self._distributed = distributed
        self._output_dir = output_dir
        self.force_tasks = force_tasks
        self.refcoco = refcoco

        if use_fast_impl and (COCOeval_opt is COCOeval):
            self._logger.info("Fast COCO eval is not built. Falling back to official COCO eval.")
            use_fast_impl = False
        self._use_fast_impl = use_fast_impl

        # COCOeval requires the limit on the number of detections per image (maxDets) to be a list
        # with at least 3 elements. The default maxDets in COCOeval is [1, 10, 100], in which the
        # 3rd element (100) is used as the limit on the number of detections per image when
        # evaluating AP. COCOEvaluator expects an integer for max_dets_per_image, so for COCOeval,
        # we reformat max_dets_per_image into [1, 10, max_dets_per_image], based on the defaults.
        if max_dets_per_image is None:
            max_dets_per_image = [1, 10, 100]
        else:
            max_dets_per_image = [1, 10, max_dets_per_image]
        self._max_dets_per_image = max_dets_per_image

        if tasks is not None and isinstance(tasks, CfgNode):
            kpt_oks_sigmas = (
                tasks.TEST.KEYPOINT_OKS_SIGMAS if not kpt_oks_sigmas else kpt_oks_sigmas
            )
            self._logger.warn(
                "COCO Evaluator instantiated using config, this is deprecated behavior."
                " Please pass in explicit arguments instead."
            )
            self._tasks = None  # Infering it from predictions should be better
        else:
            self._tasks = tasks

        self._cpu_device = torch.device("cpu")

        self._metadata = MetadataCatalog.get(dataset_name)
        if not hasattr(self._metadata, "json_file"):
            if output_dir is None:
                raise ValueError(
                    "output_dir must be provided to COCOEvaluator "
                    "for datasets not in COCO format."
                )
            self._logger.info(f"Trying to convert '{dataset_name}' to COCO format ...")

            cache_path = os.path.join(output_dir, f"{dataset_name}_coco_format.json")
            self._metadata.json_file = cache_path
# Copyright (c) Facebook, Inc. and its affiliates.

try:
except ImportError:
    COCOeval_opt = COCOeval


class COCOEvaluator(DatasetEvaluator):
    """
    Evaluate AR for object proposals, AP for instance detection/segmentation, AP
    for keypoint detection outputs using COCO's metrics.
    See http://cocodataset.org/#detection-eval and
    http://cocodataset.org/#keypoints-eval to understand its metrics.
    The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means
    the metric cannot be computed (e.g. due to no predictions made).

    In addition to COCO, this evaluator is able to support any bounding box detection,
    instance segmentation, or keypoint detection dataset.
    """

    def __init__(
        self,
        dataset_name,
        tasks=None,
        distributed=True,
        output_dir=None,
        *,
        max_dets_per_image=None,
        use_fast_impl=True,
        kpt_oks_sigmas=(),
        allow_cached_coco=True,
        force_tasks=None,
        refcoco=False
    ):
        """
        Args:
            dataset_name (str): name of the dataset to be evaluated.
                It must have either the following corresponding metadata:
                    "json_file": the path to the COCO format annotation
                Or it must be in detectron2's standard dataset format
                so it can be converted to COCO format automatically.
            tasks (tuple[str]): tasks that can be evaluated under the given
                configuration. A task is one of "bbox", "segm", "keypoints".
                By default, will infer this automatically from predictions.
            distributed (True): if True, will collect results from all ranks and run evaluation
                in the main process.
                Otherwise, will only evaluate the results in the current process.
            output_dir (str): optional, an output directory to dump all
                results predicted on the dataset. The dump contains two files:
                1. "instances_predictions.pth" a file that can be loaded with `torch.load` and
                   contains all the results in the format they are produced by the model.
                2. "coco_instances_results.json" a json file in COCO's result format.
            max_dets_per_image (int): limit on the maximum number of detections per image.
                By default in COCO, this limit is to 100, but this can be customized
                to be greater, as is needed in evaluation metrics AP fixed and AP pool
                (see https://arxiv.org/pdf/2102.01066.pdf)
                This doesn't affect keypoint evaluation.
            use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP.
                Although the results should be very close to the official implementation in COCO
                API, it is still recommended to compute results with the official API for use in
                papers. The faster implementation also uses more RAM.
            kpt_oks_sigmas (list[float]): The sigmas used to calculate keypoint OKS.
                See http://cocodataset.org/#keypoints-eval
                When empty, it will use the defaults in COCO.
                Otherwise it should be the same length as ROI_KEYPOINT_HEAD.NUM_KEYPOINTS.
            allow_cached_coco (bool): Whether to use cached coco json from previous validation
                runs. You should set this to False if you need to use different validation data.
                Defaults to True.
        """
        self.dataset_name = dataset_name
        self._logger = logging.getLogger(__name__)
        self._distributed = distributed
        self._output_dir = output_dir
        self.force_tasks = force_tasks
        self.refcoco = refcoco

        if use_fast_impl and (COCOeval_opt is COCOeval):
            self._logger.info("Fast COCO eval is not built. Falling back to official COCO eval.")
            use_fast_impl = False
        self._use_fast_impl = use_fast_impl

        # COCOeval requires the limit on the number of detections per image (maxDets) to be a list
        # with at least 3 elements. The default maxDets in COCOeval is [1, 10, 100], in which the
        # 3rd element (100) is used as the limit on the number of detections per image when
        # evaluating AP. COCOEvaluator expects an integer for max_dets_per_image, so for COCOeval,
        # we reformat max_dets_per_image into [1, 10, max_dets_per_image], based on the defaults.
        if max_dets_per_image is None:
            max_dets_per_image = [1, 10, 100]
        else:
            max_dets_per_image = [1, 10, max_dets_per_image]
        self._max_dets_per_image = max_dets_per_image

        if tasks is not None and isinstance(tasks, CfgNode):
            kpt_oks_sigmas = (
                tasks.TEST.KEYPOINT_OKS_SIGMAS if not kpt_oks_sigmas else kpt_oks_sigmas
            )
            self._logger.warn(
                "COCO Evaluator instantiated using config, this is deprecated behavior."
                " Please pass in explicit arguments instead."
            )
            self._tasks = None  # Infering it from predictions should be better
        else:
            self._tasks = tasks

        self._cpu_device = torch.device("cpu")

        self._metadata = MetadataCatalog.get(dataset_name)
        if not hasattr(self._metadata, "json_file"):
            if output_dir is None:
                raise ValueError(
                    "output_dir must be provided to COCOEvaluator "
                    "for datasets not in COCO format."
                )
            self._logger.info(f"Trying to convert '{dataset_name}' to COCO format ...")

            cache_path = os.path.join(output_dir, f"{dataset_name}_coco_format.json")
            self._metadata.json_file = cache_path
convert_to_coco_json(dataset_name, cache_path, allow_cached=allow_cached_coco)
2
2023-12-22 13:31:33+00:00
16k
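The record above crops a COCOEvaluator variant mid-constructor, and its gold continuation calls convert_to_coco_json. For context, a hedged driver sketch (assuming a standard detectron2 cfg and model, which are not part of this record) would look like:

from detectron2.data import build_detection_test_loader
from detectron2.evaluation import COCOEvaluator, inference_on_dataset


def evaluate_coco(cfg, model, dataset_name="coco_2017_val", output_dir="./eval_out"):
    # cfg and model are assumed to come from a standard detectron2 setup
    # (get_cfg() + build_model()); they are not included in this record.
    evaluator = COCOEvaluator(dataset_name, output_dir=output_dir)
    loader = build_detection_test_loader(cfg, dataset_name)
    # runs the model over the loader and aggregates COCO-style metrics
    return inference_on_dataset(model, loader, evaluator)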
xhuangcv/humannorm
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Flo...
from dataclasses import dataclass, field

from threestudio.models.geometry.base import (
    BaseExplicitGeometry,
    BaseGeometry,
    contract_to_unisphere,
)
from threestudio.models.geometry.implicit_sdf import ImplicitSDF
from threestudio.models.geometry.implicit_volume import ImplicitVolume
from threestudio.models.isosurface import MarchingTetrahedraHelper
from threestudio.models.mesh import Mesh
from threestudio.models.networks import get_encoding, get_mlp
from threestudio.utils.ops import scale_tensor
from threestudio.utils.typing import *
from pysdf import SDF
from tqdm import tqdm
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio
import trimesh
14,266
@threestudio.register("tetrahedra-sdf-grid")
class TetrahedraSDFGrid(BaseExplicitGeometry):
    @dataclass
    class Config(BaseExplicitGeometry.Config):
        isosurface_resolution: int = 128
        isosurface_deformable_grid: bool = True
        isosurface_remove_outliers: bool = False
        isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01

        n_input_dims: int = 3
        n_feature_dims: int = 3
        pos_encoding_config: dict = field(
            default_factory=lambda: {
                "otype": "HashGrid",
                "n_levels": 16,
                "n_features_per_level": 2,
                "log2_hashmap_size": 19,
                "base_resolution": 16,
                "per_level_scale": 1.447269237440378,
            }
        )
        mlp_network_config: dict = field(
            default_factory=lambda: {
                "otype": "VanillaMLP",
                "activation": "ReLU",
                "output_activation": "none",
                "n_neurons": 64,
                "n_hidden_layers": 1,
            }
        )
        shape_init: Optional[str] = None
        shape_init_params: Optional[Any] = None
        force_shape_init: bool = False
        geometry_only: bool = False
        fix_geometry: bool = False

    cfg: Config

    def configure(self) -> None:
        super().configure()

        # this should be saved to state_dict, register as buffer
        self.isosurface_bbox: Float[Tensor, "2 3"]
        self.register_buffer("isosurface_bbox", self.bbox.clone())

        self.isosurface_helper = MarchingTetrahedraHelper(
            self.cfg.isosurface_resolution,
            f"load/tets/{self.cfg.isosurface_resolution}_tets.npz",
        )

        self.sdf: Float[Tensor, "Nv 1"]
        self.deformation: Optional[Float[Tensor, "Nv 3"]]

        if not self.cfg.fix_geometry:
            self.register_parameter(
                "sdf",
                nn.Parameter(
                    torch.zeros(
                        (self.isosurface_helper.grid_vertices.shape[0], 1),
                        dtype=torch.float32,
                    )
                ),
            )
            if self.cfg.isosurface_deformable_grid:
                self.register_parameter(
                    "deformation",
                    nn.Parameter(
                        torch.zeros_like(self.isosurface_helper.grid_vertices)
                    ),
                )
            else:
                self.deformation = None
        else:
            self.register_buffer(
                "sdf",
                torch.zeros(
                    (self.isosurface_helper.grid_vertices.shape[0], 1),
                    dtype=torch.float32,
                ),
            )
            if self.cfg.isosurface_deformable_grid:
                self.register_buffer(
                    "deformation",
                    torch.zeros_like(self.isosurface_helper.grid_vertices),
                )
            else:
                self.deformation = None

        if not self.cfg.geometry_only:
            self.encoding = get_encoding(
                self.cfg.n_input_dims, self.cfg.pos_encoding_config
            )
@threestudio.register("tetrahedra-sdf-grid")
class TetrahedraSDFGrid(BaseExplicitGeometry):
    @dataclass
    class Config(BaseExplicitGeometry.Config):
        isosurface_resolution: int = 128
        isosurface_deformable_grid: bool = True
        isosurface_remove_outliers: bool = False
        isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01

        n_input_dims: int = 3
        n_feature_dims: int = 3
        pos_encoding_config: dict = field(
            default_factory=lambda: {
                "otype": "HashGrid",
                "n_levels": 16,
                "n_features_per_level": 2,
                "log2_hashmap_size": 19,
                "base_resolution": 16,
                "per_level_scale": 1.447269237440378,
            }
        )
        mlp_network_config: dict = field(
            default_factory=lambda: {
                "otype": "VanillaMLP",
                "activation": "ReLU",
                "output_activation": "none",
                "n_neurons": 64,
                "n_hidden_layers": 1,
            }
        )
        shape_init: Optional[str] = None
        shape_init_params: Optional[Any] = None
        force_shape_init: bool = False
        geometry_only: bool = False
        fix_geometry: bool = False

    cfg: Config

    def configure(self) -> None:
        super().configure()

        # this should be saved to state_dict, register as buffer
        self.isosurface_bbox: Float[Tensor, "2 3"]
        self.register_buffer("isosurface_bbox", self.bbox.clone())

        self.isosurface_helper = MarchingTetrahedraHelper(
            self.cfg.isosurface_resolution,
            f"load/tets/{self.cfg.isosurface_resolution}_tets.npz",
        )

        self.sdf: Float[Tensor, "Nv 1"]
        self.deformation: Optional[Float[Tensor, "Nv 3"]]

        if not self.cfg.fix_geometry:
            self.register_parameter(
                "sdf",
                nn.Parameter(
                    torch.zeros(
                        (self.isosurface_helper.grid_vertices.shape[0], 1),
                        dtype=torch.float32,
                    )
                ),
            )
            if self.cfg.isosurface_deformable_grid:
                self.register_parameter(
                    "deformation",
                    nn.Parameter(
                        torch.zeros_like(self.isosurface_helper.grid_vertices)
                    ),
                )
            else:
                self.deformation = None
        else:
            self.register_buffer(
                "sdf",
                torch.zeros(
                    (self.isosurface_helper.grid_vertices.shape[0], 1),
                    dtype=torch.float32,
                ),
            )
            if self.cfg.isosurface_deformable_grid:
                self.register_buffer(
                    "deformation",
                    torch.zeros_like(self.isosurface_helper.grid_vertices),
                )
            else:
                self.deformation = None

        if not self.cfg.geometry_only:
            self.encoding = get_encoding(
                self.cfg.n_input_dims, self.cfg.pos_encoding_config
            )
self.feature_network = get_mlp(
8
2023-12-23 12:37:48+00:00
16k
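The geometry above fills an sdf buffer over marching-tetrahedra grid vertices. A minimal sketch of producing such values with the pysdf and trimesh packages this record imports (the sphere, grid resolution, and sign convention noted below are illustrative assumptions, not the repo's actual shape-init code):

import numpy as np
import trimesh
from pysdf import SDF

# Illustrative stand-in for an initialization mesh.
mesh = trimesh.creation.icosphere(subdivisions=3, radius=0.5)
f = SDF(mesh.vertices, mesh.faces)  # pysdf convention: positive inside the surface

# 32^3 query grid over a [-1, 1]^3 bounding box, one signed distance per vertex
axis = np.linspace(-1.0, 1.0, 32)
grid = np.stack(np.meshgrid(axis, axis, axis, indexing="ij"), axis=-1).reshape(-1, 3)
sdf_values = f(grid)
print(sdf_values.shape)  # (32768,)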
Con6924/SPM
evaluate_task.py
[ { "identifier": "config", "path": "src/configs/config.py", "snippet": "PRECISION_TYPES = Literal[\"fp32\", \"fp16\", \"bf16\", \"float32\", \"float16\", \"bfloat16\"]\nclass PretrainedModelConfig(BaseModel):\nclass NetworkConfig(BaseModel):\nclass TrainConfig(BaseModel): \nclass SaveConfig(BaseModel)...
import argparse
import gc
import warnings
import torch
from pathlib import Path
from typing import Literal
from torch.utils.data import DataLoader
from accelerate import PartialState, Accelerator

from src.configs import config
from src.configs.config import RootConfig
from src.configs.generation_config import GenerationConfig
from src.engine import train_util
from src.evaluation import *
from src.models import model_util
from src.models.spm import SPMLayer, SPMNetwork
from src.models.merge_spm import load_state_dict
from src.misc.sld_pipeline import SLDPipeline
11,603
def parse_extra_args(extra_args):
    if extra_args is None or extra_args == ['']:
        return {}
    extra_args_dict = {}
    for extra_arg in extra_args:
        key, value = extra_arg.split("=")
        # convert value to various types
        if value.isdigit():
            value = int(value)
        elif value.replace(".", "", 1).isdigit():
            value = float(value)
        elif value[0] == "[" and value[-1] == "]":
            value = [i.replace('+', ' ') for i in value[1:-1].split(",")]
            value = [v.strip() for v in value]
            if value[0].isdigit():
                value = [int(v) for v in value]
            elif value[0].replace(".", "", 1).isdigit():
                value = [float(v) for v in value]
        extra_args_dict[key] = value
    return extra_args_dict


def get_dataloader(args, num_processes=1):
    # parse task_args arguments
    task_args = parse_extra_args(args.task_args)
    task_args["save_folder"] = args.img_save_path
    task_args["output_path"] = args.save_path

    # parse generation arguments
    cfg = parse_extra_args(args.generation_cfg)
    cfg = GenerationConfig(**cfg)

    dataset_class = None
    if args.task == "general":
        dataset_class = ClipTemplateDataset
    elif args.task == "artwork":
        dataset_class = ArtworkDataset
    elif args.task == "i2p":
        dataset_class = I2PDataset
    elif args.task == "coco":
        dataset_class = Coco30kGenerationDataset
    else:
        raise ValueError(f"Unknown task: {args.task}")

    dataset = dataset_class(**task_args, base_cfg=cfg)
    dataloader = DataLoader(dataset, batch_size=num_processes, num_workers=0, shuffle=False)
    return dataloader


def get_evaluator(args):
    evaluator_class = None
    if args.task == "general":
        evaluator_class = ClipEvaluator
    elif args.task == "artwork":
        evaluator_class = ArtworkEvaluator
    elif args.task == "i2p":
        evaluator_class = I2PEvaluator
    elif args.task == "coco":
        evaluator_class = CocoEvaluator
    else:
        raise ValueError(f"Unknown task: {args.task}")

    evaluator = evaluator_class(
        save_folder=args.img_save_path,
        output_path=args.save_path
    )
    return evaluator


def calculate_matching_score(
    prompt_tokens,
    prompt_embeds,
    erased_prompt_tokens,
    erased_prompt_embeds,
    matching_metric: MATCHING_METRICS,
    special_token_ids: set[int],
    weight_dtype: torch.dtype = torch.float32,
):
    scores = []
    if "allone" in matching_metric:
        scores.append(torch.ones(prompt_embeds.shape[0]).to("cpu", dtype=weight_dtype))
    if "clipcos" in matching_metric:
        clipcos = torch.cosine_similarity(
            prompt_embeds.flatten(1, 2), erased_prompt_embeds.flatten(1, 2), dim=-1
        ).cpu()
        scores.append(clipcos)
    if "tokenuni" in matching_metric:
        prompt_set = set(prompt_tokens[0].tolist()) - special_token_ids
        tokenuni = []
        for ep in erased_prompt_tokens:
            ep_set = set(ep.tolist()) - special_token_ids
            tokenuni.append(len(prompt_set.intersection(ep_set)) / len(ep_set))
        scores.append(torch.tensor(tokenuni).to("cpu", dtype=weight_dtype))
    return torch.max(torch.stack(scores), dim=0)[0]


@torch.no_grad()
def infer_with_spm(
    dataloader: DataLoader,
    spm_paths: list[str],
    matching_metric: MATCHING_METRICS,
    facilitate_factor: float = 1.0,
    assigned_multipliers: list[float] = None,
    finetuned_model_path: str = None,
    sld_target_concept: str = None,
    base_model: str = "CompVis/stable-diffusion-v1-4",
    v2: bool = False,
    precision: str = "fp32",
):
    spm_model_paths = [
        lp / f"{lp.name}_last.safetensors" if lp.is_dir() else lp for lp in spm_paths
    ]
    weight_dtype = config.parse_precision(precision)

    if finetuned_model_path is not None and Path(finetuned_model_path).is_dir():
        # folder path for the diffuser model
        base_model = finetuned_model_path
        print(f"Using models from {base_model}")

    # load the pretrained SD
DIFFUSERS_CACHE_DIR = ".cache/"
UNET_NAME = "unet"
TEXT_ENCODER_NAME = "text_encoder"
MATCHING_METRICS = Literal[
    "clipcos",
    "clipcos_tokenuni",
    "tokenuni",
    "allone",
]

distributed_state = PartialState()
accelerator = Accelerator()


def flush():
    torch.cuda.empty_cache()
    gc.collect()


def parse_extra_args(extra_args):
    if extra_args is None or extra_args == ['']:
        return {}
    extra_args_dict = {}
    for extra_arg in extra_args:
        key, value = extra_arg.split("=")
        # convert value to various types
        if value.isdigit():
            value = int(value)
        elif value.replace(".", "", 1).isdigit():
            value = float(value)
        elif value[0] == "[" and value[-1] == "]":
            value = [i.replace('+', ' ') for i in value[1:-1].split(",")]
            value = [v.strip() for v in value]
            if value[0].isdigit():
                value = [int(v) for v in value]
            elif value[0].replace(".", "", 1).isdigit():
                value = [float(v) for v in value]
        extra_args_dict[key] = value
    return extra_args_dict


def get_dataloader(args, num_processes=1):
    # parse task_args arguments
    task_args = parse_extra_args(args.task_args)
    task_args["save_folder"] = args.img_save_path
    task_args["output_path"] = args.save_path

    # parse generation arguments
    cfg = parse_extra_args(args.generation_cfg)
    cfg = GenerationConfig(**cfg)

    dataset_class = None
    if args.task == "general":
        dataset_class = ClipTemplateDataset
    elif args.task == "artwork":
        dataset_class = ArtworkDataset
    elif args.task == "i2p":
        dataset_class = I2PDataset
    elif args.task == "coco":
        dataset_class = Coco30kGenerationDataset
    else:
        raise ValueError(f"Unknown task: {args.task}")

    dataset = dataset_class(**task_args, base_cfg=cfg)
    dataloader = DataLoader(dataset, batch_size=num_processes, num_workers=0, shuffle=False)
    return dataloader


def get_evaluator(args):
    evaluator_class = None
    if args.task == "general":
        evaluator_class = ClipEvaluator
    elif args.task == "artwork":
        evaluator_class = ArtworkEvaluator
    elif args.task == "i2p":
        evaluator_class = I2PEvaluator
    elif args.task == "coco":
        evaluator_class = CocoEvaluator
    else:
        raise ValueError(f"Unknown task: {args.task}")

    evaluator = evaluator_class(
        save_folder=args.img_save_path,
        output_path=args.save_path
    )
    return evaluator


def calculate_matching_score(
    prompt_tokens,
    prompt_embeds,
    erased_prompt_tokens,
    erased_prompt_embeds,
    matching_metric: MATCHING_METRICS,
    special_token_ids: set[int],
    weight_dtype: torch.dtype = torch.float32,
):
    scores = []
    if "allone" in matching_metric:
        scores.append(torch.ones(prompt_embeds.shape[0]).to("cpu", dtype=weight_dtype))
    if "clipcos" in matching_metric:
        clipcos = torch.cosine_similarity(
            prompt_embeds.flatten(1, 2), erased_prompt_embeds.flatten(1, 2), dim=-1
        ).cpu()
        scores.append(clipcos)
    if "tokenuni" in matching_metric:
        prompt_set = set(prompt_tokens[0].tolist()) - special_token_ids
        tokenuni = []
        for ep in erased_prompt_tokens:
            ep_set = set(ep.tolist()) - special_token_ids
            tokenuni.append(len(prompt_set.intersection(ep_set)) / len(ep_set))
        scores.append(torch.tensor(tokenuni).to("cpu", dtype=weight_dtype))
    return torch.max(torch.stack(scores), dim=0)[0]


@torch.no_grad()
def infer_with_spm(
    dataloader: DataLoader,
    spm_paths: list[str],
    matching_metric: MATCHING_METRICS,
    facilitate_factor: float = 1.0,
    assigned_multipliers: list[float] = None,
    finetuned_model_path: str = None,
    sld_target_concept: str = None,
    base_model: str = "CompVis/stable-diffusion-v1-4",
    v2: bool = False,
    precision: str = "fp32",
):
    spm_model_paths = [
        lp / f"{lp.name}_last.safetensors" if lp.is_dir() else lp for lp in spm_paths
    ]
    weight_dtype = config.parse_precision(precision)

    if finetuned_model_path is not None and Path(finetuned_model_path).is_dir():
        # folder path for the diffuser model
        base_model = finetuned_model_path
        print(f"Using models from {base_model}")

    # load the pretrained SD
tokenizer, text_encoder, unet, pipe = model_util.load_checkpoint_model(
4
2023-12-26 03:19:16+00:00
16k
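A worked example of the parse_extra_args helper defined above, showing how CLI-style "key=value" strings are coerced to int, float, and list values (assumes the function is in scope):

extra = ["steps=30", "scale=7.5", "concepts=[van+gogh,monet]"]
print(parse_extra_args(extra))
# -> {'steps': 30, 'scale': 7.5, 'concepts': ['van gogh', 'monet']}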
dakpinaroglu/Frame2seq
frame2seq/openfold/utils/feats.py
[ { "identifier": "protein", "path": "frame2seq/openfold/np/protein.py", "snippet": "PICO_TO_ANGSTROM = 0.01\nclass Protein:\ndef from_pdb_string(pdb_str: str, chain_id: Optional[str] = None) -> Protein:\ndef from_proteinnet_string(proteinnet_str: str) -> Protein:\ndef get_pdb_headers(prot: Protein, chain...
import math
import numpy as np
import torch
import torch.nn as nn
import frame2seq.openfold.np.residue_constants as rc
from typing import Dict
from frame2seq.openfold.np import protein
from frame2seq.openfold.utils.rigid_utils import Rotation, Rigid
from frame2seq.openfold.utils.tensor_utils import (
    batched_gather,
    one_hot,
    tree_map,
    tensor_tree_map,
)
11,342
def build_template_pair_feat(
    batch, min_bin, max_bin, no_bins, use_unit_vector=False, eps=1e-20, inf=1e8
):
    template_mask = batch["template_pseudo_beta_mask"]
    template_mask_2d = template_mask[..., None] * template_mask[..., None, :]

    # Compute distogram (this seems to differ slightly from Alg. 5)
    tpb = batch["template_pseudo_beta"]
    dgram = torch.sum(
        (tpb[..., None, :] - tpb[..., None, :, :]) ** 2, dim=-1, keepdim=True
    )
    lower = torch.linspace(min_bin, max_bin, no_bins, device=tpb.device) ** 2
    upper = torch.cat([lower[1:], lower.new_tensor([inf])], dim=-1)
    dgram = ((dgram > lower) * (dgram < upper)).type(dgram.dtype)

    to_concat = [dgram, template_mask_2d[..., None]]

    aatype_one_hot = nn.functional.one_hot(
        batch["template_aatype"],
        rc.restype_num + 2,
    )

    n_res = batch["template_aatype"].shape[-1]
    to_concat.append(
        aatype_one_hot[..., None, :, :].expand(
            *aatype_one_hot.shape[:-2], n_res, -1, -1
        )
    )
    to_concat.append(
        aatype_one_hot[..., None, :].expand(
            *aatype_one_hot.shape[:-2], -1, n_res, -1
        )
    )

    n, ca, c = [rc.atom_order[a] for a in ["N", "CA", "C"]]
    rigids = Rigid.make_transform_from_reference(
        n_xyz=batch["template_all_atom_positions"][..., n, :],
        ca_xyz=batch["template_all_atom_positions"][..., ca, :],
        c_xyz=batch["template_all_atom_positions"][..., c, :],
        eps=eps,
    )
    points = rigids.get_trans()[..., None, :, :]
    rigid_vec = rigids[..., None].invert_apply(points)

    inv_distance_scalar = torch.rsqrt(eps + torch.sum(rigid_vec ** 2, dim=-1))

    t_aa_masks = batch["template_all_atom_mask"]
    template_mask = (
        t_aa_masks[..., n] * t_aa_masks[..., ca] * t_aa_masks[..., c]
    )
    template_mask_2d = template_mask[..., None] * template_mask[..., None, :]

    inv_distance_scalar = inv_distance_scalar * template_mask_2d
    unit_vector = rigid_vec * inv_distance_scalar[..., None]

    if(not use_unit_vector):
        unit_vector = unit_vector * 0.

    to_concat.extend(torch.unbind(unit_vector[..., None, :], dim=-1))
    to_concat.append(template_mask_2d[..., None])

    act = torch.cat(to_concat, dim=-1)
    act = act * template_mask_2d[..., None]

    return act


def build_extra_msa_feat(batch):
    msa_1hot = nn.functional.one_hot(batch["extra_msa"], 23)
    msa_feat = [
        msa_1hot,
        batch["extra_has_deletion"].unsqueeze(-1),
        batch["extra_deletion_value"].unsqueeze(-1),
    ]
    return torch.cat(msa_feat, dim=-1)


def torsion_angles_to_frames(
    r: Rigid,
    alpha: torch.Tensor,
    aatype: torch.Tensor,
    rrgdf: torch.Tensor,
):
    # [*, N, 8, 4, 4]
    default_4x4 = rrgdf[aatype, ...]

    # [*, N, 8] transformations, i.e.
    #   One [*, N, 8, 3, 3] rotation matrix and
    #   One [*, N, 8, 3] translation matrix
    default_r = r.from_tensor_4x4(default_4x4)

    bb_rot = alpha.new_zeros((*((1,) * len(alpha.shape[:-1])), 2))
    bb_rot[..., 1] = 1

    # [*, N, 8, 2]
    alpha = torch.cat(
        [bb_rot.expand(*alpha.shape[:-2], -1, -1), alpha], dim=-2
    )

    # [*, N, 8, 3, 3]
    # Produces rotation matrices of the form:
    # [
    #   [1, 0  , 0  ],
    #   [0, a_2, -a_1],
    #   [0, a_1, a_2]
    # ]
    # This follows the original code rather than the supplement, which uses
    # different indices.

    all_rots = alpha.new_zeros(default_r.get_rots().get_rot_mats().shape)
    all_rots[..., 0, 0] = 1
    all_rots[..., 1, 1] = alpha[..., 1]
    all_rots[..., 1, 2] = -alpha[..., 0]
    all_rots[..., 2, 1:] = alpha
# Copyright 2021 AlQuraishi Laboratory
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


def pseudo_beta_fn(aatype, all_atom_positions, all_atom_masks):
    is_gly = aatype == rc.restype_order["G"]
    ca_idx = rc.atom_order["CA"]
    cb_idx = rc.atom_order["CB"]
    pseudo_beta = torch.where(
        is_gly[..., None].expand(*((-1,) * len(is_gly.shape)), 3),
        all_atom_positions[..., ca_idx, :],
        all_atom_positions[..., cb_idx, :],
    )

    if all_atom_masks is not None:
        pseudo_beta_mask = torch.where(
            is_gly,
            all_atom_masks[..., ca_idx],
            all_atom_masks[..., cb_idx],
        )
        return pseudo_beta, pseudo_beta_mask
    else:
        return pseudo_beta


def atom14_to_atom37(atom14, batch):
    atom37_data = batched_gather(
        atom14,
        batch["residx_atom37_to_atom14"],
        dim=-2,
        no_batch_dims=len(atom14.shape[:-2]),
    )

    atom37_data = atom37_data * batch["atom37_atom_exists"][..., None]

    return atom37_data


def build_template_angle_feat(template_feats):
    template_aatype = template_feats["template_aatype"]
    torsion_angles_sin_cos = template_feats["template_torsion_angles_sin_cos"]
    alt_torsion_angles_sin_cos = template_feats[
        "template_alt_torsion_angles_sin_cos"
    ]
    torsion_angles_mask = template_feats["template_torsion_angles_mask"]
    template_angle_feat = torch.cat(
        [
            nn.functional.one_hot(template_aatype, 22),
            torsion_angles_sin_cos.reshape(
                *torsion_angles_sin_cos.shape[:-2], 14
            ),
            alt_torsion_angles_sin_cos.reshape(
                *alt_torsion_angles_sin_cos.shape[:-2], 14
            ),
            torsion_angles_mask,
        ],
        dim=-1,
    )

    return template_angle_feat


def build_template_pair_feat(
    batch, min_bin, max_bin, no_bins, use_unit_vector=False, eps=1e-20, inf=1e8
):
    template_mask = batch["template_pseudo_beta_mask"]
    template_mask_2d = template_mask[..., None] * template_mask[..., None, :]

    # Compute distogram (this seems to differ slightly from Alg. 5)
    tpb = batch["template_pseudo_beta"]
    dgram = torch.sum(
        (tpb[..., None, :] - tpb[..., None, :, :]) ** 2, dim=-1, keepdim=True
    )
    lower = torch.linspace(min_bin, max_bin, no_bins, device=tpb.device) ** 2
    upper = torch.cat([lower[1:], lower.new_tensor([inf])], dim=-1)
    dgram = ((dgram > lower) * (dgram < upper)).type(dgram.dtype)

    to_concat = [dgram, template_mask_2d[..., None]]

    aatype_one_hot = nn.functional.one_hot(
        batch["template_aatype"],
        rc.restype_num + 2,
    )

    n_res = batch["template_aatype"].shape[-1]
    to_concat.append(
        aatype_one_hot[..., None, :, :].expand(
            *aatype_one_hot.shape[:-2], n_res, -1, -1
        )
    )
    to_concat.append(
        aatype_one_hot[..., None, :].expand(
            *aatype_one_hot.shape[:-2], -1, n_res, -1
        )
    )

    n, ca, c = [rc.atom_order[a] for a in ["N", "CA", "C"]]
    rigids = Rigid.make_transform_from_reference(
        n_xyz=batch["template_all_atom_positions"][..., n, :],
        ca_xyz=batch["template_all_atom_positions"][..., ca, :],
        c_xyz=batch["template_all_atom_positions"][..., c, :],
        eps=eps,
    )
    points = rigids.get_trans()[..., None, :, :]
    rigid_vec = rigids[..., None].invert_apply(points)

    inv_distance_scalar = torch.rsqrt(eps + torch.sum(rigid_vec ** 2, dim=-1))

    t_aa_masks = batch["template_all_atom_mask"]
    template_mask = (
        t_aa_masks[..., n] * t_aa_masks[..., ca] * t_aa_masks[..., c]
    )
    template_mask_2d = template_mask[..., None] * template_mask[..., None, :]

    inv_distance_scalar = inv_distance_scalar * template_mask_2d
    unit_vector = rigid_vec * inv_distance_scalar[..., None]

    if(not use_unit_vector):
        unit_vector = unit_vector * 0.

    to_concat.extend(torch.unbind(unit_vector[..., None, :], dim=-1))
    to_concat.append(template_mask_2d[..., None])

    act = torch.cat(to_concat, dim=-1)
    act = act * template_mask_2d[..., None]

    return act


def build_extra_msa_feat(batch):
    msa_1hot = nn.functional.one_hot(batch["extra_msa"], 23)
    msa_feat = [
        msa_1hot,
        batch["extra_has_deletion"].unsqueeze(-1),
        batch["extra_deletion_value"].unsqueeze(-1),
    ]
    return torch.cat(msa_feat, dim=-1)


def torsion_angles_to_frames(
    r: Rigid,
    alpha: torch.Tensor,
    aatype: torch.Tensor,
    rrgdf: torch.Tensor,
):
    # [*, N, 8, 4, 4]
    default_4x4 = rrgdf[aatype, ...]

    # [*, N, 8] transformations, i.e.
    #   One [*, N, 8, 3, 3] rotation matrix and
    #   One [*, N, 8, 3] translation matrix
    default_r = r.from_tensor_4x4(default_4x4)

    bb_rot = alpha.new_zeros((*((1,) * len(alpha.shape[:-1])), 2))
    bb_rot[..., 1] = 1

    # [*, N, 8, 2]
    alpha = torch.cat(
        [bb_rot.expand(*alpha.shape[:-2], -1, -1), alpha], dim=-2
    )

    # [*, N, 8, 3, 3]
    # Produces rotation matrices of the form:
    # [
    #   [1, 0  , 0  ],
    #   [0, a_2, -a_1],
    #   [0, a_1, a_2]
    # ]
    # This follows the original code rather than the supplement, which uses
    # different indices.

    all_rots = alpha.new_zeros(default_r.get_rots().get_rot_mats().shape)
    all_rots[..., 0, 0] = 1
    all_rots[..., 1, 1] = alpha[..., 1]
    all_rots[..., 1, 2] = -alpha[..., 0]
    all_rots[..., 2, 1:] = alpha
all_rots = Rigid(Rotation(rot_mats=all_rots), None)
1
2023-12-25 09:29:36+00:00
16k
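A quick numerical check of the rotation-matrix layout documented in torsion_angles_to_frames above: with (a_1, a_2) = (sin t, cos t), the matrix is orthonormal with determinant 1, i.e. a proper rotation about the x-axis.

import numpy as np

t = 0.7
a1, a2 = np.sin(t), np.cos(t)  # (a_1, a_2) = (sin, cos) of a torsion angle
R = np.array([
    [1.0, 0.0, 0.0],
    [0.0,  a2, -a1],
    [0.0,  a1,  a2],
])
assert np.allclose(R @ R.T, np.eye(3))
assert np.isclose(np.linalg.det(R), 1.0)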
KyanChen/TTP
mmpretrain/models/multimodal/clip/clip.py
[ { "identifier": "CIFAR100_CATEGORIES", "path": "mmpretrain/datasets/categories.py", "snippet": "CIFAR100_CATEGORIES = (\n 'apple', 'aquarium_fish', 'baby', 'bear', 'beaver', 'bed', 'bee', 'beetle',\n 'bicycle', 'bottle', 'bowl', 'boy', 'bridge', 'bus', 'butterfly', 'camel',\n 'can', 'castle', '...
from abc import abstractmethod
from typing import List, Optional, Tuple, Union

from mmengine.model import BaseModel
from torch import nn

from mmpretrain.datasets.categories import (CIFAR100_CATEGORIES,
                                            IMAGENET_SIMPLE_CATEGORIES)
from mmpretrain.registry import MODELS, TOKENIZER
from mmpretrain.structures import DataSample
from mmpretrain.utils import track_on_main_process
from .utils import (OPENAI_CIFAR100_PROMPT, OPENAI_IMAGENET_PROMPT,
                    OPENAI_IMAGENET_PROMPT_SUB)
import numpy as np
import torch
import torch.nn.functional as F
10,842
    def initialize_parameters(self) -> None:
        """Initialize the parameters.

        The pretrained weight will override the initialized parameters by this
        function.
        """
        nn.init.normal_(self.token_embedding.weight, std=0.02)
        nn.init.normal_(self.positional_embedding, std=0.01)

        proj_std = (self.transformer.width**-0.5) * (
            (2 * self.transformer.layers)**-0.5)
        attn_std = self.transformer.width**-0.5
        fc_std = (2 * self.transformer.width)**-0.5
        for block in self.transformer.resblocks:
            nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
            nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
            nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
            nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)

        if self.text_projection is not None:
            nn.init.normal_(
                self.text_projection, std=self.transformer.width**-0.5)

    def build_attention_mask(self):
        # lazily create causal attention mask,
        # with full attention between the vision tokens
        # pytorch uses additive attention mask; fill with -inf
        mask = torch.empty(self.context_length, self.context_length)
        mask.fill_(float('-inf'))
        mask.triu_(1)  # zero out the lower diagonal
        return mask

    def forward(
        self,
        images: torch.Tensor,
        data_samples: Optional[list] = None,
        mode: str = 'predict',
        **kwargs,
    ):
        """The unified entry for a forward process in both training and test.

        The method accepts the following modes:

        - "predict": Forward and return a list of data samples contain the
          predict results.

        Args:
            images (torch.Tensor): the preprocessed image tensor of shape
                ``(N, C, H, W)``.
            data_samples (List[DataSample], optional): The annotation data
                of every samples. Defaults to None.
            mode (str): Return what kind of value. Defaults to 'predict'.
        """
        if mode == 'predict':
            return self.predict(images, data_samples, **kwargs)
        else:
            raise RuntimeError(f'Invalid mode "{mode}".')

    def extract_image_feat(self, images: torch.Tensor) -> torch.Tensor:
        """The function to extract image latent features."""
        return self.visual_proj(self.visual(images))[0]

    def extract_text_feat(self, texts: torch.Tensor) -> torch.Tensor:
        """The function to extract text latent features."""
        x = self.token_embedding(texts)  # [batch_size, n_ctx, d_model]

        x = x + self.positional_embedding
        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x)[0]
        x = x.permute(1, 0, 2)  # LND -> NLD
        x = self.ln_final(x)

        # x.shape = [batch_size, n_ctx, transformer.width]
        # take features from the eot embedding
        # (eot_token is the highest number in each sequence)
        x = x[torch.arange(x.shape[0]),
              texts.argmax(dim=-1)] @ self.text_projection

        return x

    def extract_feat(
            self, images: torch.Tensor,
            texts: torch.Tensor) -> Union[torch.Tensor, Tuple[torch.Tensor]]:
        """The function to extract image and text latent features, the input
        image or text can not both be None."""

        assert images is not None or texts is not None, \
            'text and image cannot both be None!'
        if images is None:
            return self.extract_text_feat(texts)
        elif texts is None:
            return self.extract_image_feat(images)

        image_features = self.extract_image_feat(images)
        text_features = self.extract_text_feat(texts)

        image_features = image_features / image_features.norm(
            dim=-1, keepdim=True)
        text_features = text_features / text_features.norm(
            dim=-1, keepdim=True)

        return image_features, text_features

    def compute_similarity(self, images, texts):
        """Extract images and texts features and compute cosine similarity."""
        image_features, text_features = self.extract_feat(
            images=images, texts=texts)

        # cosine similarity as logits
        logit_scale = self.logit_scale.exp()
        logits_per_image = logit_scale * image_features @ text_features.t()
        logits_per_text = logits_per_image.t()

        # shape (N, N)
        return logits_per_image, logits_per_text

    @abstractmethod
    def predict(self,
                images: torch.Tensor,
# Copyright (c) OpenMMLab. All rights reserved.
CIFAR100_CATEGORIES = [' '.join(c.split('_')) for c in CIFAR100_CATEGORIES]
PROTOTYPE_MAP = {
    'imagenet': IMAGENET_SIMPLE_CATEGORIES,
    'cifar100': CIFAR100_CATEGORIES,
}
PROMPT_MAP = {
    'openai_imagenet': OPENAI_IMAGENET_PROMPT,
    'openai_cifar100': OPENAI_CIFAR100_PROMPT,
    'vanilla': [lambda c: f'a photo of a {c}'],
    'openai_imagenet_sub': OPENAI_IMAGENET_PROMPT_SUB
}


class LayerNorm(nn.LayerNorm):
    """Subclass torch's LayerNorm to handle fp16."""

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Forward function."""
        orig_type = x.dtype
        ret = super().forward(x.type(torch.float32))
        return ret.type(orig_type)


class CLIP(BaseModel):
    """The implementation of `CLIP <https://arxiv.org/abs/2103.00020>`_.

    Args:
        vision_backbone (dict): Config dict for vision backbone.
        text_backbone (dict): Config dict for text backbone.
        tokenizer (dict): Config dict for text tokenizer.
        proj_dim (int): Projection dimension for similarity computation.
        text_prototype (str): Text prototype, which can be a key in
            `PROTOTYPE_MAP` or list of text.
        text_prompt (str): The prompt for text prototype.
            Defaults to 'vanilla',which refers to "a photo of {cls}".
        context_length (int): The context length to use. Defaults to 77.
        data_preprocessor (Union[dict, nn.Module], optional): The config for
            preprocessing input data. If None or no specified type, it will use
            "MultiModalDataPreprocessor" as type.
            See :class:`MultiModalDataPreprocessor` for more details.
            Defaults to None.
        init_cfg (dict, optional): The config to control the initialization.
            Defaults to None.
    """

    def __init__(self,
                 vision_backbone: dict,
                 projection: dict,
                 text_backbone: dict,
                 tokenizer: dict,
                 vocab_size: int,
                 transformer_width: int,
                 proj_dim: int,
                 context_length: int = 77,
                 data_preprocessor: Optional[dict] = None,
                 init_cfg: Optional[dict] = None):
        if data_preprocessor is None:
            data_preprocessor = {}
        data_preprocessor.setdefault('type', 'MultiModalDataPreprocessor')
        data_preprocessor = MODELS.build(data_preprocessor)

        super().__init__(
            data_preprocessor=data_preprocessor, init_cfg=init_cfg)

        self.context_length = context_length

        # build the vision transformer
        self.visual = MODELS.build(vision_backbone)

        # build the visual projection
        self.visual_proj = MODELS.build(projection)

        # build attn_mask for casual-attn
        text_backbone['attn_mask'] = self.build_attention_mask()

        # build the text transformer
        self.transformer = MODELS.build(text_backbone)

        self.vocab_size = vocab_size
        self.token_embedding = nn.Embedding(vocab_size, transformer_width)
        self.positional_embedding = nn.Parameter(
            torch.empty(self.context_length, transformer_width))
        self.ln_final = LayerNorm(transformer_width)

        self.text_projection = nn.Parameter(
            torch.empty(transformer_width, proj_dim))
        self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))

        self.initialize_parameters()

        self.tokenizer = TOKENIZER.build(tokenizer)

        self.tokenizer.vocab = self.tokenizer.get_vocab(
        )  # CLIPTokenizer has no attribute named 'vocab', so manually

    def initialize_parameters(self) -> None:
        """Initialize the parameters.

        The pretrained weight will override the initialized parameters by this
        function.
        """
        nn.init.normal_(self.token_embedding.weight, std=0.02)
        nn.init.normal_(self.positional_embedding, std=0.01)

        proj_std = (self.transformer.width**-0.5) * (
            (2 * self.transformer.layers)**-0.5)
        attn_std = self.transformer.width**-0.5
        fc_std = (2 * self.transformer.width)**-0.5
        for block in self.transformer.resblocks:
            nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
            nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
            nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
            nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)

        if self.text_projection is not None:
            nn.init.normal_(
                self.text_projection, std=self.transformer.width**-0.5)

    def build_attention_mask(self):
        # lazily create causal attention mask,
        # with full attention between the vision tokens
        # pytorch uses additive attention mask; fill with -inf
        mask = torch.empty(self.context_length, self.context_length)
        mask.fill_(float('-inf'))
        mask.triu_(1)  # zero out the lower diagonal
        return mask

    def forward(
        self,
        images: torch.Tensor,
        data_samples: Optional[list] = None,
        mode: str = 'predict',
        **kwargs,
    ):
        """The unified entry for a forward process in both training and test.

        The method accepts the following modes:

        - "predict": Forward and return a list of data samples contain the
          predict results.

        Args:
            images (torch.Tensor): the preprocessed image tensor of shape
                ``(N, C, H, W)``.
            data_samples (List[DataSample], optional): The annotation data
                of every samples. Defaults to None.
            mode (str): Return what kind of value. Defaults to 'predict'.
        """
        if mode == 'predict':
            return self.predict(images, data_samples, **kwargs)
        else:
            raise RuntimeError(f'Invalid mode "{mode}".')

    def extract_image_feat(self, images: torch.Tensor) -> torch.Tensor:
        """The function to extract image latent features."""
        return self.visual_proj(self.visual(images))[0]

    def extract_text_feat(self, texts: torch.Tensor) -> torch.Tensor:
        """The function to extract text latent features."""
        x = self.token_embedding(texts)  # [batch_size, n_ctx, d_model]

        x = x + self.positional_embedding
        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x)[0]
        x = x.permute(1, 0, 2)  # LND -> NLD
        x = self.ln_final(x)

        # x.shape = [batch_size, n_ctx, transformer.width]
        # take features from the eot embedding
        # (eot_token is the highest number in each sequence)
        x = x[torch.arange(x.shape[0]),
              texts.argmax(dim=-1)] @ self.text_projection

        return x

    def extract_feat(
            self, images: torch.Tensor,
            texts: torch.Tensor) -> Union[torch.Tensor, Tuple[torch.Tensor]]:
        """The function to extract image and text latent features, the input
        image or text can not both be None."""

        assert images is not None or texts is not None, \
            'text and image cannot both be None!'
        if images is None:
            return self.extract_text_feat(texts)
        elif texts is None:
            return self.extract_image_feat(images)

        image_features = self.extract_image_feat(images)
        text_features = self.extract_text_feat(texts)

        image_features = image_features / image_features.norm(
            dim=-1, keepdim=True)
        text_features = text_features / text_features.norm(
            dim=-1, keepdim=True)

        return image_features, text_features

    def compute_similarity(self, images, texts):
        """Extract images and texts features and compute cosine similarity."""
        image_features, text_features = self.extract_feat(
            images=images, texts=texts)

        # cosine similarity as logits
        logit_scale = self.logit_scale.exp()
        logits_per_image = logit_scale * image_features @ text_features.t()
        logits_per_text = logits_per_image.t()

        # shape (N, N)
        return logits_per_image, logits_per_text

    @abstractmethod
    def predict(self,
                images: torch.Tensor,
data_samples: DataSample = None) -> DataSample:
4
2023-12-23 08:36:47+00:00
16k
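A standalone sketch of the normalized cosine-similarity logits that compute_similarity above produces (batch size and feature width here are illustrative assumptions):

import torch

image_features = torch.randn(4, 512)  # illustrative batch and width
text_features = torch.randn(4, 512)
image_features = image_features / image_features.norm(dim=-1, keepdim=True)
text_features = text_features / text_features.norm(dim=-1, keepdim=True)

logit_scale = torch.tensor(1.0 / 0.07)  # exp of the np.log(1 / 0.07) init above
logits_per_image = logit_scale * image_features @ text_features.t()  # (4, 4)
logits_per_text = logits_per_image.t()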
see2023/Bert-VITS2-ext
train_ms.py
[ { "identifier": "config", "path": "config.py", "snippet": "class Resample_config:\nclass Preprocess_text_config:\nclass Bert_gen_config:\nclass Emo_gen_config:\nclass Train_ms_config:\nclass Webui_config:\nclass Server_config:\nclass Translate_config:\nclass Config:\n def __init__(self, in_dir: str, ...
import platform
import os
import torch
import torch.distributed as dist
import logging
import argparse
import datetime
import gc
import commons
import utils
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.cuda.amp import autocast, GradScaler
from tqdm import tqdm
from config import config
from data_utils import (
    TextAudioSpeakerLoader,
    TextAudioSpeakerCollate,
    DistributedBucketSampler,
    AudioVisemesLoader,
)
from models import (
    SynthesizerTrn,
    MultiPeriodDiscriminator,
    DurationDiscriminator,
    WavLMDiscriminator,
    VisemesNet,
)
from losses import (
    generator_loss,
    discriminator_loss,
    feature_loss,
    kl_loss,
    WavLMLoss,
)
from mel_processing import mel_spectrogram_torch, spec_to_mel_torch
from text.symbols import symbols
13,247
# flake8: noqa: E402
logging.getLogger("numba").setLevel(logging.WARNING)
logger = logging.getLogger(__name__)

torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = (
    True  # If you encounter training problems, please try to disable TF32.
)
torch.set_float32_matmul_precision("medium")
torch.backends.cuda.sdp_kernel("flash")
torch.backends.cuda.enable_flash_sdp(True)
torch.backends.cuda.enable_mem_efficient_sdp(
    True
)  # Not available if torch version is lower than 2.0

global_step = 0
global_visemes_step = 0


def run_only_visemes(hps):
    # Simplest single-machine mode: only train the parameters of the fully
    # connected VisemesFCNet that maps the latent z to visemes (facial expressions)
    global global_visemes_step
    torch.manual_seed(hps.train.seed)
    torch.cuda.set_device(0)
    train_dataset = AudioVisemesLoader(hps.data.training_visemes_files, hps.data)
    train_loader = DataLoader(train_dataset, num_workers=0, shuffle=False, pin_memory=True, batch_size=1, drop_last=True)
    eval_dataset = AudioVisemesLoader(hps.data.validation_visemes_files, hps.data)
    eval_loader = DataLoader(eval_dataset, num_workers=0, shuffle=False, batch_size=1, pin_memory=True, drop_last=False)
# flake8: noqa: E402
logging.getLogger("numba").setLevel(logging.WARNING)
logger = logging.getLogger(__name__)

torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = (
    True  # If you encounter training problems, please try to disable TF32.
)
torch.set_float32_matmul_precision("medium")
torch.backends.cuda.sdp_kernel("flash")
torch.backends.cuda.enable_flash_sdp(True)
torch.backends.cuda.enable_mem_efficient_sdp(
    True
)  # Not available if torch version is lower than 2.0

global_step = 0
global_visemes_step = 0


def run_only_visemes(hps):
    # Simplest single-machine mode: only train the parameters of the fully
    # connected VisemesFCNet that maps the latent z to visemes (facial expressions)
    global global_visemes_step
    torch.manual_seed(hps.train.seed)
    torch.cuda.set_device(0)
    train_dataset = AudioVisemesLoader(hps.data.training_visemes_files, hps.data)
    train_loader = DataLoader(train_dataset, num_workers=0, shuffle=False, pin_memory=True, batch_size=1, drop_last=True)
    eval_dataset = AudioVisemesLoader(hps.data.validation_visemes_files, hps.data)
    eval_loader = DataLoader(eval_dataset, num_workers=0, shuffle=False, batch_size=1, pin_memory=True, drop_last=False)
net_v = VisemesNet(hps.model.hidden_channels).cuda()
9
2023-12-27 03:09:11+00:00
16k
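The cropped run_only_visemes above builds the loaders and, per the gold next line, a VisemesNet. A hypothetical sketch of the training epoch it presumably continues into (net_v, train_loader, and optim are assumed; this is not the repo's actual loop):

import torch
import torch.nn.functional as F

def train_visemes_epoch(net_v, train_loader, optim):
    # Hypothetical: regress viseme targets from latent z frames with MSE.
    net_v.train()
    for z, visemes in train_loader:
        z, visemes = z.cuda(), visemes.cuda()
        loss = F.mse_loss(net_v(z), visemes)
        optim.zero_grad()
        loss.backward()
        optim.step()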
chinhsuanwu/ifusion-threestudio
extern/ldm_zero123/models/diffusion/ddpm.py
[ { "identifier": "AutoencoderKL", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(\n self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"i...
import itertools
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn as nn
from contextlib import contextmanager, nullcontext
from functools import partial
from einops import rearrange, repeat
from omegaconf import ListConfig
from pytorch_lightning.utilities.rank_zero import rank_zero_only
from torch.optim.lr_scheduler import LambdaLR
from torchvision.utils import make_grid
from tqdm import tqdm

from extern.ldm_zero123.models.autoencoder import (
    AutoencoderKL,
    IdentityFirstStage,
    VQModelInterface,
)
from extern.ldm_zero123.models.diffusion.ddim import DDIMSampler
from extern.ldm_zero123.modules.attention import CrossAttention
from extern.ldm_zero123.modules.diffusionmodules.util import (
    extract_into_tensor,
    make_beta_schedule,
    noise_like,
)
from extern.ldm_zero123.modules.distributions.distributions import (
    DiagonalGaussianDistribution,
    normal_kl,
)
from extern.ldm_zero123.modules.ema import LitEma
from extern.ldm_zero123.util import (
    count_params,
    default,
    exists,
    instantiate_from_config,
    isimage,
    ismap,
    log_txt_as_img,
    mean_flat,
)
12,692
                padding=0,
                stride=(stride[0] // df, stride[1] // df),
            )
            fold = torch.nn.Fold(
                output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2
            )

            weighting = self.get_weighting(
                kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device
            ).to(x.dtype)
            normalization = fold(weighting).view(
                1, 1, h // df, w // df
            )  # normalizes the overlap
            weighting = weighting.view(
                (1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)
            )

        else:
            raise NotImplementedError

        return fold, unfold, normalization, weighting

    @torch.no_grad()
    def get_input(
        self,
        batch,
        k,
        return_first_stage_outputs=False,
        force_c_encode=False,
        cond_key=None,
        return_original_cond=False,
        bs=None,
        uncond=0.05,
    ):
        x = super().get_input(batch, k)
        T = batch["T"].to(memory_format=torch.contiguous_format).float()

        if bs is not None:
            x = x[:bs]
            T = T[:bs].to(self.device)

        x = x.to(self.device)
        encoder_posterior = self.encode_first_stage(x)
        z = self.get_first_stage_encoding(encoder_posterior).detach()
        cond_key = cond_key or self.cond_stage_key
        xc = super().get_input(batch, cond_key).to(self.device)
        if bs is not None:
            xc = xc[:bs]
        cond = {}

        # To support classifier-free guidance, randomly drop out only text conditioning 5%, only image conditioning 5%, and both 5%.
        random = torch.rand(x.size(0), device=x.device)
        prompt_mask = rearrange(random < 2 * uncond, "n -> n 1 1")
        input_mask = 1 - rearrange(
            (random >= uncond).float() * (random < 3 * uncond).float(), "n -> n 1 1 1"
        )

        null_prompt = self.get_learned_conditioning([""])

        # z.shape: [8, 4, 64, 64]; c.shape: [8, 1, 768]
        # print('=========== xc shape ===========', xc.shape)
        with torch.enable_grad():
            clip_emb = self.get_learned_conditioning(xc).detach()
            null_prompt = self.get_learned_conditioning([""]).detach()
            cond["c_crossattn"] = [
                self.cc_projection(
                    torch.cat(
                        [
                            torch.where(prompt_mask, null_prompt, clip_emb),
                            T[:, None, :],
                        ],
                        dim=-1,
                    )
                )
            ]
        cond["c_concat"] = [
            input_mask
            * self.encode_first_stage((xc.to(self.device))).mode().detach()
        ]
        out = [z, cond]
        if return_first_stage_outputs:
            xrec = self.decode_first_stage(z)
            out.extend([x, xrec])
        if return_original_cond:
            out.append(xc)
        return out

    # @torch.no_grad()
    def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
        if predict_cids:
            if z.dim() == 4:
                z = torch.argmax(z.exp(), dim=1).long()
            z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
            z = rearrange(z, "b h w c -> b c h w").contiguous()

        z = 1.0 / self.scale_factor * z

        if hasattr(self, "split_input_params"):
            if self.split_input_params["patch_distributed_vq"]:
                ks = self.split_input_params["ks"]  # eg. (128, 128)
                stride = self.split_input_params["stride"]  # eg. (64, 64)
                uf = self.split_input_params["vqf"]
                bs, nc, h, w = z.shape
                if ks[0] > h or ks[1] > w:
                    ks = (min(ks[0], h), min(ks[1], w))
                    print("reducing Kernel")

                if stride[0] > h or stride[1] > w:
                    stride = (min(stride[0], h), min(stride[1], w))
                    print("reducing stride")

                fold, unfold, normalization, weighting = self.get_fold_unfold(
                    z, ks, stride, uf=uf
                )

                z = unfold(z)  # (bn, nc * prod(**ks), L)
                # 1. Reshape to img shape
                z = z.view(
                    (z.shape[0], -1, ks[0], ks[1], z.shape[-1])
                )  # (bn, nc, ks[0], ks[1], L )

                # 2. apply model loop over last dim
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {"concat": "c_concat", "crossattn": "c_crossattn", "adm": "y"} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__( self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0.0, v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1.0, conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0.0, make_it_fit=False, ucg_training=None, ): super().__init__() assert parameterization in [ "eps", "x0", ], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print( f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode" ) self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? 
self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if ckpt_path is not None: self.init_from_ckpt( ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet ) self.register_schedule( given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, ): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule( beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) alphas = 1.0 - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1]) (timesteps,) = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert ( alphas_cumprod.shape[0] == self.num_timesteps ), "alphas have to be defined for each timestep" to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer("betas", to_torch(betas)) self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod)) self.register_buffer("alphas_cumprod_prev", to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer("sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod))) self.register_buffer( "sqrt_one_minus_alphas_cumprod", to_torch(np.sqrt(1.0 - alphas_cumprod)) ) self.register_buffer( "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod)) ) self.register_buffer( "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod)) ) self.register_buffer( "sqrt_recipm1_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod - 1)) ) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * ( 1.0 - alphas_cumprod_prev ) / (1.0 - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. 
- alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer("posterior_variance", to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer( "posterior_log_variance_clipped", to_torch(np.log(np.maximum(posterior_variance, 1e-20))), ) self.register_buffer( "posterior_mean_coef1", to_torch(betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)), ) self.register_buffer( "posterior_mean_coef2", to_torch( (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod) ), ) if self.parameterization == "eps": lvlb_weights = self.betas**2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod) ) elif self.parameterization == "x0": lvlb_weights = ( 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2.0 * 1 - torch.Tensor(alphas_cumprod)) ) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer("lvlb_weights", lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) if self.make_it_fit: n_params = len( [ name for name, _ in itertools.chain( self.named_parameters(), self.named_buffers() ) ] ) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params, ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[ i % old_shape[0], j % old_shape[1] ] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = ( self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(sd, strict=False) ) print( f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys" ) if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. 
:return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor( self.log_one_minus_alphas_cumprod, t, x_start.shape ) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor( self.posterior_log_variance_clipped, t, x_t.shape ) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1.0, 1.0) model_mean, posterior_variance, posterior_log_variance = self.q_posterior( x_start=x_recon, x_t=x, t=t ) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance( x=x, t=t, clip_denoised=clip_denoised ) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm( reversed(range(0, self.num_timesteps)), desc="Sampling t", total=self.num_timesteps, ): img = self.p_sample( img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised, ) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop( (batch_size, channels, image_size, image_size), return_intermediates=return_intermediates, ) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise ) def get_loss(self, pred, target, mean=True): if self.loss_type == "l1": loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == "l2": if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction="none") else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = 
    def p_losses(self, x_start, t, noise=None):
        noise = default(noise, lambda: torch.randn_like(x_start))
        x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
        model_out = self.model(x_noisy, t)

        loss_dict = {}
        if self.parameterization == "eps":
            target = noise
        elif self.parameterization == "x0":
            target = x_start
        else:
            raise NotImplementedError(
                f"Parameterization {self.parameterization} not yet supported"
            )

        loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3])

        log_prefix = "train" if self.training else "val"

        loss_dict.update({f"{log_prefix}/loss_simple": loss.mean()})
        loss_simple = loss.mean() * self.l_simple_weight

        loss_vlb = (self.lvlb_weights[t] * loss).mean()
        loss_dict.update({f"{log_prefix}/loss_vlb": loss_vlb})

        loss = loss_simple + self.original_elbo_weight * loss_vlb

        loss_dict.update({f"{log_prefix}/loss": loss})

        return loss, loss_dict

    def forward(self, x, *args, **kwargs):
        # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size
        # assert h == img_size and w == img_size, f'height and width of image must be {img_size}'
        t = torch.randint(
            0, self.num_timesteps, (x.shape[0],), device=self.device
        ).long()
        return self.p_losses(x, t, *args, **kwargs)

    def get_input(self, batch, k):
        x = batch[k]
        if len(x.shape) == 3:
            x = x[..., None]
        x = rearrange(x, "b h w c -> b c h w")
        x = x.to(memory_format=torch.contiguous_format).float()
        return x

    def shared_step(self, batch):
        x = self.get_input(batch, self.first_stage_key)
        loss, loss_dict = self(x)
        return loss, loss_dict

    def training_step(self, batch, batch_idx):
        for k in self.ucg_training:
            p = self.ucg_training[k]["p"]
            val = self.ucg_training[k]["val"]
            if val is None:
                val = ""
            for i in range(len(batch[k])):
                if self.ucg_prng.choice(2, p=[1 - p, p]):
                    batch[k][i] = val

        loss, loss_dict = self.shared_step(batch)

        self.log_dict(
            loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True
        )

        self.log(
            "global_step",
            self.global_step,
            prog_bar=True,
            logger=True,
            on_step=True,
            on_epoch=False,
        )

        if self.use_scheduler:
            lr = self.optimizers().param_groups[0]["lr"]
            self.log(
                "lr_abs", lr, prog_bar=True, logger=True, on_step=True, on_epoch=False
            )

        return loss

    @torch.no_grad()
    def validation_step(self, batch, batch_idx):
        _, loss_dict_no_ema = self.shared_step(batch)
        with self.ema_scope():
            _, loss_dict_ema = self.shared_step(batch)
            loss_dict_ema = {key + "_ema": loss_dict_ema[key] for key in loss_dict_ema}
        self.log_dict(
            loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True
        )
        self.log_dict(
            loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True
        )

    def on_train_batch_end(self, *args, **kwargs):
        if self.use_ema:
            self.model_ema(self.model)

    def _get_rows_from_list(self, samples):
        n_imgs_per_row = len(samples)
        denoise_grid = rearrange(samples, "n b c h w -> b n c h w")
        denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w")
        denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
        return denoise_grid

    @torch.no_grad()
    def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs):
        log = dict()
        x = self.get_input(batch, self.first_stage_key)
        N = min(x.shape[0], N)
        n_row = min(x.shape[0], n_row)
        x = x.to(self.device)[:N]
        log["inputs"] = x

        # get diffusion row
        diffusion_row = list()
        x_start = x[:n_row]

        for t in range(self.num_timesteps):
            if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
                t = repeat(torch.tensor([t]), "1 -> b", b=n_row)
                t = t.to(self.device).long()
                noise = torch.randn_like(x_start)
                x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
                diffusion_row.append(x_noisy)

        log["diffusion_row"] = self._get_rows_from_list(diffusion_row)

        if sample:
            # get denoise row
            with self.ema_scope("Plotting"):
                samples, denoise_row = self.sample(
                    batch_size=N, return_intermediates=True
                )

            log["samples"] = samples
            log["denoise_row"] = self._get_rows_from_list(denoise_row)

        if return_keys:
            if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
                return log
            else:
                return {key: log[key] for key in return_keys}
        return log

    def configure_optimizers(self):
        lr = self.learning_rate
        params = list(self.model.parameters())
        if self.learn_logvar:
            params = params + [self.logvar]
        opt = torch.optim.AdamW(params, lr=lr)
        return opt
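# Hedged usage sketch, not part of the upstream code: the pixel-space DDPM
# above can be exercised on its own, assuming a configured `unet_config` and
# the default first_stage_key of "image":
#   model = DDPM(unet_config=unet_config, timesteps=1000)
#   loss, loss_dict = model.shared_step({"image": images})  # training step
#   samples = model.sample(batch_size=4)                    # ancestral sampling
# LatentDiffusion below reuses this machinery but runs it in the latent space
# of a frozen first-stage autoencoder and adds conditioning.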
class LatentDiffusion(DDPM):
    """main class"""

    def __init__(
        self,
        first_stage_config,
        cond_stage_config,
        num_timesteps_cond=None,
        cond_stage_key="image",
        cond_stage_trainable=False,
        concat_mode=True,
        cond_stage_forward=None,
        conditioning_key=None,
        scale_factor=1.0,
        scale_by_std=False,
        unet_trainable=True,
        *args,
        **kwargs,
    ):
        self.num_timesteps_cond = default(num_timesteps_cond, 1)
        self.scale_by_std = scale_by_std
        assert self.num_timesteps_cond <= kwargs["timesteps"]
        # for backwards compatibility after implementation of DiffusionWrapper
        if conditioning_key is None:
            conditioning_key = "concat" if concat_mode else "crossattn"
        if cond_stage_config == "__is_unconditional__":
            conditioning_key = None
        ckpt_path = kwargs.pop("ckpt_path", None)
        ignore_keys = kwargs.pop("ignore_keys", [])
        super().__init__(conditioning_key=conditioning_key, *args, **kwargs)
        self.concat_mode = concat_mode
        self.cond_stage_trainable = cond_stage_trainable
        self.unet_trainable = unet_trainable
        self.cond_stage_key = cond_stage_key
        try:
            self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1
        except:
            self.num_downs = 0
        if not scale_by_std:
            self.scale_factor = scale_factor
        else:
            self.register_buffer("scale_factor", torch.tensor(scale_factor))
        self.instantiate_first_stage(first_stage_config)
        self.instantiate_cond_stage(cond_stage_config)
        self.cond_stage_forward = cond_stage_forward

        # construct linear projection layer for concatenating image CLIP embedding and RT
        self.cc_projection = nn.Linear(772, 768)
        nn.init.eye_(list(self.cc_projection.parameters())[0][:768, :768])
        nn.init.zeros_(list(self.cc_projection.parameters())[1])
        self.cc_projection.requires_grad_(True)

        self.clip_denoised = False
        self.bbox_tokenizer = None

        self.restarted_from_ckpt = False
        if ckpt_path is not None:
            self.init_from_ckpt(ckpt_path, ignore_keys)
            self.restarted_from_ckpt = True

    def make_cond_schedule(
        self,
    ):
        self.cond_ids = torch.full(
            size=(self.num_timesteps,),
            fill_value=self.num_timesteps - 1,
            dtype=torch.long,
        )
        ids = torch.round(
            torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)
        ).long()
        self.cond_ids[: self.num_timesteps_cond] = ids

    @rank_zero_only
    @torch.no_grad()
    def on_train_batch_start(self, batch, batch_idx, dataloader_idx):
        # only for very first batch
        if (
            self.scale_by_std
            and self.current_epoch == 0
            and self.global_step == 0
            and batch_idx == 0
            and not self.restarted_from_ckpt
        ):
            assert (
                self.scale_factor == 1.0
            ), "rather not use custom rescaling and std-rescaling simultaneously"
            # set rescale weight to 1./std of encodings
            print("### USING STD-RESCALING ###")
            x = super().get_input(batch, self.first_stage_key)
            x = x.to(self.device)
            encoder_posterior = self.encode_first_stage(x)
            z = self.get_first_stage_encoding(encoder_posterior).detach()
            del self.scale_factor
            self.register_buffer("scale_factor", 1.0 / z.flatten().std())
            print(f"setting self.scale_factor to {self.scale_factor}")
            print("### USING STD-RESCALING ###")
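    # Hedged sketch, not part of the upstream code, of the std-rescaling above:
    # with scale_by_std=True the latents are rescaled once, on the very first
    # batch, so that z * scale_factor has roughly unit variance, e.g.
    #   z = encode(x)                             # std(z) ~ s
    #   scale_factor = 1.0 / z.flatten().std()    # ~ 1/s
    # which keeps the diffusion prior N(0, I) a reasonable match for the
    # latent distribution the model is trained on.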
    def register_schedule(
        self,
        given_betas=None,
        beta_schedule="linear",
        timesteps=1000,
        linear_start=1e-4,
        linear_end=2e-2,
        cosine_s=8e-3,
    ):
        super().register_schedule(
            given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s
        )

        self.shorten_cond_schedule = self.num_timesteps_cond > 1
        if self.shorten_cond_schedule:
            self.make_cond_schedule()

    def instantiate_first_stage(self, config):
        model = instantiate_from_config(config)
        self.first_stage_model = model.eval()
        self.first_stage_model.train = disabled_train
        for param in self.first_stage_model.parameters():
            param.requires_grad = False

    def instantiate_cond_stage(self, config):
        if not self.cond_stage_trainable:
            if config == "__is_first_stage__":
                print("Using first stage also as cond stage.")
                self.cond_stage_model = self.first_stage_model
            elif config == "__is_unconditional__":
                print(f"Training {self.__class__.__name__} as an unconditional model.")
                self.cond_stage_model = None
                # self.be_unconditional = True
            else:
                model = instantiate_from_config(config)
                self.cond_stage_model = model.eval()
                self.cond_stage_model.train = disabled_train
                for param in self.cond_stage_model.parameters():
                    param.requires_grad = False
        else:
            assert config != "__is_first_stage__"
            assert config != "__is_unconditional__"
            model = instantiate_from_config(config)
            self.cond_stage_model = model

    def _get_denoise_row_from_list(
        self, samples, desc="", force_no_decoder_quantization=False
    ):
        denoise_row = []
        for zd in tqdm(samples, desc=desc):
            denoise_row.append(
                self.decode_first_stage(
                    zd.to(self.device), force_not_quantize=force_no_decoder_quantization
                )
            )
        n_imgs_per_row = len(denoise_row)
        denoise_row = torch.stack(denoise_row)  # n_log_step, n_row, C, H, W
        denoise_grid = rearrange(denoise_row, "n b c h w -> b n c h w")
        denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w")
        denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
        return denoise_grid

    def get_first_stage_encoding(self, encoder_posterior):
        if isinstance(encoder_posterior, DiagonalGaussianDistribution):
            z = encoder_posterior.sample()
        elif isinstance(encoder_posterior, torch.Tensor):
            z = encoder_posterior
        else:
            raise NotImplementedError(
                f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented"
            )
        return self.scale_factor * z

    def get_learned_conditioning(self, c):
        if self.cond_stage_forward is None:
            if hasattr(self.cond_stage_model, "encode") and callable(
                self.cond_stage_model.encode
            ):
                c = self.cond_stage_model.encode(c)
                if isinstance(c, DiagonalGaussianDistribution):
                    c = c.mode()
            else:
                c = self.cond_stage_model(c)
        else:
            assert hasattr(self.cond_stage_model, self.cond_stage_forward)
            c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)
        return c

    def meshgrid(self, h, w):
        y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)
        x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)

        arr = torch.cat([y, x], dim=-1)
        return arr

    def delta_border(self, h, w):
        """
        :param h: height
        :param w: width
        :return: normalized distance to image border, with min distance = 0 at border and max dist = 0.5 at image center
        """
        lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)
        arr = self.meshgrid(h, w) / lower_right_corner
        dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]
        dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]
        edge_dist = torch.min(
            torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1
        )[0]
        return edge_dist
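    # Illustrative sketch, not part of the upstream code: for a 3x3 grid,
    # delta_border returns 0.0 on every border pixel and 0.5 at the center,
    #   [[0.0, 0.0, 0.0],
    #    [0.0, 0.5, 0.0],
    #    [0.0, 0.0, 0.0]]
    # get_weighting below clips this map and uses it so that overlapping
    # patches are blended with more weight near their centers than their edges.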
    def get_weighting(self, h, w, Ly, Lx, device):
        weighting = self.delta_border(h, w)
        weighting = torch.clip(
            weighting,
            self.split_input_params["clip_min_weight"],
            self.split_input_params["clip_max_weight"],
        )
        weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)

        if self.split_input_params["tie_braker"]:
            L_weighting = self.delta_border(Ly, Lx)
            L_weighting = torch.clip(
                L_weighting,
                self.split_input_params["clip_min_tie_weight"],
                self.split_input_params["clip_max_tie_weight"],
            )

            L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)
            weighting = weighting * L_weighting
        return weighting

    def get_fold_unfold(
        self, x, kernel_size, stride, uf=1, df=1
    ):  # todo load once not every time, shorten code
        """
        :param x: img of size (bs, c, h, w)
        :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1])
        """
        bs, nc, h, w = x.shape

        # number of crops in image
        Ly = (h - kernel_size[0]) // stride[0] + 1
        Lx = (w - kernel_size[1]) // stride[1] + 1

        if uf == 1 and df == 1:
            fold_params = dict(
                kernel_size=kernel_size, dilation=1, padding=0, stride=stride
            )
            unfold = torch.nn.Unfold(**fold_params)

            fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)

            weighting = self.get_weighting(
                kernel_size[0], kernel_size[1], Ly, Lx, x.device
            ).to(x.dtype)
            normalization = fold(weighting).view(1, 1, h, w)  # normalizes the overlap
            weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))

        elif uf > 1 and df == 1:
            fold_params = dict(
                kernel_size=kernel_size, dilation=1, padding=0, stride=stride
            )
            unfold = torch.nn.Unfold(**fold_params)

            fold_params2 = dict(
                kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf),
                dilation=1,
                padding=0,
                stride=(stride[0] * uf, stride[1] * uf),
            )
            fold = torch.nn.Fold(
                output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2
            )

            weighting = self.get_weighting(
                kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device
            ).to(x.dtype)
            normalization = fold(weighting).view(
                1, 1, h * uf, w * uf
            )  # normalizes the overlap
            weighting = weighting.view(
                (1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)
            )

        elif df > 1 and uf == 1:
            fold_params = dict(
                kernel_size=kernel_size, dilation=1, padding=0, stride=stride
            )
            unfold = torch.nn.Unfold(**fold_params)

            fold_params2 = dict(
                kernel_size=(kernel_size[0] // df, kernel_size[0] // df),
                dilation=1,
                padding=0,
                stride=(stride[0] // df, stride[1] // df),
            )
            fold = torch.nn.Fold(
                output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2
            )

            weighting = self.get_weighting(
                kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device
            ).to(x.dtype)
            normalization = fold(weighting).view(
                1, 1, h // df, w // df
            )  # normalizes the overlap
            weighting = weighting.view(
                (1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)
            )

        else:
            raise NotImplementedError

        return fold, unfold, normalization, weighting
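    # Hedged sketch, not part of the upstream code, of the fold/unfold round
    # trip used for patch-wise processing, with hypothetical patch sizes:
    #   fold, unfold, normalization, weighting = self.get_fold_unfold(
    #       z, kernel_size=(128, 128), stride=(64, 64))
    #   patches = unfold(z)          # (bs, c * 128 * 128, L) overlapping crops
    #   ...process each of the L crops independently...
    #   out = fold(processed * weighting) / normalization
    # Dividing by `normalization` (the folded weighting map) makes the
    # overlapping, weighted patches sum back to a seamless image.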
    @torch.no_grad()
    def get_input(
        self,
        batch,
        k,
        return_first_stage_outputs=False,
        force_c_encode=False,
        cond_key=None,
        return_original_cond=False,
        bs=None,
        uncond=0.05,
    ):
        x = super().get_input(batch, k)
        T = batch["T"].to(memory_format=torch.contiguous_format).float()

        if bs is not None:
            x = x[:bs]
            T = T[:bs].to(self.device)

        x = x.to(self.device)
        encoder_posterior = self.encode_first_stage(x)
        z = self.get_first_stage_encoding(encoder_posterior).detach()
        cond_key = cond_key or self.cond_stage_key
        xc = super().get_input(batch, cond_key).to(self.device)
        if bs is not None:
            xc = xc[:bs]
        cond = {}

        # To support classifier-free guidance, randomly drop out only text conditioning 5%, only image conditioning 5%, and both 5%.
        random = torch.rand(x.size(0), device=x.device)
        prompt_mask = rearrange(random < 2 * uncond, "n -> n 1 1")
        input_mask = 1 - rearrange(
            (random >= uncond).float() * (random < 3 * uncond).float(), "n -> n 1 1 1"
        )
        null_prompt = self.get_learned_conditioning([""])

        # z.shape: [8, 4, 64, 64]; c.shape: [8, 1, 768]
        # print('=========== xc shape ===========', xc.shape)
        with torch.enable_grad():
            clip_emb = self.get_learned_conditioning(xc).detach()
            null_prompt = self.get_learned_conditioning([""]).detach()
            cond["c_crossattn"] = [
                self.cc_projection(
                    torch.cat(
                        [
                            torch.where(prompt_mask, null_prompt, clip_emb),
                            T[:, None, :],
                        ],
                        dim=-1,
                    )
                )
            ]
        cond["c_concat"] = [
            input_mask * self.encode_first_stage((xc.to(self.device))).mode().detach()
        ]
        out = [z, cond]
        if return_first_stage_outputs:
            xrec = self.decode_first_stage(z)
            out.extend([x, xrec])
        if return_original_cond:
            out.append(xc)
        return out

    # @torch.no_grad()
    def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
        if predict_cids:
            if z.dim() == 4:
                z = torch.argmax(z.exp(), dim=1).long()
            z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
            z = rearrange(z, "b h w c -> b c h w").contiguous()

        z = 1.0 / self.scale_factor * z

        if hasattr(self, "split_input_params"):
            if self.split_input_params["patch_distributed_vq"]:
                ks = self.split_input_params["ks"]  # eg. (128, 128)
                stride = self.split_input_params["stride"]  # eg. (64, 64)
                uf = self.split_input_params["vqf"]
                bs, nc, h, w = z.shape
                if ks[0] > h or ks[1] > w:
                    ks = (min(ks[0], h), min(ks[1], w))
                    print("reducing Kernel")

                if stride[0] > h or stride[1] > w:
                    stride = (min(stride[0], h), min(stride[1], w))
                    print("reducing stride")

                fold, unfold, normalization, weighting = self.get_fold_unfold(
                    z, ks, stride, uf=uf
                )

                z = unfold(z)  # (bn, nc * prod(**ks), L)
                # 1. Reshape to img shape
                z = z.view(
                    (z.shape[0], -1, ks[0], ks[1], z.shape[-1])
                )  # (bn, nc, ks[0], ks[1], L )

                # 2. apply model loop over last dim
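                # Hedged sketch, not part of the upstream code, of the step
                # that follows: each of the L crops in z[..., i] is decoded
                # independently, e.g.
                #   outputs = [self.first_stage_model.decode(z[:, :, :, :, i])
                #              for i in range(z.shape[-1])]
                # and the decoded crops are then re-weighted and folded back
                # into a full image (see get_fold_unfold above).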
if isinstance(self.first_stage_model, VQModelInterface):
2
2023-12-27 20:30:33+00:00
16k