Dataset schema (one record per example):

repo_name            stringlengths    7 to 71
file_path            stringlengths    5 to 118
context              list
import_statement     stringlengths    45 to 12.5k
token_num            int64            641 to 99.4k
cropped_code         stringlengths    44 to 17k
all_code             stringlengths    43 to 754k
next_line            stringlengths    2 to 330
gold_snippet_index   int64            0 to 68
created_at           stringlengths    25 to 25
level                stringclasses    9 values
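The records below follow this schema. As a rough, hypothetical sketch of how one such record might be inspected (the dataset's actual Hub id and split are not stated in this dump, so the identifiers below are placeholders):

from datasets import load_dataset

# Placeholder dataset id -- the real name is not given in this dump.
ds = load_dataset("some-org/repo-level-completion", split="train")

record = ds[0]
print(record["repo_name"])           # e.g. "AMAAI-Lab/mustango"
print(record["file_path"])           # file whose next line is to be completed
print(record["token_num"])           # prompt size in tokens
print(record["next_line"])           # the gold next line of code
print(record["gold_snippet_index"])  # presumably an index into the `context` list
print(record["level"])               # one of 9 level classes (shown as "24k" below)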
repo_name: AMAAI-Lab/mustango
file_path: diffusers/src/diffusers/models/transformer_2d.py
[ { "identifier": "ConfigMixin", "path": "diffusers/src/diffusers/configuration_utils.py", "snippet": "class ConfigMixin:\n r\"\"\"\n Base class for all configuration classes. Stores all configuration parameters under `self.config` Also handles all\n methods for loading/downloading/saving classes...
from dataclasses import dataclass from typing import Any, Dict, Optional from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..models.embeddings import ImagePositionalEmbeddings from ..utils import BaseOutput, deprecate from .attention import BasicTransformerBlock from .embeddings import PatchEmbed from .modeling_utils import ModelMixin import torch import torch.nn.functional as F
token_num: 18,093
class Transformer2DModel(ModelMixin, ConfigMixin): """ Transformer model for image-like data. Takes either discrete (classes of vector embeddings) or continuous (actual embeddings) inputs. When input is continuous: First, project the input (aka embedding) and reshape to b, t, d. Then apply standard transformer action. Finally, reshape to image. When input is discrete: First, input (classes of latent pixels) is converted to embeddings and has positional embeddings applied, see `ImagePositionalEmbeddings`. Then apply standard transformer action. Finally, predict classes of unnoised image. Note that it is assumed one of the input classes is the masked latent pixel. The predicted classes of the unnoised image do not contain a prediction for the masked pixel as the unnoised image cannot be masked. Parameters: num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention. attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head. in_channels (`int`, *optional*): Pass if the input is continuous. The number of channels in the input and output. num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. cross_attention_dim (`int`, *optional*): The number of encoder_hidden_states dimensions to use. sample_size (`int`, *optional*): Pass if the input is discrete. The width of the latent images. Note that this is fixed at training time as it is used for learning a number of position embeddings. See `ImagePositionalEmbeddings`. num_vector_embeds (`int`, *optional*): Pass if the input is discrete. The number of classes of the vector embeddings of the latent pixels. Includes the class for the masked latent pixel. activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward. num_embeds_ada_norm ( `int`, *optional*): Pass if at least one of the norm_layers is `AdaLayerNorm`. The number of diffusion steps used during training. Note that this is fixed at training time as it is used to learn a number of embeddings that are added to the hidden states. During inference, you can denoise for up to but not more than steps than `num_embeds_ada_norm`. attention_bias (`bool`, *optional*): Configure if the TransformerBlocks' attention should contain a bias parameter. """ @register_to_config def __init__( self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, out_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None, patch_size: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None, use_linear_projection: bool = False, only_cross_attention: bool = False, upcast_attention: bool = False, norm_type: str = "layer_norm", norm_elementwise_affine: bool = True, ): super().__init__() self.use_linear_projection = use_linear_projection self.num_attention_heads = num_attention_heads self.attention_head_dim = attention_head_dim inner_dim = num_attention_heads * attention_head_dim # 1. 
Transformer2DModel can process both standard continuous images of shape `(batch_size, num_channels, width, height)` as well as quantized image embeddings of shape `(batch_size, num_image_vectors)` # Define whether input is continuous or discrete depending on configuration self.is_input_continuous = (in_channels is not None) and (patch_size is None) self.is_input_vectorized = num_vector_embeds is not None self.is_input_patches = in_channels is not None and patch_size is not None if norm_type == "layer_norm" and num_embeds_ada_norm is not None: deprecation_message = ( f"The configuration file of this model: {self.__class__} is outdated. `norm_type` is either not set or" " incorrectly set to `'layer_norm'`.Make sure to set `norm_type` to `'ada_norm'` in the config." " Please make sure to update the config accordingly as leaving `norm_type` might led to incorrect" " results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it" " would be very nice if you could open a Pull request for the `transformer/config.json` file" ) deprecate("norm_type!=num_embeds_ada_norm", "1.0.0", deprecation_message, standard_warn=False) norm_type = "ada_norm" if self.is_input_continuous and self.is_input_vectorized: raise ValueError( f"Cannot define both `in_channels`: {in_channels} and `num_vector_embeds`: {num_vector_embeds}. Make" " sure that either `in_channels` or `num_vector_embeds` is None." ) elif self.is_input_vectorized and self.is_input_patches: raise ValueError( f"Cannot define both `num_vector_embeds`: {num_vector_embeds} and `patch_size`: {patch_size}. Make" " sure that either `num_vector_embeds` or `num_patches` is None." ) elif not self.is_input_continuous and not self.is_input_vectorized and not self.is_input_patches: raise ValueError( f"Has to define `in_channels`: {in_channels}, `num_vector_embeds`: {num_vector_embeds}, or patch_size:" f" {patch_size}. Make sure that `in_channels`, `num_vector_embeds` or `num_patches` is not None." ) # 2. Define input layers if self.is_input_continuous: self.in_channels = in_channels self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True) if use_linear_projection: self.proj_in = nn.Linear(in_channels, inner_dim) else: self.proj_in = nn.Conv2d(in_channels, inner_dim, kernel_size=1, stride=1, padding=0) elif self.is_input_vectorized: assert sample_size is not None, "Transformer2DModel over discrete input must provide sample_size" assert num_vector_embeds is not None, "Transformer2DModel over discrete input must provide num_embed" self.height = sample_size self.width = sample_size self.num_vector_embeds = num_vector_embeds self.num_latent_pixels = self.height * self.width
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. @dataclass class Transformer2DModelOutput(BaseOutput): """ Args: sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` or `(batch size, num_vector_embeds - 1, num_latent_pixels)` if [`Transformer2DModel`] is discrete): Hidden states conditioned on `encoder_hidden_states` input. If discrete, returns probability distributions for the unnoised latent pixels. """ sample: torch.FloatTensor class Transformer2DModel(ModelMixin, ConfigMixin): """ Transformer model for image-like data. Takes either discrete (classes of vector embeddings) or continuous (actual embeddings) inputs. When input is continuous: First, project the input (aka embedding) and reshape to b, t, d. Then apply standard transformer action. Finally, reshape to image. When input is discrete: First, input (classes of latent pixels) is converted to embeddings and has positional embeddings applied, see `ImagePositionalEmbeddings`. Then apply standard transformer action. Finally, predict classes of unnoised image. Note that it is assumed one of the input classes is the masked latent pixel. The predicted classes of the unnoised image do not contain a prediction for the masked pixel as the unnoised image cannot be masked. Parameters: num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention. attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head. in_channels (`int`, *optional*): Pass if the input is continuous. The number of channels in the input and output. num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. cross_attention_dim (`int`, *optional*): The number of encoder_hidden_states dimensions to use. sample_size (`int`, *optional*): Pass if the input is discrete. The width of the latent images. Note that this is fixed at training time as it is used for learning a number of position embeddings. See `ImagePositionalEmbeddings`. num_vector_embeds (`int`, *optional*): Pass if the input is discrete. The number of classes of the vector embeddings of the latent pixels. Includes the class for the masked latent pixel. activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward. num_embeds_ada_norm ( `int`, *optional*): Pass if at least one of the norm_layers is `AdaLayerNorm`. The number of diffusion steps used during training. Note that this is fixed at training time as it is used to learn a number of embeddings that are added to the hidden states. During inference, you can denoise for up to but not more than steps than `num_embeds_ada_norm`. attention_bias (`bool`, *optional*): Configure if the TransformerBlocks' attention should contain a bias parameter. 
""" @register_to_config def __init__( self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, out_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None, patch_size: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None, use_linear_projection: bool = False, only_cross_attention: bool = False, upcast_attention: bool = False, norm_type: str = "layer_norm", norm_elementwise_affine: bool = True, ): super().__init__() self.use_linear_projection = use_linear_projection self.num_attention_heads = num_attention_heads self.attention_head_dim = attention_head_dim inner_dim = num_attention_heads * attention_head_dim # 1. Transformer2DModel can process both standard continuous images of shape `(batch_size, num_channels, width, height)` as well as quantized image embeddings of shape `(batch_size, num_image_vectors)` # Define whether input is continuous or discrete depending on configuration self.is_input_continuous = (in_channels is not None) and (patch_size is None) self.is_input_vectorized = num_vector_embeds is not None self.is_input_patches = in_channels is not None and patch_size is not None if norm_type == "layer_norm" and num_embeds_ada_norm is not None: deprecation_message = ( f"The configuration file of this model: {self.__class__} is outdated. `norm_type` is either not set or" " incorrectly set to `'layer_norm'`.Make sure to set `norm_type` to `'ada_norm'` in the config." " Please make sure to update the config accordingly as leaving `norm_type` might led to incorrect" " results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it" " would be very nice if you could open a Pull request for the `transformer/config.json` file" ) deprecate("norm_type!=num_embeds_ada_norm", "1.0.0", deprecation_message, standard_warn=False) norm_type = "ada_norm" if self.is_input_continuous and self.is_input_vectorized: raise ValueError( f"Cannot define both `in_channels`: {in_channels} and `num_vector_embeds`: {num_vector_embeds}. Make" " sure that either `in_channels` or `num_vector_embeds` is None." ) elif self.is_input_vectorized and self.is_input_patches: raise ValueError( f"Cannot define both `num_vector_embeds`: {num_vector_embeds} and `patch_size`: {patch_size}. Make" " sure that either `num_vector_embeds` or `num_patches` is None." ) elif not self.is_input_continuous and not self.is_input_vectorized and not self.is_input_patches: raise ValueError( f"Has to define `in_channels`: {in_channels}, `num_vector_embeds`: {num_vector_embeds}, or patch_size:" f" {patch_size}. Make sure that `in_channels`, `num_vector_embeds` or `num_patches` is not None." ) # 2. 
Define input layers if self.is_input_continuous: self.in_channels = in_channels self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True) if use_linear_projection: self.proj_in = nn.Linear(in_channels, inner_dim) else: self.proj_in = nn.Conv2d(in_channels, inner_dim, kernel_size=1, stride=1, padding=0) elif self.is_input_vectorized: assert sample_size is not None, "Transformer2DModel over discrete input must provide sample_size" assert num_vector_embeds is not None, "Transformer2DModel over discrete input must provide num_embed" self.height = sample_size self.width = sample_size self.num_vector_embeds = num_vector_embeds self.num_latent_pixels = self.height * self.width
next_line: self.latent_image_embedding = ImagePositionalEmbeddings(
gold_snippet_index: 2
created_at: 2023-11-14 23:29:31+00:00
level: 24k
repo_name: BraveGroup/Drive-WM
file_path: src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_diffnext.py
[ { "identifier": "ConfigMixin", "path": "src/diffusers/configuration_utils.py", "snippet": "class ConfigMixin:\n r\"\"\"\n Base class for all configuration classes. All configuration parameters are stored under `self.config`. Also\n provides the [`~ConfigMixin.from_config`] and [`~ConfigMixin.sa...
import math import numpy as np import torch import torch.nn as nn from ...configuration_utils import ConfigMixin, register_to_config from ...models.modeling_utils import ModelMixin from .modeling_wuerstchen_common import AttnBlock, GlobalResponseNorm, TimestepBlock, WuerstchenLayerNorm
token_num: 18,014
# Copyright (c) 2023 Dominic Rampas MIT License # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class WuerstchenDiffNeXt(ModelMixin, ConfigMixin): @register_to_config def __init__( self, c_in=4, c_out=4, c_r=64, patch_size=2, c_cond=1024, c_hidden=[320, 640, 1280, 1280], nhead=[-1, 10, 20, 20], blocks=[4, 4, 14, 4], level_config=["CT", "CTA", "CTA", "CTA"], inject_effnet=[False, True, True, True], effnet_embd=16, clip_embd=1024, kernel_size=3, dropout=0.1, ): super().__init__() self.c_r = c_r self.c_cond = c_cond if not isinstance(dropout, list): dropout = [dropout] * len(c_hidden) # CONDITIONING self.clip_mapper = nn.Linear(clip_embd, c_cond) self.effnet_mappers = nn.ModuleList( [ nn.Conv2d(effnet_embd, c_cond, kernel_size=1) if inject else None for inject in inject_effnet + list(reversed(inject_effnet)) ] ) self.seq_norm = nn.LayerNorm(c_cond, elementwise_affine=False, eps=1e-6) self.embedding = nn.Sequential( nn.PixelUnshuffle(patch_size), nn.Conv2d(c_in * (patch_size**2), c_hidden[0], kernel_size=1),
# Copyright (c) 2023 Dominic Rampas MIT License # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class WuerstchenDiffNeXt(ModelMixin, ConfigMixin): @register_to_config def __init__( self, c_in=4, c_out=4, c_r=64, patch_size=2, c_cond=1024, c_hidden=[320, 640, 1280, 1280], nhead=[-1, 10, 20, 20], blocks=[4, 4, 14, 4], level_config=["CT", "CTA", "CTA", "CTA"], inject_effnet=[False, True, True, True], effnet_embd=16, clip_embd=1024, kernel_size=3, dropout=0.1, ): super().__init__() self.c_r = c_r self.c_cond = c_cond if not isinstance(dropout, list): dropout = [dropout] * len(c_hidden) # CONDITIONING self.clip_mapper = nn.Linear(clip_embd, c_cond) self.effnet_mappers = nn.ModuleList( [ nn.Conv2d(effnet_embd, c_cond, kernel_size=1) if inject else None for inject in inject_effnet + list(reversed(inject_effnet)) ] ) self.seq_norm = nn.LayerNorm(c_cond, elementwise_affine=False, eps=1e-6) self.embedding = nn.Sequential( nn.PixelUnshuffle(patch_size), nn.Conv2d(c_in * (patch_size**2), c_hidden[0], kernel_size=1),
next_line: WuerstchenLayerNorm(c_hidden[0], elementwise_affine=False, eps=1e-6),
gold_snippet_index: 6
created_at: 2023-11-18 01:40:55+00:00
level: 24k
repo_name: wjun0830/CGDETR
file_path: cg_detr/train.py
[ { "identifier": "BaseOptions", "path": "cg_detr/config.py", "snippet": "class BaseOptions(object):\n saved_option_filename = \"opt.json\"\n ckpt_filename = \"model.ckpt\"\n tensorboard_log_dir = \"tensorboard_log\"\n train_log_filename = \"train.log.txt\"\n eval_log_filename = \"eval.log....
import os import time import json import pprint import random import numpy as np import torch import torch.nn as nn import torch.backends.cudnn as cudnn import logging import sys from tqdm import tqdm, trange from collections import defaultdict from torch.utils.data import DataLoader from torch.utils.tensorboard import SummaryWriter from cg_detr.config import BaseOptions from cg_detr.start_end_dataset import \ StartEndDataset, start_end_collate, prepare_batch_inputs from cg_detr.inference import eval_epoch, start_inference, setup_model from utils.basic_utils import AverageMeter, dict_to_markdown from utils.model_utils import count_parameters
token_num: 14,837
if opt.debug: break tb_writer.close() def train_hl(model, criterion, optimizer, lr_scheduler, train_dataset, val_dataset, opt): if opt.device.type == "cuda": logger.info("CUDA enabled.") model.to(opt.device) tb_writer = SummaryWriter(opt.tensorboard_log_dir) tb_writer.add_text("hyperparameters", dict_to_markdown(vars(opt), max_str_len=None)) opt.train_log_txt_formatter = "{time_str} [Epoch] {epoch:03d} [Loss] {loss_str}\n" opt.eval_log_txt_formatter = "{time_str} [Epoch] {epoch:03d} [Loss] {loss_str} [Metrics] {eval_metrics_str}\n" train_loader = DataLoader( train_dataset, collate_fn=start_end_collate, batch_size=opt.bsz, num_workers=opt.num_workers, shuffle=True, pin_memory=opt.pin_memory ) prev_best_score = 0. es_cnt = 0 # start_epoch = 0 if opt.start_epoch is None: start_epoch = -1 if opt.eval_untrained else 0 else: start_epoch = opt.start_epoch save_submission_filename = "latest_{}_{}_preds.jsonl".format(opt.dset_name, opt.eval_split_name) for epoch_i in trange(start_epoch, opt.n_epoch, desc="Epoch"): if epoch_i > -1: train_epoch(model, criterion, train_loader, optimizer, opt, epoch_i, tb_writer) lr_scheduler.step() eval_epoch_interval = 5 if opt.eval_path is not None and (epoch_i + 1) % eval_epoch_interval == 0: with torch.no_grad(): metrics_no_nms, metrics_nms, eval_loss_meters, latest_file_paths = \ eval_epoch(model, val_dataset, opt, save_submission_filename, epoch_i, criterion, tb_writer) # log to_write = opt.eval_log_txt_formatter.format( time_str=time.strftime("%Y_%m_%d_%H_%M_%S"), epoch=epoch_i, loss_str=" ".join(["{} {:.4f}".format(k, v.avg) for k, v in eval_loss_meters.items()]), eval_metrics_str=json.dumps(metrics_no_nms)) with open(opt.eval_log_filepath, "a") as f: f.write(to_write) logger.info("metrics_no_nms {}".format(pprint.pformat(metrics_no_nms["brief"], indent=4))) if metrics_nms is not None: logger.info("metrics_nms {}".format(pprint.pformat(metrics_nms["brief"], indent=4))) metrics = metrics_no_nms for k, v in metrics["brief"].items(): tb_writer.add_scalar(f"Eval/{k}", float(v), epoch_i+1) # stop_score = metrics["brief"]["MR-full-mAP"] stop_score = metrics["brief"]["mAP"] if stop_score > prev_best_score: es_cnt = 0 prev_best_score = stop_score checkpoint = { "model": model.state_dict(), "optimizer": optimizer.state_dict(), "lr_scheduler": lr_scheduler.state_dict(), "epoch": epoch_i, "opt": opt } torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", "_best.ckpt")) best_file_paths = [e.replace("latest", "best") for e in latest_file_paths] for src, tgt in zip(latest_file_paths, best_file_paths): os.renames(src, tgt) logger.info("The checkpoint file has been updated.") else: es_cnt += 1 if opt.max_es_cnt != -1 and es_cnt > opt.max_es_cnt: # early stop with open(opt.train_log_filepath, "a") as f: f.write(f"Early Stop at epoch {epoch_i}") logger.info(f"\n>>>>> Early stop at epoch {epoch_i} {prev_best_score}\n") break # save ckpt checkpoint = { "model": model.state_dict(), "optimizer": optimizer.state_dict(), "lr_scheduler": lr_scheduler.state_dict(), "epoch": epoch_i, "opt": opt } torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", "_latest.ckpt")) save_interval = 10 if "subs_train" in opt.train_path else 50 # smaller for pretrain if (epoch_i + 1) % save_interval == 0 or (epoch_i + 1) % opt.lr_drop == 0: # additional copies checkpoint = { "model": model.state_dict(), "optimizer": optimizer.state_dict(), "epoch": epoch_i, "opt": opt } torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", f"_e{epoch_i:04d}.ckpt")) if opt.debug: break tb_writer.close() def 
start_training(): logger.info("Setup config, data and model...")
logger = logging.getLogger(__name__) logging.basicConfig(format="%(asctime)s.%(msecs)03d:%(levelname)s:%(name)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO) def set_seed(seed, use_cuda=True): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) if use_cuda: torch.cuda.manual_seed_all(seed) def train_epoch(model, criterion, train_loader, optimizer, opt, epoch_i, tb_writer): logger.info(f"[Epoch {epoch_i+1}]") model.train() criterion.train() # init meters time_meters = defaultdict(AverageMeter) loss_meters = defaultdict(AverageMeter) num_training_examples = len(train_loader) timer_dataloading = time.time() for batch_idx, batch in tqdm(enumerate(train_loader), desc="Training Iteration", total=num_training_examples): time_meters["dataloading_time"].update(time.time() - timer_dataloading) timer_start = time.time() model_inputs, targets = prepare_batch_inputs(batch[1], opt.device, non_blocking=opt.pin_memory) time_meters["prepare_inputs_time"].update(time.time() - timer_start) timer_start = time.time() outputs = model(**model_inputs, targets=targets) loss_dict = criterion(outputs, targets) weight_dict = criterion.weight_dict losses = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict) time_meters["model_forward_time"].update(time.time() - timer_start) timer_start = time.time() optimizer.zero_grad() losses.backward() if opt.grad_clip > 0: nn.utils.clip_grad_norm_(model.parameters(), opt.grad_clip) optimizer.step() time_meters["model_backward_time"].update(time.time() - timer_start) loss_dict["loss_overall"] = float(losses) # for logging only for k, v in loss_dict.items(): loss_meters[k].update(float(v) * weight_dict[k] if k in weight_dict else float(v)) timer_dataloading = time.time() if opt.debug and batch_idx == 3: break # print/add logs tb_writer.add_scalar("Train/lr", float(optimizer.param_groups[0]["lr"]), epoch_i+1) for k, v in loss_meters.items(): tb_writer.add_scalar("Train/{}".format(k), v.avg, epoch_i+1) to_write = opt.train_log_txt_formatter.format( time_str=time.strftime("%Y_%m_%d_%H_%M_%S"), epoch=epoch_i+1, loss_str=" ".join(["{} {:.4f}".format(k, v.avg) for k, v in loss_meters.items()])) with open(opt.train_log_filepath, "a") as f: f.write(to_write) logger.info("Epoch time stats:") for name, meter in time_meters.items(): d = {k: f"{getattr(meter, k):.4f}" for k in ["max", "min", "avg"]} logger.info(f"{name} ==> {d}") def train(model, criterion, optimizer, lr_scheduler, train_dataset, val_dataset, opt): if opt.device.type == "cuda": logger.info("CUDA enabled.") model.to(opt.device) tb_writer = SummaryWriter(opt.tensorboard_log_dir) tb_writer.add_text("hyperparameters", dict_to_markdown(vars(opt), max_str_len=None)) opt.train_log_txt_formatter = "{time_str} [Epoch] {epoch:03d} [Loss] {loss_str}\n" opt.eval_log_txt_formatter = "{time_str} [Epoch] {epoch:03d} [Loss] {loss_str} [Metrics] {eval_metrics_str}\n" train_loader = DataLoader( train_dataset, collate_fn=start_end_collate, batch_size=opt.bsz, num_workers=opt.num_workers, shuffle=True, pin_memory=opt.pin_memory ) prev_best_score = 0. 
es_cnt = 0 # start_epoch = 0 if opt.start_epoch is None: start_epoch = -1 if opt.eval_untrained else 0 else: start_epoch = opt.start_epoch save_submission_filename = "latest_{}_{}_preds.jsonl".format(opt.dset_name, opt.eval_split_name) for epoch_i in trange(start_epoch, opt.n_epoch, desc="Epoch"): if epoch_i > -1: train_epoch(model, criterion, train_loader, optimizer, opt, epoch_i, tb_writer) lr_scheduler.step() eval_epoch_interval = opt.eval_epoch if opt.eval_path is not None and (epoch_i + 1) % eval_epoch_interval == 0: with torch.no_grad(): metrics_no_nms, metrics_nms, eval_loss_meters, latest_file_paths = \ eval_epoch(model, val_dataset, opt, save_submission_filename, epoch_i, criterion, tb_writer) # log to_write = opt.eval_log_txt_formatter.format( time_str=time.strftime("%Y_%m_%d_%H_%M_%S"), epoch=epoch_i, loss_str=" ".join(["{} {:.4f}".format(k, v.avg) for k, v in eval_loss_meters.items()]), eval_metrics_str=json.dumps(metrics_no_nms)) with open(opt.eval_log_filepath, "a") as f: f.write(to_write) logger.info("metrics_no_nms {}".format(pprint.pformat(metrics_no_nms["brief"], indent=4))) if metrics_nms is not None: logger.info("metrics_nms {}".format(pprint.pformat(metrics_nms["brief"], indent=4))) metrics = metrics_no_nms for k, v in metrics["brief"].items(): tb_writer.add_scalar(f"Eval/{k}", float(v), epoch_i+1) if opt.dset_name in ['hl']: stop_score = metrics["brief"]["MR-full-mAP"] else: stop_score = (metrics["brief"]["MR-full-R1@0.7"] + metrics["brief"]["MR-full-R1@0.5"]) / 2 if stop_score > prev_best_score: es_cnt = 0 prev_best_score = stop_score checkpoint = { "model": model.state_dict(), "optimizer": optimizer.state_dict(), "lr_scheduler": lr_scheduler.state_dict(), "epoch": epoch_i, "opt": opt } torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", "_best.ckpt")) best_file_paths = [e.replace("latest", "best") for e in latest_file_paths] for src, tgt in zip(latest_file_paths, best_file_paths): os.renames(src, tgt) logger.info("The checkpoint file has been updated.") else: es_cnt += 1 if opt.max_es_cnt != -1 and es_cnt > opt.max_es_cnt: # early stop with open(opt.train_log_filepath, "a") as f: f.write(f"Early Stop at epoch {epoch_i}") logger.info(f"\n>>>>> Early stop at epoch {epoch_i} {prev_best_score}\n") break # save ckpt checkpoint = { "model": model.state_dict(), "optimizer": optimizer.state_dict(), "lr_scheduler": lr_scheduler.state_dict(), "epoch": epoch_i, "opt": opt } torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", "_latest.ckpt")) # save_interval = 10 if "subs_train" in opt.train_path else 50 # smaller for pretrain # if (epoch_i + 1) % save_interval == 0 or (epoch_i + 1) % opt.lr_drop == 0: # additional copies # checkpoint = { # "model": model.state_dict(), # "optimizer": optimizer.state_dict(), # "epoch": epoch_i, # "opt": opt # } # torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", f"_e{epoch_i:04d}.ckpt")) if opt.debug: break tb_writer.close() def train_hl(model, criterion, optimizer, lr_scheduler, train_dataset, val_dataset, opt): if opt.device.type == "cuda": logger.info("CUDA enabled.") model.to(opt.device) tb_writer = SummaryWriter(opt.tensorboard_log_dir) tb_writer.add_text("hyperparameters", dict_to_markdown(vars(opt), max_str_len=None)) opt.train_log_txt_formatter = "{time_str} [Epoch] {epoch:03d} [Loss] {loss_str}\n" opt.eval_log_txt_formatter = "{time_str} [Epoch] {epoch:03d} [Loss] {loss_str} [Metrics] {eval_metrics_str}\n" train_loader = DataLoader( train_dataset, collate_fn=start_end_collate, batch_size=opt.bsz, 
num_workers=opt.num_workers, shuffle=True, pin_memory=opt.pin_memory ) prev_best_score = 0. es_cnt = 0 # start_epoch = 0 if opt.start_epoch is None: start_epoch = -1 if opt.eval_untrained else 0 else: start_epoch = opt.start_epoch save_submission_filename = "latest_{}_{}_preds.jsonl".format(opt.dset_name, opt.eval_split_name) for epoch_i in trange(start_epoch, opt.n_epoch, desc="Epoch"): if epoch_i > -1: train_epoch(model, criterion, train_loader, optimizer, opt, epoch_i, tb_writer) lr_scheduler.step() eval_epoch_interval = 5 if opt.eval_path is not None and (epoch_i + 1) % eval_epoch_interval == 0: with torch.no_grad(): metrics_no_nms, metrics_nms, eval_loss_meters, latest_file_paths = \ eval_epoch(model, val_dataset, opt, save_submission_filename, epoch_i, criterion, tb_writer) # log to_write = opt.eval_log_txt_formatter.format( time_str=time.strftime("%Y_%m_%d_%H_%M_%S"), epoch=epoch_i, loss_str=" ".join(["{} {:.4f}".format(k, v.avg) for k, v in eval_loss_meters.items()]), eval_metrics_str=json.dumps(metrics_no_nms)) with open(opt.eval_log_filepath, "a") as f: f.write(to_write) logger.info("metrics_no_nms {}".format(pprint.pformat(metrics_no_nms["brief"], indent=4))) if metrics_nms is not None: logger.info("metrics_nms {}".format(pprint.pformat(metrics_nms["brief"], indent=4))) metrics = metrics_no_nms for k, v in metrics["brief"].items(): tb_writer.add_scalar(f"Eval/{k}", float(v), epoch_i+1) # stop_score = metrics["brief"]["MR-full-mAP"] stop_score = metrics["brief"]["mAP"] if stop_score > prev_best_score: es_cnt = 0 prev_best_score = stop_score checkpoint = { "model": model.state_dict(), "optimizer": optimizer.state_dict(), "lr_scheduler": lr_scheduler.state_dict(), "epoch": epoch_i, "opt": opt } torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", "_best.ckpt")) best_file_paths = [e.replace("latest", "best") for e in latest_file_paths] for src, tgt in zip(latest_file_paths, best_file_paths): os.renames(src, tgt) logger.info("The checkpoint file has been updated.") else: es_cnt += 1 if opt.max_es_cnt != -1 and es_cnt > opt.max_es_cnt: # early stop with open(opt.train_log_filepath, "a") as f: f.write(f"Early Stop at epoch {epoch_i}") logger.info(f"\n>>>>> Early stop at epoch {epoch_i} {prev_best_score}\n") break # save ckpt checkpoint = { "model": model.state_dict(), "optimizer": optimizer.state_dict(), "lr_scheduler": lr_scheduler.state_dict(), "epoch": epoch_i, "opt": opt } torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", "_latest.ckpt")) save_interval = 10 if "subs_train" in opt.train_path else 50 # smaller for pretrain if (epoch_i + 1) % save_interval == 0 or (epoch_i + 1) % opt.lr_drop == 0: # additional copies checkpoint = { "model": model.state_dict(), "optimizer": optimizer.state_dict(), "epoch": epoch_i, "opt": opt } torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", f"_e{epoch_i:04d}.ckpt")) if opt.debug: break tb_writer.close() def start_training(): logger.info("Setup config, data and model...")
next_line: opt = BaseOptions().parse()
gold_snippet_index: 0
created_at: 2023-11-10 12:45:25+00:00
level: 24k
repo_name: ej0cl6/TextEE
file_path: TextEE/models/OneIE/E2Etrainer.py
[ { "identifier": "BasicTrainer", "path": "TextEE/models/trainer.py", "snippet": "class BasicTrainer(object):\n def __init__(self, config, type_set=None):\n self.config = config\n self.type_set = type_set\n \n @classmethod\n def add_extra_info_fn(cls, instances, raw_data, con...
import os, sys, logging, tqdm, pprint, copy import torch import numpy as np import ipdb from transformers import (BertTokenizer, RobertaTokenizer, XLMRobertaTokenizer, AutoTokenizer, AdamW, get_linear_schedule_with_warmup) from torch.utils.data import DataLoader from torch.optim import AdamW from ..trainer import BasicTrainer from .E2Emodel import OneIEE2EModel from .data import IEDataset from .util import generate_vocabs, load_valid_patterns, save_result, best_score_by_task from .scorer import score_graphs from scorer import compute_f1, print_scores
token_num: 15,302
logger = logging.getLogger(__name__) class OneIEE2ETrainer(BasicTrainer): def __init__(self, config, type_set=None): super().__init__(config, type_set) self.tokenizer = None self.model = None self.valid_patterns = None @classmethod def add_extra_info_fn(cls, instances, raw_data, config): extra_info_map = {} for dt in raw_data: extra_info = { "entity_mentions": dt["entity_mentions"] if "entity_mentions" in dt else [], "relation_mentions": dt["relation_mentions"] if "relation_mentions" in dt else [], "event_mentions": dt["event_mentions"] if "event_mentions" in dt else [], } extra_info_map[(dt["doc_id"], dt["wnd_id"])] = extra_info for instance in instances: instance["extra_info"] = extra_info_map[(instance["doc_id"], instance["wnd_id"])] return instances def load_tokenizer_(self, checkpoint=None): if checkpoint: logger.info(f"Loading tokenizer from {checkpoint}") state = torch.load(os.path.join(checkpoint, "best_model.tokenizer")) self.tokenizer = state["tokenizer"] else: logger.info(f"Loading tokenizer from {self.config.pretrained_model_name}") if self.config.pretrained_model_name.startswith('bert-'): self.tokenizer = BertTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir) elif self.config.pretrained_model_name.startswith('roberta-'): self.tokenizer = RobertaTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir) elif self.config.pretrained_model_name.startswith('xlm-roberta-'): self.tokenizer = XLMRobertaTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir) else: self.tokenizer = AutoTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir, do_lower_case=False) def load_model_(self, checkpoint=None): assert self.tokenizer if checkpoint: logger.info(f"Loading model from {checkpoint}") state = torch.load(os.path.join(checkpoint, "best_model.state"), map_location=f'cuda:{self.config.gpu_device}') self.vocabs = state["vocabs"] self.type_set = state["type_set"] self.valid_patterns = state["valid_patterns"] self.model = OneIEE2EModel(self.config, self.vocabs, self.valid_patterns) self.model.load_state_dict(state['model']) self.model.cuda(device=self.config.gpu_device) else:
logger = logging.getLogger(__name__) class OneIEE2ETrainer(BasicTrainer): def __init__(self, config, type_set=None): super().__init__(config, type_set) self.tokenizer = None self.model = None self.valid_patterns = None @classmethod def add_extra_info_fn(cls, instances, raw_data, config): extra_info_map = {} for dt in raw_data: extra_info = { "entity_mentions": dt["entity_mentions"] if "entity_mentions" in dt else [], "relation_mentions": dt["relation_mentions"] if "relation_mentions" in dt else [], "event_mentions": dt["event_mentions"] if "event_mentions" in dt else [], } extra_info_map[(dt["doc_id"], dt["wnd_id"])] = extra_info for instance in instances: instance["extra_info"] = extra_info_map[(instance["doc_id"], instance["wnd_id"])] return instances def load_tokenizer_(self, checkpoint=None): if checkpoint: logger.info(f"Loading tokenizer from {checkpoint}") state = torch.load(os.path.join(checkpoint, "best_model.tokenizer")) self.tokenizer = state["tokenizer"] else: logger.info(f"Loading tokenizer from {self.config.pretrained_model_name}") if self.config.pretrained_model_name.startswith('bert-'): self.tokenizer = BertTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir) elif self.config.pretrained_model_name.startswith('roberta-'): self.tokenizer = RobertaTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir) elif self.config.pretrained_model_name.startswith('xlm-roberta-'): self.tokenizer = XLMRobertaTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir) else: self.tokenizer = AutoTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir, do_lower_case=False) def load_model_(self, checkpoint=None): assert self.tokenizer if checkpoint: logger.info(f"Loading model from {checkpoint}") state = torch.load(os.path.join(checkpoint, "best_model.state"), map_location=f'cuda:{self.config.gpu_device}') self.vocabs = state["vocabs"] self.type_set = state["type_set"] self.valid_patterns = state["valid_patterns"] self.model = OneIEE2EModel(self.config, self.vocabs, self.valid_patterns) self.model.load_state_dict(state['model']) self.model.cuda(device=self.config.gpu_device) else:
next_line: self.valid_patterns = load_valid_patterns(self.config.valid_pattern_path, self.vocabs)
gold_snippet_index: 4
created_at: 2023-11-15 21:32:56+00:00
level: 24k
repo_name: ahayler/s4c
file_path: models/bts/trainer_overfit.py
[ { "identifier": "make_datasets", "path": "datasets/data_util.py", "snippet": "def make_datasets(config):\n type = config.get(\"type\", \"KITTI_Raw\")\n if type == \"KITTI_Odometry\":\n train_dataset = KittiOdometryDataset(\n base_path=config[\"data_path\"],\n frame_cou...
import math import ignite.distributed as idist import torch import numpy as np from copy import copy from typing import Optional, Union, Iterable, Sequence from ignite.contrib.handlers import TensorboardLogger from ignite.engine import Engine from matplotlib import pyplot as plt from torch import optim, nn from torch.utils.data import DataLoader, Dataset, Sampler from torch.utils.data.dataloader import T_co, _collate_fn_t, _worker_init_fn_t from torchvision.utils import make_grid from datasets.data_util import make_datasets from models.common.model.scheduler import make_scheduler from models.common.render import NeRFRenderer from models.bts.model.loss import ReconstructionLoss from models.bts.trainer import get_metrics, BTSWrapper, BTSNet from scripts.inference_setup import render_profile from utils.array_operations import map_fn, unsqueezer, to from utils.base_trainer import base_training from utils.plotting import color_tensor, color_segmentation_tensor
token_num: 16,170
class EncoderDummy(nn.Module): def __init__(self, size, feat_dim, num_views=1) -> None: super().__init__() self.feats = nn.Parameter(torch.randn(num_views, feat_dim, *size)) self.latent_size = feat_dim def forward(self, x): n = x.shape[0] return [self.feats.expand(n, -1, -1, -1)] class DataloaderDummy(DataLoader): def __init__(self, dataset: Dataset[T_co], batch_size: Optional[int] = 1, shuffle: Optional[bool] = None, sampler: Union[Sampler, Iterable, None] = None, batch_sampler: Union[Sampler[Sequence], Iterable[Sequence], None] = None, num_workers: int = 0, collate_fn: Optional[_collate_fn_t] = None, pin_memory: bool = False, drop_last: bool = False, timeout: float = 0, worker_init_fn: Optional[_worker_init_fn_t] = None, multiprocessing_context=None, generator=None, *, prefetch_factor: int = 2, persistent_workers: bool = False, pin_memory_device: str = ""): super().__init__(dataset, batch_size, shuffle, sampler, batch_sampler, num_workers, collate_fn, pin_memory, drop_last, timeout, worker_init_fn, multiprocessing_context, generator, prefetch_factor=prefetch_factor, persistent_workers=persistent_workers, pin_memory_device=pin_memory_device) self.element = to(map_fn(map_fn(dataset.__getitem__(0), torch.tensor), unsqueezer), "cuda:0") def _get_iterator(self): return iter([self.element]) def __iter__(self): return super().__iter__() def __len__(self) -> int: return 1 class BTSWrapperOverfit(BTSWrapper): def __init__(self, renderer, config, eval_nvs=False, size=None) -> None: super().__init__(renderer, config, eval_nvs) self.encoder_dummy = EncoderDummy(size, config["encoder"]["d_out"], num_views=1) self.renderer.net.encoder = self.encoder_dummy self.renderer.net.flip_augmentation = False def training(local_rank, config): return base_training(local_rank, config, get_dataflow, initialize, get_metrics, visualize) def get_dataflow(config, logger=None): # - Get train/test datasets if idist.get_local_rank() > 0: # Ensure that only local rank 0 download the dataset # Thus each node will download a copy of the dataset idist.barrier() train_dataset, _ = make_datasets(config["data"]) train_dataset.load_kitti_360_segmentation_gt = True train_dataset.length = 1 train_dataset._skip = config["data"].get("skip", 0) vis_dataset = copy(train_dataset) test_dataset = copy(train_dataset) vis_dataset.return_depth = True test_dataset.return_depth = True if idist.get_local_rank() == 0: # Ensure that only local rank 0 download the dataset idist.barrier() # Setup data loader also adapted to distributed config: nccl, gloo, xla-tpu train_loader = DataloaderDummy(train_dataset) test_loader = DataloaderDummy(test_dataset) vis_loader = DataloaderDummy(vis_dataset) return train_loader, test_loader, vis_loader def initialize(config: dict, logger=None): arch = config["model_conf"].get("arch", "BTSNet") net = globals()[arch](config["model_conf"]) renderer = NeRFRenderer.from_conf(config["renderer"]) renderer = renderer.bind_parallel(net, gpus=None).eval() mode = config.get("mode", "depth") model = BTSWrapperOverfit( renderer, config["model_conf"], mode == "nvs", size=config["data"].get("image_size", (192, 640)) ) model = idist.auto_model(model) optimizer = optim.Adam(model.parameters(), lr=config["learning_rate"]) optimizer = idist.auto_optim(optimizer) lr_scheduler = make_scheduler(config.get("scheduler", {}), optimizer)
class EncoderDummy(nn.Module): def __init__(self, size, feat_dim, num_views=1) -> None: super().__init__() self.feats = nn.Parameter(torch.randn(num_views, feat_dim, *size)) self.latent_size = feat_dim def forward(self, x): n = x.shape[0] return [self.feats.expand(n, -1, -1, -1)] class DataloaderDummy(DataLoader): def __init__(self, dataset: Dataset[T_co], batch_size: Optional[int] = 1, shuffle: Optional[bool] = None, sampler: Union[Sampler, Iterable, None] = None, batch_sampler: Union[Sampler[Sequence], Iterable[Sequence], None] = None, num_workers: int = 0, collate_fn: Optional[_collate_fn_t] = None, pin_memory: bool = False, drop_last: bool = False, timeout: float = 0, worker_init_fn: Optional[_worker_init_fn_t] = None, multiprocessing_context=None, generator=None, *, prefetch_factor: int = 2, persistent_workers: bool = False, pin_memory_device: str = ""): super().__init__(dataset, batch_size, shuffle, sampler, batch_sampler, num_workers, collate_fn, pin_memory, drop_last, timeout, worker_init_fn, multiprocessing_context, generator, prefetch_factor=prefetch_factor, persistent_workers=persistent_workers, pin_memory_device=pin_memory_device) self.element = to(map_fn(map_fn(dataset.__getitem__(0), torch.tensor), unsqueezer), "cuda:0") def _get_iterator(self): return iter([self.element]) def __iter__(self): return super().__iter__() def __len__(self) -> int: return 1 class BTSWrapperOverfit(BTSWrapper): def __init__(self, renderer, config, eval_nvs=False, size=None) -> None: super().__init__(renderer, config, eval_nvs) self.encoder_dummy = EncoderDummy(size, config["encoder"]["d_out"], num_views=1) self.renderer.net.encoder = self.encoder_dummy self.renderer.net.flip_augmentation = False def training(local_rank, config): return base_training(local_rank, config, get_dataflow, initialize, get_metrics, visualize) def get_dataflow(config, logger=None): # - Get train/test datasets if idist.get_local_rank() > 0: # Ensure that only local rank 0 download the dataset # Thus each node will download a copy of the dataset idist.barrier() train_dataset, _ = make_datasets(config["data"]) train_dataset.load_kitti_360_segmentation_gt = True train_dataset.length = 1 train_dataset._skip = config["data"].get("skip", 0) vis_dataset = copy(train_dataset) test_dataset = copy(train_dataset) vis_dataset.return_depth = True test_dataset.return_depth = True if idist.get_local_rank() == 0: # Ensure that only local rank 0 download the dataset idist.barrier() # Setup data loader also adapted to distributed config: nccl, gloo, xla-tpu train_loader = DataloaderDummy(train_dataset) test_loader = DataloaderDummy(test_dataset) vis_loader = DataloaderDummy(vis_dataset) return train_loader, test_loader, vis_loader def initialize(config: dict, logger=None): arch = config["model_conf"].get("arch", "BTSNet") net = globals()[arch](config["model_conf"]) renderer = NeRFRenderer.from_conf(config["renderer"]) renderer = renderer.bind_parallel(net, gpus=None).eval() mode = config.get("mode", "depth") model = BTSWrapperOverfit( renderer, config["model_conf"], mode == "nvs", size=config["data"].get("image_size", (192, 640)) ) model = idist.auto_model(model) optimizer = optim.Adam(model.parameters(), lr=config["learning_rate"]) optimizer = idist.auto_optim(optimizer) lr_scheduler = make_scheduler(config.get("scheduler", {}), optimizer)
next_line: criterion = ReconstructionLoss(config["loss"], config["model_conf"].get("use_automasking", False))
gold_snippet_index: 3
created_at: 2023-11-12 21:53:27+00:00
level: 24k
repo_name: newcastleuniversity/DISPEL
file_path: dispel/processing/extract.py
[ { "identifier": "EntityType", "path": "dispel/data/core.py", "snippet": "class ReadingSchema:\nclass Evaluation(Epoch):\nclass Session(Epoch):\nclass Reading(FlagMixIn):\n def __init__(\n self,\n *args,\n uuid: str,\n finished: Optional[bool] = None,\n exit_reason: ...
import inspect import math import warnings import numpy as np import pandas as pd from typing import ( Any, Callable, Dict, Generator, Iterable, List, Optional, Sequence, Tuple, Union, cast, ) from deprecated import deprecated from dispel.data.core import EntityType, Reading from dispel.data.flags import Flag, FlagSeverity, FlagType, WrappedResult from dispel.data.levels import Level from dispel.data.measures import ( MeasureId, MeasureSet, MeasureValue, MeasureValueDefinition, ) from dispel.data.values import AbbreviatedValue as AV from dispel.data.values import ( DefinitionId, DefinitionIdType, ValueDefinition, ValueDefinitionPrototype, ) from dispel.processing.core import ( ErrorHandling, ProcessingControlResult, ProcessingResult, ProcessingStep, ProcessResultType, ) from dispel.processing.data_set import ( MutateDataSetProcessingStepBase, TransformationFunctionGeneratorType, WrapResultGeneratorType, ) from dispel.processing.flags import FlagStepMixin from dispel.processing.level import LevelFilterType, LevelProcessingResult from dispel.processing.transform import TransformStepChainMixIn from dispel.stats.core import iqr, npcv, percentile_95, variation, variation_increase
token_num: 15,453
... 'data-set-id', ... [ ... {'func': np.mean, 'method': 'average'}, ... {'func': np.median, 'method': 'median'} ... ], ... ValueDefinitionPrototype( ... id_='measure-{method}', ... name='{method} measure', ... unit='s' ... ) ... ) This extraction step will result in two measure values, one for the mean and one with the median. """ transform_functions: Iterable[Dict[str, Any]] def __init__( self, data_set_ids: Optional[Union[str, Iterable[str]]] = None, transform_functions: Optional[Iterable[Dict[str, Any]]] = None, definition: Optional[ValueDefinitionPrototype] = None, level_filter: Optional[LevelFilterType] = None, yield_if_nan: Optional[bool] = None, ): super().__init__( definition=definition, data_set_ids=data_set_ids, level_filter=level_filter, yield_if_nan=yield_if_nan, ) if transform_functions: self.transform_functions = transform_functions def get_transform_functions(self) -> TransformationFunctionGeneratorType: """Get the transform functions applied to the data sets.""" yield from super().get_transform_functions() for function_spec in self.transform_functions: spec = function_spec.copy() yield spec.pop("func"), spec AggregationFunctionType = Union[str, Callable[[pd.Series], float]] def agg_column( column: str, method: AggregationFunctionType ) -> Callable[[pd.DataFrame], float]: """Create a function to apply an aggregation function on a column. Parameters ---------- column The column to be aggregated method A function to apply on the column Returns ------- Callable[[pandas.DataFrame], float] A function that aggregates one column of a `~pandas.DataFrame`. """ def _function(data: pd.DataFrame) -> float: return data[column].agg(method) return _function #: A list of basic used aggregation methods BASIC_AGGREGATIONS: List[Tuple[str, str]] = [ ("mean", "mean"), ("std", "standard deviation"), ] #: A list of commonly used aggregation methods DEFAULT_AGGREGATIONS: List[Tuple[str, str]] = [ *BASIC_AGGREGATIONS, ("median", "median"), ("min", "minimum"), ("max", "maximum"), ] #: A list of commonly used aggregation methods plus coefficient of variation DEFAULT_AGGREGATIONS_CV: List[Tuple[Union[Callable[[Any], float], str], str]] = [ *DEFAULT_AGGREGATIONS, (variation, "coefficient of variation"), ] #: A list of commonly used aggregation methods plus 95th percentile DEFAULT_AGGREGATIONS_Q95: List[Tuple[Union[Callable[[Any], float], str], str]] = [ *DEFAULT_AGGREGATIONS, (percentile_95, "95th percentile"), ] #: A list of commonly used aggregation methods plus inter-quartile range DEFAULT_AGGREGATIONS_IQR: List[Tuple[Union[Callable[[Any], float], str], str]] = [ *DEFAULT_AGGREGATIONS, (iqr, "iqr"), ] #: An extended list of commonly used aggregation methods EXTENDED_AGGREGATIONS: List[Tuple[str, str]] = [ *DEFAULT_AGGREGATIONS, ("skew", "skewness"), ("kurtosis", "kurtosis"), ] DEFAULT_AGGREGATIONS_Q95_CV: List[Tuple[Union[Callable[[Any], float], str], str]] = [ *DEFAULT_AGGREGATIONS_Q95, (variation, "coefficient of variation"), ] #: A dictionary containing all aggregation methods AGGREGATION_REGISTRY: Dict[str, Tuple[AggregationFunctionType, str]] = { **{agg: (agg, agg_label) for agg, agg_label in EXTENDED_AGGREGATIONS}, "cv": (variation, "coefficient of variation"),
"""Extraction functionalities for processing module.""" from __future__ import annotations class MeasureDefinitionMixin: """A mixin class for processing steps producing measure values. Parameters ---------- definition An optional value definition. If no value definition is provided, the :data:`definition` class variable will be used. Alternatively, one can overwrite :meth:`get_definition` to provide the definition. """ #: The specification of the measure definition definition: Optional[Union[ValueDefinition, ValueDefinitionPrototype]] = None def __init__(self, *args, **kwargs): definition = kwargs.pop("definition", None) self.definition = definition or self.definition super().__init__(*args, **kwargs) def get_definition(self, **kwargs) -> ValueDefinition: """Get the measure definition. Parameters ---------- kwargs Optional parameters that will be passed along to the creation of measure definitions from prototypes. See :meth:`~dispel.data.values.ValueDefinitionPrototype.create_definition` Returns ------- ValueDefinition The definition of the value """ assert ( self.definition is not None ), "Definition must be set or get_definition must be overwritten." definition = self.definition if isinstance(definition, ValueDefinitionPrototype): definition = cast(ValueDefinition, definition.create_definition(**kwargs)) return definition def get_value(self, value: Any, **kwargs) -> MeasureValue: """Get a measure value based on the definition. Parameters ---------- value The value kwargs Optional arguments passed to :meth:`get_definition`. Returns ------- MeasureValue The ``value`` wrapped with the definition from :meth:`get_definition`. """ return MeasureValue(self.get_definition(**kwargs), value) class ExtractStep( MeasureDefinitionMixin, TransformStepChainMixIn, MutateDataSetProcessingStepBase ): r"""A measure extraction processing step. This class provides a convenient way to extract a measure from one or more data sets by specifying their id, their level_ids or level filter, a transformation function and a measure value definition. Parameters ---------- data_set_ids An optional list of data set ids to be used for the transformation. See :class:`~dispel.processing.data_set.DataSetProcessingStepMixin`. transform_function An optional function to be applied to the data sets. See :class:`~dispel.processing.data_set.MutateDataSetProcessingStepBase`. definition An optional value definition or prototype. See :class:`MeasureDefinitionMixin`. level_filter An optional filter to limit the levels being processed. See :class:`~dispel.processing.level.LevelProcessingStep`. yield_if_nan If ``True``, yield null values as measure values. Otherwise, processing will not return a measure value in case of a null result for the extraction. Examples -------- Assuming we wanted to compute the maximum value of a raw data set we can create the following step >>> from dispel.data.values import ValueDefinition >>> from dispel.processing.extract import ExtractStep >>> step = ExtractStep( ... 'data-set-id', ... lambda data: data.max(axis=0), ... ValueDefinition('maximum','Maximum value') ... ) A common approach is to define a processing step for re-use and leveraging the ``@transformation`` decorator to specify the transformation function: >>> import pandas as pd >>> from dispel.data.values import ValueDefinition >>> from dispel.processing.extract import ExtractStep >>> from dispel.processing.data_set import transformation >>> class MyExtractStep(ExtractStep): ... data_set_ids = 'data-set-id' ... 
definition = ValueDefinition('maximum','Maximum value') ... ... @transformation ... def _max(self, data: pd.DataFrame) -> float: ... return data.max(axis=0) Often one wants to extract multiple measures from one data set. This can be achieved by using prototypes and optional named arguments with ``@transformation``: >>> import pandas as pd >>> from dispel.data.values import ValueDefinitionPrototype >>> from dispel.processing.extract import ExtractStep >>> from dispel.processing.data_set import transformation >>> class MyExtractStep(ExtractStep): ... data_set_ids = 'data-set-id' ... definition = ValueDefinitionPrototype( ... id_='id-{agg_abbr}', ... name='{agg} value' ... ) ... ... @transformation(agg='Maximum', agg_abbr='max') ... def _max(self, data: pd.DataFrame) -> float: ... return data.max(axis=0) ... ... @transformation(agg='Minimum', agg_abbr='min') ... def _min(self, data: pd.DataFrame) -> float: ... return data.min(axis=0) """ yield_if_nan: bool = False def __init__( self, data_set_ids: Optional[Union[str, Iterable[str]]] = None, transform_function: Optional[Callable[..., Any]] = None, definition: Optional[Union[ValueDefinition, ValueDefinitionPrototype]] = None, level_filter: Optional[LevelFilterType] = None, yield_if_nan: Optional[bool] = None, ): super().__init__( definition=definition, data_set_ids=data_set_ids, transform_function=transform_function, level_filter=level_filter, ) self.yield_if_nan = yield_if_nan or self.yield_if_nan def wrap_result( self, res: Any, level: Level, reading: Reading, **kwargs: Any ) -> WrapResultGeneratorType: """Wrap the result from the processing function into a class. Parameters ---------- res Any result returned by the extraction step. If res is a :class:`~dispel.data.flags.WrappedResult`, the flag contained in the object will be automatically added to the :class:`~dispel.data.measures.MeasureValue`, hence the flagged wrapped results will always translate into flagged :class:`~dispel.data.measures.MeasureValue`. level The current level reading The current reading kwargs Additional kwargs Yields ------ LevelProcessingResult The processing result """ try: if len(res) == 0: res = math.nan warnings.warn("Extract step returned an iterable!", UserWarning) except TypeError: pass if is_wrapped := isinstance(res, WrappedResult): measure_value = res.measure_value else: measure_value = res if not (is_nan := math.isnan(measure_value)) or (is_nan and self.yield_if_nan): value = self.get_value(measure_value, **kwargs) # If result is wrapped, add the flag to the measure value if is_wrapped: value.add_flags(res, ignore_duplicates=True) yield LevelProcessingResult( step=self, sources=self.get_raw_data_sets(level), result=value, level=level, ) @deprecated(reason="Use ExtractStep and @transformation decorator") class ExtractMultipleStep(ExtractStep): r"""A measure extraction processing step for multiple measures. This processing step allows to produce multiple :class:`~dispel.data.measures.MeasureValue`\ s by providing a list of functions and a :class:`~dispel.data.values.ValueDefinitionPrototype` to create the :class:`~dispel.data.values.ValueDefinition`\ s from. Parameters ---------- data_set_ids An optional list of data set ids to be used for the transformation. See :class:`~dispel.processing.data_set.DataSetProcessingStepMixin`. 
transform_functions An optional list of dictionaries containing at least the processing function under the key ``func``, which consumes the specified data sets though ``data_set_ids`` as positional arguments and returns a measure value passed to :class:`~dispel.data.measures.MeasureValue`. Additional keywords will be passed to :meth:`~dispel.data.values.ValueDefinitionPrototype.create_definition`. If no functions are provided, the :data:`transform_functions` class variable will be used. definition A :class:`~dispel.data.values.ValueDefinitionPrototype` that is used to create the :class:`~dispel.data.measures.MeasureValueDefinition`\ s for the transformation functions provided in ``transform_functions``. level_filter An optional filter to limit the levels being processed. See :class:`~dispel.processing.level.LevelProcessingStep`. yield_if_nan If ``True``, yield null values as measure values. Otherwise, processing will not return a measure value in case of a null result for the extraction. Examples -------- To ease the generation of multiple similar measures the :class:`ExtractMultipleStep` provides a convenient way to do so. Assume you want to create both the mean and median of a data set this can be achieved as follows: >>> import numpy as np >>> from dispel.data.values import ValueDefinitionPrototype >>> from dispel.processing.extract import ExtractMultipleStep >>> step = ExtractMultipleStep( ... 'data-set-id', ... [ ... {'func': np.mean, 'method': 'average'}, ... {'func': np.median, 'method': 'median'} ... ], ... ValueDefinitionPrototype( ... id_='measure-{method}', ... name='{method} measure', ... unit='s' ... ) ... ) This extraction step will result in two measure values, one for the mean and one with the median. """ transform_functions: Iterable[Dict[str, Any]] def __init__( self, data_set_ids: Optional[Union[str, Iterable[str]]] = None, transform_functions: Optional[Iterable[Dict[str, Any]]] = None, definition: Optional[ValueDefinitionPrototype] = None, level_filter: Optional[LevelFilterType] = None, yield_if_nan: Optional[bool] = None, ): super().__init__( definition=definition, data_set_ids=data_set_ids, level_filter=level_filter, yield_if_nan=yield_if_nan, ) if transform_functions: self.transform_functions = transform_functions def get_transform_functions(self) -> TransformationFunctionGeneratorType: """Get the transform functions applied to the data sets.""" yield from super().get_transform_functions() for function_spec in self.transform_functions: spec = function_spec.copy() yield spec.pop("func"), spec AggregationFunctionType = Union[str, Callable[[pd.Series], float]] def agg_column( column: str, method: AggregationFunctionType ) -> Callable[[pd.DataFrame], float]: """Create a function to apply an aggregation function on a column. Parameters ---------- column The column to be aggregated method A function to apply on the column Returns ------- Callable[[pandas.DataFrame], float] A function that aggregates one column of a `~pandas.DataFrame`. 
""" def _function(data: pd.DataFrame) -> float: return data[column].agg(method) return _function #: A list of basic used aggregation methods BASIC_AGGREGATIONS: List[Tuple[str, str]] = [ ("mean", "mean"), ("std", "standard deviation"), ] #: A list of commonly used aggregation methods DEFAULT_AGGREGATIONS: List[Tuple[str, str]] = [ *BASIC_AGGREGATIONS, ("median", "median"), ("min", "minimum"), ("max", "maximum"), ] #: A list of commonly used aggregation methods plus coefficient of variation DEFAULT_AGGREGATIONS_CV: List[Tuple[Union[Callable[[Any], float], str], str]] = [ *DEFAULT_AGGREGATIONS, (variation, "coefficient of variation"), ] #: A list of commonly used aggregation methods plus 95th percentile DEFAULT_AGGREGATIONS_Q95: List[Tuple[Union[Callable[[Any], float], str], str]] = [ *DEFAULT_AGGREGATIONS, (percentile_95, "95th percentile"), ] #: A list of commonly used aggregation methods plus inter-quartile range DEFAULT_AGGREGATIONS_IQR: List[Tuple[Union[Callable[[Any], float], str], str]] = [ *DEFAULT_AGGREGATIONS, (iqr, "iqr"), ] #: An extended list of commonly used aggregation methods EXTENDED_AGGREGATIONS: List[Tuple[str, str]] = [ *DEFAULT_AGGREGATIONS, ("skew", "skewness"), ("kurtosis", "kurtosis"), ] DEFAULT_AGGREGATIONS_Q95_CV: List[Tuple[Union[Callable[[Any], float], str], str]] = [ *DEFAULT_AGGREGATIONS_Q95, (variation, "coefficient of variation"), ] #: A dictionary containing all aggregation methods AGGREGATION_REGISTRY: Dict[str, Tuple[AggregationFunctionType, str]] = { **{agg: (agg, agg_label) for agg, agg_label in EXTENDED_AGGREGATIONS}, "cv": (variation, "coefficient of variation"),
"cvi": (variation_increase, "coefficient of variation increase"),
17
2023-11-14 10:06:46+00:00
24k
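The `ExtractStep` and `agg_column` utilities shown in the record above combine naturally. As a hedged illustration only (import paths are mirrored from the record's own doctests; the data set id and the 'speed' column are hypothetical placeholders, not part of the record), an aggregation-based extraction step might look like:

# Illustrative sketch, not the library's documented example.
from dispel.data.values import ValueDefinition
from dispel.processing.extract import ExtractStep, agg_column

step = ExtractStep(
    'data-set-id',                                # data set consumed by the step (placeholder id)
    agg_column('speed', 'mean'),                  # aggregate one column with a pandas aggregation method
    ValueDefinition('speed-mean', 'Mean speed'),  # definition wrapped around the resulting measure value
)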
Jisencc/yolov5_dual_weighting
segment/val.py
[ { "identifier": "DetectMultiBackend", "path": "models/common.py", "snippet": "class DetectMultiBackend(nn.Module):\n # YOLOv5 MultiBackend class for python inference on various backends\n def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False, fuse=Tr...
import argparse import json import os import subprocess import sys import numpy as np import torch import torch.nn.functional as F from multiprocessing.pool import ThreadPool from pathlib import Path from tqdm import tqdm from models.common import DetectMultiBackend from models.yolo import SegmentationModel from utils.callbacks import Callbacks from utils.general import (LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size, check_requirements, check_yaml, coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args, scale_boxes, xywh2xyxy, xyxy2xywh) from utils.metrics import ConfusionMatrix, box_iou from utils.plots import output_to_target, plot_val_study from utils.segment.dataloaders import create_dataloader from utils.segment.general import mask_iou, process_mask, process_mask_native, scale_image from utils.segment.metrics import Metrics, ap_per_class_box_and_mask from utils.segment.plots import plot_images_and_masks from utils.torch_utils import de_parallel, select_device, smart_inference_mode from pycocotools.mask import encode from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval
17,353
assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \ f'classes). Pass correct combination of --weights and --data that are trained together.' model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz)) # warmup pad, rect = (0.0, False) if task == 'speed' else (0.5, pt) # square inference for benchmarks task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images dataloader = create_dataloader(data[task], imgsz, batch_size, stride, single_cls, pad=pad, rect=rect, workers=workers, prefix=colorstr(f'{task}: '), overlap_mask=overlap, mask_downsample_ratio=mask_downsample_ratio)[0] seen = 0 confusion_matrix = ConfusionMatrix(nc=nc) names = model.names if hasattr(model, 'names') else model.module.names # get class names if isinstance(names, (list, tuple)): # old format names = dict(enumerate(names)) class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) s = ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', 'R', 'mAP50', 'mAP50-95)', 'Mask(P', 'R', 'mAP50', 'mAP50-95)') dt = Profile(), Profile(), Profile() metrics = Metrics() loss = torch.zeros(4, device=device) jdict, stats = [], [] # callbacks.run('on_val_start') pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT) # progress bar for batch_i, (im, targets, paths, shapes, masks) in enumerate(pbar): # callbacks.run('on_val_batch_start') with dt[0]: if cuda: im = im.to(device, non_blocking=True) targets = targets.to(device) masks = masks.to(device) masks = masks.float() im = im.half() if half else im.float() # uint8 to fp16/32 im /= 255 # 0 - 255 to 0.0 - 1.0 nb, _, height, width = im.shape # batch size, channels, height, width # Inference with dt[1]: preds, protos, train_out = model(im) if compute_loss else (*model(im, augment=augment)[:2], None) # Loss if compute_loss: loss += compute_loss((train_out, protos), targets, masks)[1] # box, obj, cls # NMS targets[:, 2:] *= torch.tensor((width, height, width, height), device=device) # to pixels lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling with dt[2]: preds = non_max_suppression(preds, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls, max_det=max_det, nm=nm) # Metrics plot_masks = [] # masks for plotting for si, (pred, proto) in enumerate(zip(preds, protos)): labels = targets[targets[:, 0] == si, 1:] nl, npr = labels.shape[0], pred.shape[0] # number of labels, predictions path, shape = Path(paths[si]), shapes[si][0] correct_masks = torch.zeros(npr, niou, dtype=torch.bool, device=device) # init correct_bboxes = torch.zeros(npr, niou, dtype=torch.bool, device=device) # init seen += 1 if npr == 0: if nl: stats.append((correct_masks, correct_bboxes, *torch.zeros((2, 0), device=device), labels[:, 0])) if plots: confusion_matrix.process_batch(detections=None, labels=labels[:, 0]) continue # Masks midx = [si] if overlap else targets[:, 0] == si gt_masks = masks[midx] pred_masks = process(proto, pred[:, 6:], pred[:, :4], shape=im[si].shape[1:]) # Predictions if single_cls: pred[:, 5] = 0 predn = pred.clone() scale_boxes(im[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred # Evaluate if nl: tbox = xywh2xyxy(labels[:, 1:5]) # target boxes scale_boxes(im[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels correct_bboxes = process_batch(predn, labelsn, iouv) correct_masks = process_batch(predn, labelsn, iouv, 
pred_masks, gt_masks, overlap=overlap, masks=True) if plots: confusion_matrix.process_batch(predn, labelsn) stats.append((correct_masks, correct_bboxes, pred[:, 4], pred[:, 5], labels[:, 0])) # (conf, pcls, tcls) pred_masks = torch.as_tensor(pred_masks, dtype=torch.uint8) if plots and batch_i < 3: plot_masks.append(pred_masks[:15]) # filter top 15 to plot # Save/log if save_txt: save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt') if save_json: pred_masks = scale_image(im[si].shape[1:], pred_masks.permute(1, 2, 0).contiguous().cpu().numpy(), shape, shapes[si][1]) save_one_json(predn, jdict, path, class_map, pred_masks) # append to COCO-JSON dictionary # callbacks.run('on_val_image_end', pred, predn, path, names, im[si]) # Plot images if plots and batch_i < 3: if len(plot_masks): plot_masks = torch.cat(plot_masks, dim=0)
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Validate a trained YOLOv5 segment model on a segment dataset Usage: $ bash data/scripts/get_coco.sh --val --segments # download COCO-segments val split (1G, 5000 images) $ python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # validate COCO-segments Usage - formats: $ python segment/val.py --weights yolov5s-seg.pt # PyTorch yolov5s-seg.torchscript # TorchScript yolov5s-seg.onnx # ONNX Runtime or OpenCV DNN with --dnn yolov5s-seg_openvino_label # OpenVINO yolov5s-seg.engine # TensorRT yolov5s-seg.mlmodel # CoreML (macOS-only) yolov5s-seg_saved_model # TensorFlow SavedModel yolov5s-seg.pb # TensorFlow GraphDef yolov5s-seg.tflite # TensorFlow Lite yolov5s-seg_edgetpu.tflite # TensorFlow Edge TPU yolov5s-seg_paddle_model # PaddlePaddle """ FILE = Path(__file__).resolve() ROOT = FILE.parents[1] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative def save_one_txt(predn, save_conf, shape, file): # Save one txt result gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh for *xyxy, conf, cls in predn.tolist(): xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(file, 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') def save_one_json(predn, jdict, path, class_map, pred_masks): # Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236} def single_encode(x): rle = encode(np.asarray(x[:, :, None], order='F', dtype='uint8'))[0] rle['counts'] = rle['counts'].decode('utf-8') return rle image_id = int(path.stem) if path.stem.isnumeric() else path.stem box = xyxy2xywh(predn[:, :4]) # xywh box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner pred_masks = np.transpose(pred_masks, (2, 0, 1)) with ThreadPool(NUM_THREADS) as pool: rles = pool.map(single_encode, pred_masks) for i, (p, b) in enumerate(zip(predn.tolist(), box.tolist())): jdict.append({ 'image_id': image_id, 'category_id': class_map[int(p[5])], 'bbox': [round(x, 3) for x in b], 'score': round(p[4], 5), 'segmentation': rles[i]}) def process_batch(detections, labels, iouv, pred_masks=None, gt_masks=None, overlap=False, masks=False): """ Return correct prediction matrix Arguments: detections (array[N, 6]), x1, y1, x2, y2, conf, class labels (array[M, 5]), class, x1, y1, x2, y2 Returns: correct (array[N, 10]), for 10 IoU levels """ if masks: if overlap: nl = len(labels) index = torch.arange(nl, device=gt_masks.device).view(nl, 1, 1) + 1 gt_masks = gt_masks.repeat(nl, 1, 1) # shape(1,640,640) -> (n,640,640) gt_masks = torch.where(gt_masks == index, 1.0, 0.0) if gt_masks.shape[1:] != pred_masks.shape[1:]: gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode='bilinear', align_corners=False)[0] gt_masks = gt_masks.gt_(0.5) iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1)) else: # boxes iou = box_iou(labels[:, 1:], detections[:, :4]) correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool) correct_class = labels[:, 0:1] == detections[:, 5] for i in range(len(iouv)): x = torch.where((iou >= iouv[i]) & correct_class) # IoU > threshold and classes match if x[0].shape[0]: matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detect, iou] if x[0].shape[0] > 1: matches = 
matches[matches[:, 2].argsort()[::-1]] matches = matches[np.unique(matches[:, 1], return_index=True)[1]] # matches = matches[matches[:, 2].argsort()[::-1]] matches = matches[np.unique(matches[:, 0], return_index=True)[1]] correct[matches[:, 1].astype(int), i] = True return torch.tensor(correct, dtype=torch.bool, device=iouv.device) @smart_inference_mode() def run( data, weights=None, # model.pt path(s) batch_size=32, # batch size imgsz=640, # inference size (pixels) conf_thres=0.001, # confidence threshold iou_thres=0.6, # NMS IoU threshold max_det=300, # maximum detections per image task='val', # train, val, test, speed or study device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu workers=8, # max dataloader workers (per RANK in DDP mode) single_cls=False, # treat as single-class dataset augment=False, # augmented inference verbose=False, # verbose output save_txt=False, # save results to *.txt save_hybrid=False, # save label+prediction hybrid results to *.txt save_conf=False, # save confidences in --save-txt labels save_json=False, # save a COCO-JSON results file project=ROOT / 'runs/val-seg', # save to project/name name='exp', # save to project/name exist_ok=False, # existing project/name ok, do not increment half=True, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference model=None, dataloader=None, save_dir=Path(''), plots=True, overlap=False, mask_downsample_ratio=1, compute_loss=None, callbacks=Callbacks(), ): if save_json: check_requirements('pycocotools>=2.0.6') process = process_mask_native # more accurate else: process = process_mask # faster # Initialize/load model and set device training = model is not None if training: # called by train.py device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model half &= device.type != 'cpu' # half precision only supported on CUDA model.half() if half else model.float() nm = de_parallel(model).model[-1].nm # number of masks else: # called directly device = select_device(device, batch_size=batch_size) # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine imgsz = check_img_size(imgsz, s=stride) # check image size half = model.fp16 # FP16 supported on limited backends with CUDA nm = de_parallel(model).model.model[-1].nm if isinstance(model, SegmentationModel) else 32 # number of masks if engine: batch_size = model.batch_size else: device = model.device if not (pt or jit): batch_size = 1 # export.py models default to batch-size 1 LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models') # Data data = check_dataset(data) # check # Configure model.eval() cuda = device.type != 'cpu' is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt') # COCO dataset nc = 1 if single_cls else int(data['nc']) # number of classes iouv = torch.linspace(0.5, 0.95, 10, device=device) # iou vector for mAP@0.5:0.95 niou = iouv.numel() # Dataloader if not training: if pt and not single_cls: # check --weights are trained on --data ncm = model.model.nc assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \ f'classes). 
Pass correct combination of --weights and --data that are trained together.' model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz)) # warmup pad, rect = (0.0, False) if task == 'speed' else (0.5, pt) # square inference for benchmarks task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images dataloader = create_dataloader(data[task], imgsz, batch_size, stride, single_cls, pad=pad, rect=rect, workers=workers, prefix=colorstr(f'{task}: '), overlap_mask=overlap, mask_downsample_ratio=mask_downsample_ratio)[0] seen = 0 confusion_matrix = ConfusionMatrix(nc=nc) names = model.names if hasattr(model, 'names') else model.module.names # get class names if isinstance(names, (list, tuple)): # old format names = dict(enumerate(names)) class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) s = ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', 'R', 'mAP50', 'mAP50-95)', 'Mask(P', 'R', 'mAP50', 'mAP50-95)') dt = Profile(), Profile(), Profile() metrics = Metrics() loss = torch.zeros(4, device=device) jdict, stats = [], [] # callbacks.run('on_val_start') pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT) # progress bar for batch_i, (im, targets, paths, shapes, masks) in enumerate(pbar): # callbacks.run('on_val_batch_start') with dt[0]: if cuda: im = im.to(device, non_blocking=True) targets = targets.to(device) masks = masks.to(device) masks = masks.float() im = im.half() if half else im.float() # uint8 to fp16/32 im /= 255 # 0 - 255 to 0.0 - 1.0 nb, _, height, width = im.shape # batch size, channels, height, width # Inference with dt[1]: preds, protos, train_out = model(im) if compute_loss else (*model(im, augment=augment)[:2], None) # Loss if compute_loss: loss += compute_loss((train_out, protos), targets, masks)[1] # box, obj, cls # NMS targets[:, 2:] *= torch.tensor((width, height, width, height), device=device) # to pixels lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling with dt[2]: preds = non_max_suppression(preds, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls, max_det=max_det, nm=nm) # Metrics plot_masks = [] # masks for plotting for si, (pred, proto) in enumerate(zip(preds, protos)): labels = targets[targets[:, 0] == si, 1:] nl, npr = labels.shape[0], pred.shape[0] # number of labels, predictions path, shape = Path(paths[si]), shapes[si][0] correct_masks = torch.zeros(npr, niou, dtype=torch.bool, device=device) # init correct_bboxes = torch.zeros(npr, niou, dtype=torch.bool, device=device) # init seen += 1 if npr == 0: if nl: stats.append((correct_masks, correct_bboxes, *torch.zeros((2, 0), device=device), labels[:, 0])) if plots: confusion_matrix.process_batch(detections=None, labels=labels[:, 0]) continue # Masks midx = [si] if overlap else targets[:, 0] == si gt_masks = masks[midx] pred_masks = process(proto, pred[:, 6:], pred[:, :4], shape=im[si].shape[1:]) # Predictions if single_cls: pred[:, 5] = 0 predn = pred.clone() scale_boxes(im[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred # Evaluate if nl: tbox = xywh2xyxy(labels[:, 1:5]) # target boxes scale_boxes(im[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels correct_bboxes = process_batch(predn, labelsn, iouv) correct_masks = process_batch(predn, labelsn, iouv, pred_masks, gt_masks, overlap=overlap, masks=True) if plots: confusion_matrix.process_batch(predn, labelsn) 
stats.append((correct_masks, correct_bboxes, pred[:, 4], pred[:, 5], labels[:, 0])) # (conf, pcls, tcls) pred_masks = torch.as_tensor(pred_masks, dtype=torch.uint8) if plots and batch_i < 3: plot_masks.append(pred_masks[:15]) # filter top 15 to plot # Save/log if save_txt: save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt') if save_json: pred_masks = scale_image(im[si].shape[1:], pred_masks.permute(1, 2, 0).contiguous().cpu().numpy(), shape, shapes[si][1]) save_one_json(predn, jdict, path, class_map, pred_masks) # append to COCO-JSON dictionary # callbacks.run('on_val_image_end', pred, predn, path, names, im[si]) # Plot images if plots and batch_i < 3: if len(plot_masks): plot_masks = torch.cat(plot_masks, dim=0)
plot_images_and_masks(im, targets, masks, paths, save_dir / f'val_batch{batch_i}_labels.jpg', names)
15
2023-11-12 13:28:26+00:00
24k
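The `process_batch` helper in the record above builds a boolean "correct" matrix over ten IoU thresholds. A standalone, simplified sketch of that thresholding is given below; it uses torchvision's `box_iou` instead of the repository's local helper (an assumption made for portability) and omits the greedy de-duplication of multiple matches that the original performs:

import torch
from torchvision.ops import box_iou

# Toy inputs: detections are (x1, y1, x2, y2, conf, cls); labels are (cls, x1, y1, x2, y2).
detections = torch.tensor([[0., 0., 10., 10., 0.9, 0.],
                           [20., 20., 30., 30., 0.8, 1.]])
labels = torch.tensor([[0., 1., 1., 9., 9.],
                       [1., 21., 21., 29., 29.]])
iouv = torch.linspace(0.5, 0.95, 10)               # IoU thresholds for mAP@0.5:0.95

iou = box_iou(labels[:, 1:], detections[:, :4])    # (num_labels, num_detections) IoU matrix
correct_class = labels[:, 0:1] == detections[:, 5] # class-agreement matrix via broadcasting
correct = torch.zeros(detections.shape[0], iouv.numel(), dtype=torch.bool)
for i, threshold in enumerate(iouv):
    label_idx, det_idx = torch.where((iou >= threshold) & correct_class)
    correct[det_idx, i] = True                     # mark detections matched at this threshold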
cyberark/ark-sdk-python
ark_sdk_python/cli_services/dpa/vm/ark_dpa_vm_policies_editor_service.py
[ { "identifier": "ArkInquirerRender", "path": "ark_sdk_python/args/ark_args_formatter.py", "snippet": "class ArkInquirerRender(ConsoleRender):\n # pylint: disable=keyword-arg-before-vararg,protected-access\n def __init__(self, event_generator=None, *args, **kwargs):\n super().__init__(event_...
from datetime import date, timedelta from typing import Dict, Final, List, Optional from overrides import overrides from ark_sdk_python.args.ark_args_formatter import ArkInquirerRender from ark_sdk_python.auth.ark_isp_auth import ArkISPAuth from ark_sdk_python.cli_services.dpa.common.ark_dpa_base_policies_editor_service import ArkDPABasePoliciesEditorService from ark_sdk_python.models.ark_profile import ArkProfile from ark_sdk_python.models.cli_services.dpa.policies_editor.vm import ArkDPAVMGeneratePolicy from ark_sdk_python.models.common import ArkProtocolType, ArkWorkspaceType from ark_sdk_python.models.services import ArkServiceConfig from ark_sdk_python.models.services.dpa.policies.common import ArkDPADeletePolicy, ArkDPAGetPolicy, ArkDPARuleStatus, ArkDPAUserData from ark_sdk_python.models.services.dpa.policies.vm import ( ArkDPAVMAddPolicy, ArkDPAVMAuthorizationRule, ArkDPAVMAWSProviderData, ArkDPAVMAzureProviderData, ArkDPAVMConnectionDataType, ArkDPAVMConnectionInformation, ArkDPAVMFQDNOperator, ArkDPAVMFQDNRule, ArkDPAVMFQDNRulesConjunction, ArkDPAVMGCPProviderData, ArkDPAVMLocalEphemeralUserConnectionMethodData, ArkDPAVMOnPremProviderData, ArkDPAVMPolicy, ArkDPAVMPolicyListItem, ArkDPAVMProvider, ArkDPAVMRDPLocalEphemeralUserConnectionData, ArkDPAVMUpdatePolicy, ) from ark_sdk_python.services.dpa.policies.vm.ark_dpa_vm_policies_service import ArkDPAVMPoliciesService import inquirer
14,456
SERVICE_CONFIG: Final[ArkServiceConfig] = ArkServiceConfig( service_name='dpa-policies-vm-editor', required_authenticator_names=['isp'], optional_authenticator_names=[] ) DEFAULT_GENERATED_POLICY: Final[ArkDPAVMPolicy] = ArkDPAVMPolicy( policy_name='Default VM Policy', status=ArkDPARuleStatus.Draft, description='Auto generated vm policy', providers_data={}, start_date=date.today().strftime('%Y-%m-%d'), end_date=(date.today() + timedelta(days=7)).strftime('%Y-%m-%d'), user_access_rules=[], ) DEFAULT_GENERATED_AUTHORIZATION_RULE: Final[ArkDPAVMAuthorizationRule] = ArkDPAVMAuthorizationRule( rule_name='Default VM Rule', user_data=ArkDPAUserData(roles=['DpaAdmin'], groups=[], users=[]), connection_information=ArkDPAVMConnectionInformation( connect_as={}, grant_access=2, idle_time=10, days_of_week=[], full_days=True, hours_from='07:00', hours_to='17:00', time_zone='Asia/Jerusalem', ), ) DEFAULT_GENERATED_PROVIDERS: Final[Dict[ArkWorkspaceType, ArkDPAVMProvider]] = { ArkWorkspaceType.AWS: ArkDPAVMAWSProviderData(regions=[], tags=[{'key': 'value'}], vpc_ids=[], account_ids=[]), ArkWorkspaceType.AZURE: ArkDPAVMAzureProviderData( regions=[], tags=[{'key': 'value'}], resource_groups=[], vnet_ids=[], subscriptions=[] ), ArkWorkspaceType.GCP: ArkDPAVMGCPProviderData(regions=[], tags=[{'key': 'value'}], network_ids=[], projects=[]), ArkWorkspaceType.ONPREM: ArkDPAVMOnPremProviderData( fqdn_rules_conjunction=ArkDPAVMFQDNRulesConjunction.OR, fqdn_rules=[ArkDPAVMFQDNRule(operator=ArkDPAVMFQDNOperator.WILDCARD, computername_pattern='*', domain='default.com')], ), } DEFAULT_GENERATED_PROTOCOLS: Final[Dict[ArkProtocolType, ArkDPAVMConnectionDataType]] = { ArkProtocolType.SSH: 'root', ArkProtocolType.RDP: ArkDPAVMRDPLocalEphemeralUserConnectionData( local_ephemeral_user=ArkDPAVMLocalEphemeralUserConnectionMethodData(assign_groups={'Administrators'}) ), } SUPPORTED_SSH_PROTOCOL_PROVIDERS: Final[ArkWorkspaceType] = [ ArkWorkspaceType.AWS, ArkWorkspaceType.AZURE, ArkWorkspaceType.GCP, ArkWorkspaceType.ONPREM, ] SUPPORTED_RDP_PROTOCOL_PROVIDERS: Final[ArkWorkspaceType] = [ArkWorkspaceType.AWS, ArkWorkspaceType.AZURE, ArkWorkspaceType.ONPREM] class ArkDPAVMPoliciesEditorService(
SERVICE_CONFIG: Final[ArkServiceConfig] = ArkServiceConfig( service_name='dpa-policies-vm-editor', required_authenticator_names=['isp'], optional_authenticator_names=[] ) DEFAULT_GENERATED_POLICY: Final[ArkDPAVMPolicy] = ArkDPAVMPolicy( policy_name='Default VM Policy', status=ArkDPARuleStatus.Draft, description='Auto generated vm policy', providers_data={}, start_date=date.today().strftime('%Y-%m-%d'), end_date=(date.today() + timedelta(days=7)).strftime('%Y-%m-%d'), user_access_rules=[], ) DEFAULT_GENERATED_AUTHORIZATION_RULE: Final[ArkDPAVMAuthorizationRule] = ArkDPAVMAuthorizationRule( rule_name='Default VM Rule', user_data=ArkDPAUserData(roles=['DpaAdmin'], groups=[], users=[]), connection_information=ArkDPAVMConnectionInformation( connect_as={}, grant_access=2, idle_time=10, days_of_week=[], full_days=True, hours_from='07:00', hours_to='17:00', time_zone='Asia/Jerusalem', ), ) DEFAULT_GENERATED_PROVIDERS: Final[Dict[ArkWorkspaceType, ArkDPAVMProvider]] = { ArkWorkspaceType.AWS: ArkDPAVMAWSProviderData(regions=[], tags=[{'key': 'value'}], vpc_ids=[], account_ids=[]), ArkWorkspaceType.AZURE: ArkDPAVMAzureProviderData( regions=[], tags=[{'key': 'value'}], resource_groups=[], vnet_ids=[], subscriptions=[] ), ArkWorkspaceType.GCP: ArkDPAVMGCPProviderData(regions=[], tags=[{'key': 'value'}], network_ids=[], projects=[]), ArkWorkspaceType.ONPREM: ArkDPAVMOnPremProviderData( fqdn_rules_conjunction=ArkDPAVMFQDNRulesConjunction.OR, fqdn_rules=[ArkDPAVMFQDNRule(operator=ArkDPAVMFQDNOperator.WILDCARD, computername_pattern='*', domain='default.com')], ), } DEFAULT_GENERATED_PROTOCOLS: Final[Dict[ArkProtocolType, ArkDPAVMConnectionDataType]] = { ArkProtocolType.SSH: 'root', ArkProtocolType.RDP: ArkDPAVMRDPLocalEphemeralUserConnectionData( local_ephemeral_user=ArkDPAVMLocalEphemeralUserConnectionMethodData(assign_groups={'Administrators'}) ), } SUPPORTED_SSH_PROTOCOL_PROVIDERS: Final[ArkWorkspaceType] = [ ArkWorkspaceType.AWS, ArkWorkspaceType.AZURE, ArkWorkspaceType.GCP, ArkWorkspaceType.ONPREM, ] SUPPORTED_RDP_PROTOCOL_PROVIDERS: Final[ArkWorkspaceType] = [ArkWorkspaceType.AWS, ArkWorkspaceType.AZURE, ArkWorkspaceType.ONPREM] class ArkDPAVMPoliciesEditorService(
ArkDPABasePoliciesEditorService[ArkDPAVMPolicy, ArkDPAVMPolicyListItem, ArkDPAVMAddPolicy, ArkDPAVMUpdatePolicy, ArkDPAVMGeneratePolicy]
19
2023-11-13 09:24:31+00:00
24k
kampta/asic
train.py
[ { "identifier": "Logger", "path": "commons/logger.py", "snippet": "class Logger(SummaryWriter):\n\n def __init__(self, results_path, log_to_tb=False, log_to_wandb=True):\n super().__init__(results_path)\n self.results_path = results_path\n self.log_to_tb = log_to_tb\n self...
import argparse import torch import numpy as np import json import os import torch.nn.functional as F import wandb from torch import nn, optim from tqdm import tqdm from pathlib import Path from commons.logger import Logger, log_visuals from commons.distributed import get_rank, setup_distributed, reduce_loss_dict,\ get_world_size, primary from commons.utils import sample_tuples from datasets.cub import CUBDataset from datasets.in_memory import InMemoryDataset from datasets.spair import SpairDataset from datasets.utils import Augmentor from models.utils import accumulate, requires_grad from models.canonical import Canonical, CanonicalMLP from models.asic import Asic from losses.reg_losses import total_variation_loss from thirdparty.lpips.lpips import get_perceptual_loss from losses.matching_losses import LossCorrsSparse from thirdparty.gangealing.annealing import DecayingCosineAnnealingWarmRestarts,\ lr_cycle_iters
17,268
if args.flow_ssl: # in_size = extractor.num_patches # in_ch = extractor.feat_dim # TODO: read from the file and modfiy accordingly raise NotImplementedError else: in_size = args.img_size in_ch = 3 stn = Asic( in_ch, in_size, mf=args.channel_multiplier, bilinear=args.bilinear, padding_mode=args.padding_mode, use_tanh=args.use_tanh).to(device) if args.stn_ema: t_ema = Asic( in_ch, in_size, mf=args.channel_multiplier, bilinear=args.bilinear, padding_mode=args.padding_mode).to(device) accumulate(t_ema, stn, 0) else: t_ema = stn if args.mask_weight > 0: num_ch = 4 else: num_ch = 3 if args.use_mlp: canon = CanonicalMLP( input_dim=2, output_dim=num_ch, hidden_dim=args.mlp_hidden_dim, skip_layers=args.mlp_skip_layers, num_layers=args.mlp_num_layers, resolution=args.canon_size).to(device) else: canon = Canonical((1, num_ch, args.canon_size, args.canon_size), clamp=args.clamp).to(device) if args.canon_ema: if args.use_mlp: c_ema = CanonicalMLP( input_dim=2, output_dim=num_ch, hidden_dim=args.mlp_hidden_dim, skip_layers=args.mlp_skip_layers, num_layers=args.mlp_num_layers, resolution=args.canon_size).to(device) else: c_ema = Canonical((1, num_ch, args.canon_size, args.canon_size), clamp=args.clamp).to(device) accumulate(c_ema, canon, 0) else: c_ema = canon # Setup the perceptual loss function: loss_fn = get_perceptual_loss(args.loss_fn, device) if args.nbb_weight > 0.: nbb_loss_fn = LossCorrsSparse(flow_size=in_size, T=args.sparse_temp) nbb_loss_fn = nbb_loss_fn.to(device) else: nbb_loss_fn = None if args.canon_lr == 0: requires_grad(canon, False) canon_optim = None canon_sched = None else: canon_optim = optim.Adam(canon.parameters(), lr=args.canon_lr, betas=(0.9, 0.999), eps=1e-8) canon_sched = DecayingCosineAnnealingWarmRestarts( canon_optim, T_0=1, T_mult=args.tm, decay=args.decay) if primary(): print(f"{count_parameters(stn)} parameters in STN") print(f"{count_parameters(canon)} parameters in Canonical") # Setup optimizers and learning rate schedulers: t_optim = optim.Adam(stn.parameters(), lr=args.stn_lr, betas=(0.9, 0.999), eps=1e-8) t_sched = DecayingCosineAnnealingWarmRestarts( t_optim, T_0=1, T_mult=args.tm, decay=args.decay) # See if the start iteration can be recovered when resuming training: args.start_iter = 0 # Load pre-trained generator (and optionally resume from a GANgealing checkpoint): ckpt_path = Path(args.results) / args.exp_name / 'checkpoint.pt' try: print(f"Loading model from {ckpt_path}") ckpt = torch.load(ckpt_path) canon.load_state_dict(ckpt["canon"]) c_ema.load_state_dict(ckpt["c_ema"]) stn.load_state_dict(ckpt["t"]) t_ema.load_state_dict(ckpt["t_ema"]) t_optim.load_state_dict(ckpt["t_optim"]) t_sched.load_state_dict(ckpt["t_sched"]) if canon_optim is not None: canon_optim.load_state_dict(ckpt["canon_optim"]) if canon_optim is not None: canon_sched.load_state_dict(ckpt["canon_sched"]) args.start_iter = ckpt['iter'] print(f"Checkpoint found. Resuming from {args.start_iter} iterations") except FileNotFoundError: print("No checkpoint found. 
Training from scratch.") except KeyError: raise Exception # Move models to DDP if distributed training is enabled: if args.distributed: local_rank = int(os.environ["LOCAL_RANK"]) stn = nn.parallel.DistributedDataParallel( stn, device_ids=[local_rank], output_device=local_rank, broadcast_buffers=False) canon = nn.parallel.DistributedDataParallel( canon, device_ids=[local_rank], output_device=local_rank, broadcast_buffers=False) # Setup data if args.dset.lower() == 'folder': interim_dir = Path(args.img_dir).stem flow_dir = Path(args.flow_dir) / interim_dir / f'{args.bb}_s{args.bb_stride}'
def save_state_dict(ckpt_name, c_module, t_module, c_ema, t_ema, canon_optim, canon_sched, t_optim, t_sched, args, step, add_step_to_name=False): ckpt_dict = { "canon": c_module.state_dict(), "t": t_module.state_dict(), "c_ema": c_ema.state_dict(), "t_ema": t_ema.state_dict(), "t_optim": t_optim.state_dict(), "t_sched": t_sched.state_dict(), "canon_optim": canon_optim.state_dict() if canon_optim is not None else None, "canon_sched": canon_sched.state_dict() if canon_sched is not None else None, "args": args, "iter": step } torch.save(ckpt_dict, f'{results_path}/{ckpt_name}.pt') if add_step_to_name: torch.save(ckpt_dict, f'{results_path}/{ckpt_name}_{step:07d}.pt') def count_parameters(model): return sum(p.numel() for p in model.parameters() if p.requires_grad) def base_training_argparse(): parser = argparse.ArgumentParser(description="Training") # Main training arguments: parser.add_argument("--exp-name", type=str, required=True, help="Name for experiment run (used for logging)") parser.add_argument("--results", type=str, default='logs', help='path to the results directory') parser.add_argument("--seed", default=0, type=int, help='Random seed for this experiment') parser.add_argument("--dset", type=str, default='cub', choices=["cub", "spair"]) parser.add_argument("--img_dir", type=str, required=True, help="Path to real data") parser.add_argument("--flow_dir", type=str, default='processed_data', help="Path to preprocessed flows") parser.add_argument("--mask_threshold", type=int, default=1, help="Threshold for masking") parser.add_argument("--mask_bbox_pad", type=int, default=4, help="Crop with some padding") parser.add_argument("--img_size", default=256, type=int, help='resolution of real images') parser.add_argument("--iter", type=int, default=20000, help="total training iterations") parser.add_argument("--batch", type=int, default=20, help="batch size per-GPU") parser.add_argument("--num_workers", type=int, default=2, help="num workers for dataloader") # Dataset hyperparameters: parser.add_argument("--cub_idx", type=int, default=1, help="cub category") parser.add_argument("--split", default='test', choices=['test', 'val'], help='splits for training and validation') parser.add_argument("--use_coseg_masks", action='store_true') parser.add_argument("--num_parts", default=4, type=int) parser.add_argument("--spair_cat", default='cat', help="cub category") # Loss hyperparameters: parser.add_argument("--loss_fn", type=str, default='vgg_ssl', choices=['lpips', 'vgg_ssl'], help="The perceptual loss to use.") parser.add_argument("--rec_weight", type=float, default=1., help='weight for reconstruction loss') parser.add_argument("--nbb_weight", type=float, default=30., help='weight for nbb loss') parser.add_argument("--flow_tv_weight", default=15000.0, type=float, help="""Loss weighting of the Total Variation smoothness regularizer on the residual flow""") parser.add_argument("--equi_weight", default=1.0, type=float, help='Loss weighting for equivariance') parser.add_argument("--sparse_topk", type=int, default=None, help='number of sparse correspondences for loss') parser.add_argument("--sparse_temp", type=float, default=1, help='temperature for sparse loss') parser.add_argument("--mask_weight", default=0.1, type=float, help="""Loss weighting of the mask""") parser.add_argument("--parts_weight", default=10.0, type=float, help="""Loss weighting of the Parts Mask""") parser.add_argument("--use_nbb_parts", action='store_true') # Augmentation hyperparameters parser.add_argument("--jitter", default=[0.4, 
0.4, 0.2, 0.1], type=float, nargs='+', help='augmentation mode') parser.add_argument("--jitter_prob", default=0.8, type=float) parser.add_argument("--gray_prob", default=0.2, type=float) parser.add_argument("--solar_prob", default=0.2, type=float) parser.add_argument("--tps_scale", default=0.4, type=float) # Canonical space parser.add_argument("--unwarp_size", type=int, default=128, help="resolution for unwarping") # Learned Grid hyperparameters parser.add_argument("--canon_size", type=int, default=256, help="resolution of canonical space") parser.add_argument("--clamp", action='store_true', help="clamp values of canonical space (-1, 1)") # MLP Hyperparams parser.add_argument("--use_mlp", action='store_true') parser.add_argument("--mlp_hidden_dim", type=int, default=256, help="number of hidden units per layer") parser.add_argument("--mlp_num_layers", type=int, default=8, help="number of layers") parser.add_argument("--mlp_skip_layers", type=int, nargs='+', default=[4, 7], help="skip layers") # Model hyperparameters: parser.add_argument("--canon_lr", type=float, default=0.003, help="base learning rate of canonical space") parser.add_argument("--canon_ema", action='store_true', help='Enable ema for canonical space') parser.add_argument("--stn_ema", action='store_true', help='Enable ema for canonical space') parser.add_argument("--stn_lr", type=float, default=0.003, help="base learning rate of SpatialTransformer") parser.add_argument("--flow_ssl", action='store_true', help="""If specified, apply STN on SSL features)""") parser.add_argument("--channel_multiplier", default=0.5, type=float, help='channel multiplier for smaller models') parser.add_argument("--bilinear", action='store_true', help='Apply bilinear upsample/downsample') parser.add_argument("--padding_mode", default='border', choices=['border', 'zeros', 'reflection'], type=str, help="""Padding algorithm for when the STN samples beyond image boundaries""") parser.add_argument("--use_tanh", action='store_true', help='Use tanh activation at the flow output') parser.add_argument("--disable_tps", action='store_true', help='disable tps transformations') # Backbone parameters parser.add_argument("--bb", default='dino_vits8', choices=['dino_vits8', 'dino_vits16', 'dino_vitb8', 'dino_vitb16', 'vit_small_patch8_224', 'vit_small_patch16_224', 'vit_base_patch16_224'], help='backbone models') parser.add_argument('--bb_stride', default=2, type=int, help="stride.") # Visualization hyperparameters: parser.add_argument("--vis_every", type=int, default=500, help="""frequency with which visualizations are generated during training""") parser.add_argument("--vis_denseres", type=int, default=32, help='number of sparse correspondences to visualize') parser.add_argument("--ckpt_every", type=int, default=10000, help='frequency of checkpointing during training') parser.add_argument("--log_every", default=25, type=int, help='How frequently to log data to TensorBoard') parser.add_argument("--n_sample", type=int, default=4, help="""number of images (real and fake) to generate visuals for""") parser.add_argument("--disable_wandb", action='store_true', help='Disable wandb for debugging') # Learning Rate scheduler hyperparameters: parser.add_argument("--period", default=10000, type=float, help="""Period for cosine learning rate scheduler (measured in gradient steps)""") parser.add_argument("--decay", default=0.9, type=float, help="""Decay factor for the cosine learning rate scheduler""") parser.add_argument("--tm", default=2, type=int, help="""Period multiplier for 
the cosine learning rate scheduler""") return parser def train(args, train_dset, canon, stn, c_ema, t_ema, canon_optim, canon_sched, t_optim, t_sched, loss_fn, nbb_loss_fn, device, writer): # Record modules to make saving checkpoints easier: if args.distributed: t_module = stn.module c_module = canon.module else: t_module = stn c_module = canon # Initialize Spatial Transformation Generator (Thin Plate Spline) aug = Augmentor(jitter=args.jitter, jitter_prob=args.jitter_prob, gray_prob=args.gray_prob, solar_prob=args.solar_prob, tps_scale=args.tps_scale).to(device) # A model checkpoint will be saved whenever the learning rate is zero: zero_lr_iters = lr_cycle_iters(0, args.period, args.iter, args.tm) early_ckpt_iters = set(zero_lr_iters) early_vis_iters = {100} early_vis_iters.update(early_ckpt_iters) # Initialize various training variables and constants: rec_loss = torch.tensor(0.0, device='cuda') flow_tv_loss = torch.tensor(0.0, device='cuda') nbb_loss = torch.tensor(0.0, device='cuda') equi_loss = torch.tensor(0.0, device='cuda') mask_loss = torch.tensor(0.0, device='cuda') parts_loss = torch.tensor(0.0, device='cuda') accum = 0.5 ** (32 / (10 * 1000)) # Resize function for perceptual loss if args.unwarp_size != args.img_size: scale_factor = args.unwarp_size / args.img_size resize_fn = nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=True) else: resize_fn = nn.Identity() # Pre-load on GPU # Assuming ~30 images of size 256x256, takes up ~23 MB device memory has_gt_kp = train_dset.kps is not None all_imgs = train_dset.imgs = train_dset.imgs.to(device) # / 127.5 - 1.0 all_masks = train_dset.masks = train_dset.masks.unsqueeze(1).to(device) all_parts = train_dset.parts = train_dset.parts.to(device) if has_gt_kp: all_kps = train_dset.kps = train_dset.kps.to(device) # Pseudo GT pseudo_kps = train_dset.pseudo_kps = torch.from_numpy(train_dset.pseudo_kps).to(device) num_parts = train_dset.num_parts loss_topk = pseudo_kps.shape[2] if args.sparse_topk is None else min(args.sparse_topk, pseudo_kps.shape[2]) # Progress bar for monitoring training: pbar = range(args.start_iter, args.iter) if primary(): pbar = tqdm(pbar, initial=args.start_iter, dynamic_ncols=True, smoothing=0.2) pck_pairs, pck_cycles = log_visuals( c_ema, t_ema, train_dset, 0, writer, vis_sample=args.n_sample, vis_denseres=args.vis_denseres) best_pck_pairs = pck_pairs best_pck_cycles = pck_cycles requires_grad(stn, True) requires_grad(canon, True) for idx in pbar: # main training loop i = idx + args.start_iter + 1 #################################### # TRAIN STN and CANON # #################################### N = args.batch pairs = sample_tuples(len(train_dset), count=N // 2) src_idx, trg_idx = pairs[:, 0], pairs[:, 1] all_idx = np.concatenate([src_idx, trg_idx]) batch_imgs = all_imgs[all_idx] batch_parts = all_parts[all_idx] if args.use_nbb_parts: batch_masks = (batch_parts != num_parts).unsqueeze(1).float() batch_masks_resized = resize_fn(batch_masks) else: batch_masks = all_masks[all_idx] batch_masks_resized = resize_fn(batch_masks) kp1 = pseudo_kps[src_idx, trg_idx][:, :loss_topk] # (N/2, K, 4) kp2 = pseudo_kps[trg_idx, src_idx][:, :loss_topk] # (N/2, K, 4) batch_kps_vis = kp1[..., 2] > 0 # (N/2, K) batch_kps_wt = torch.ones_like(batch_kps_vis).float() # (N/2, K) batch_kps = torch.cat([kp1, kp2])[..., :2] # (N, K, 2) if args.use_nbb_parts: nbb_parts_vis = (kp1[..., 3] != args.num_parts) * (kp2[..., 3] != args.num_parts) batch_kps_wt *= nbb_parts_vis # Map the images to the canonical space flow, delta_flow = 
stn(batch_imgs) unwarped = canon.unwarp(flow, args.unwarp_size) # NBB weight if args.nbb_weight > 0.: nbb_loss = nbb_loss_fn(flow[:N//2], flow[N//2:], batch_kps[:N//2], batch_kps[N//2:], batch_kps_vis, batch_kps_wt) if args.equi_weight > 0.: # Apply tps transformations if args.disable_tps: batch_imgs_t = aug.forward_geom(aug.forward_color(batch_imgs)) batch_masks_t = aug.forward_geom(batch_masks, fixed=True) # Apply tps to flow flow_tf = aug.forward_geom(flow.permute(0, 3, 1, 2), fixed=True).permute(0, 2, 3, 1) else: batch_imgs_t = aug.forward_tps(aug.forward_color(batch_imgs)) batch_masks_t = aug.forward_tps(batch_masks, fixed=True) # Apply tps to flow flow_tf = aug.forward_tps(flow.permute(0, 3, 1, 2), fixed=True).permute(0, 2, 3, 1) batch_masks_t = torch.where(batch_masks_t > 0.5, 1., 0.) batch_masks_t_resized = resize_fn(batch_masks_t) vis = batch_masks_t * batch_masks # Flow of tps image flow_ft, _ = stn(batch_imgs_t) unwarped_ft = canon.unwarp(flow_ft, args.unwarp_size) equi_loss = F.l1_loss(flow_ft, flow_tf.detach(), reduction='none') \ + F.l1_loss(flow_tf, flow_ft.detach(), reduction='none') equi_loss = (equi_loss * vis.squeeze(1).unsqueeze(-1)).mean() if args.mask_weight > 0: unwarped_mask = unwarped[:, [3]] mask_loss = F.binary_cross_entropy_with_logits(unwarped_mask, batch_masks_resized) if args.equi_weight > 0.: unwarped_ft_mask = unwarped_ft[:, [3]] mask_loss = 0.5 * mask_loss + \ 0.5 * F.binary_cross_entropy_with_logits( unwarped_ft_mask, batch_masks_t_resized) # Get Total Variation Loss on flow if args.flow_tv_weight > 0: flow_tv_loss = total_variation_loss(delta_flow) # Reconstruction loss if args.rec_weight > 0: unwarped = unwarped * batch_masks_resized resized_img = resize_fn(batch_imgs) * batch_masks_resized rec_loss = loss_fn(unwarped[:, :3], resized_img).mean() if args.equi_weight > 0.: unwarped_ft = unwarped_ft * batch_masks_t_resized resized_img = resize_fn(batch_imgs_t) * batch_masks_t_resized rec_loss = 0.5*rec_loss + 0.5 * loss_fn(unwarped_ft[:, :3], resized_img).mean() # Parts Loss if args.parts_weight > 0.: # Calculate the centroid of each part part_centroids = torch.zeros(num_parts+1, 2, dtype=torch.float, device=device) part_centroids.index_add_(0, batch_parts.reshape(-1), flow.reshape(-1, 2)) part_counts = torch.bincount(batch_parts.reshape(-1)).float() part_centroids = (part_centroids/part_counts.unsqueeze(-1)).detach() # Compute the loss as the distance of the centroid from the flows parts_loss = F.l1_loss(flow, part_centroids[batch_parts], reduction='none') parts_loss = (parts_loss * batch_masks.squeeze(1).unsqueeze(-1)).mean() loss_dict = {"p": rec_loss, "ftv": flow_tv_loss, "nbb": nbb_loss, "equi": equi_loss, "mask": mask_loss, 'parts': parts_loss} canon.zero_grad() stn.zero_grad() full_stn_loss = args.rec_weight * rec_loss + \ args.flow_tv_weight * flow_tv_loss + \ args.nbb_weight * nbb_loss + args.equi_weight * equi_loss + \ args.mask_weight * mask_loss + args.parts_weight * parts_loss full_stn_loss.backward() t_optim.step() epoch = max(0, i / args.period) t_sched.step(epoch) if args.canon_lr > 0: canon_optim.step() canon_sched.step(epoch) if args.stn_ema: accumulate(t_ema, t_module, accum) if args.canon_ema: accumulate(c_ema, c_module, accum) # Aggregate loss information across GPUs loss_reduced = reduce_loss_dict(loss_dict) if primary(): # Display losses on the progress bar: perceptual_loss_val = loss_reduced["p"].mean().item() flow_tv_loss_val = loss_reduced["ftv"].mean().item() nbb_loss_val = loss_reduced["nbb"].mean().item() equi_loss_val = 
loss_reduced["equi"].mean().item() mask_loss_val = loss_reduced["mask"].mean().item() parts_loss_val = loss_reduced["parts"].mean().item() p_str = f"rec: {perceptual_loss_val:.4f}; " \ if args.rec_weight > 0 else "" ftv_str = f"ftv: {flow_tv_loss_val:.6f}; " \ if args.flow_tv_weight > 0 else "" nbb_str = f"nbb: {nbb_loss_val:.6f}; " \ if args.nbb_weight > 0 else "" equi_str = f"equi: {equi_loss_val:.6f}; " \ if args.equi_weight > 0 else "" mask_str = f"mask: {mask_loss_val:.6f}; " \ if args.mask_weight > 0 else "" parts_str = f"parts: {parts_loss_val:.6f}; " \ if args.parts_weight > 0 else "" pbar.set_description( f"{p_str}{nbb_str}{equi_str}{mask_str}{ftv_str}{parts_str}") # Log losses and others metrics to TensorBoard: if i % args.log_every == 0 or i in early_ckpt_iters or i == 1: writer.add_scalars('', { 'Loss/Full': full_stn_loss.item(), 'Loss/Reconstruction': perceptual_loss_val, 'Loss/TotalVariation': flow_tv_loss_val, 'Loss/NBB': nbb_loss_val, 'Loss/Equi': equi_loss_val, 'Loss/Mask': mask_loss_val, 'Loss/Parts': parts_loss_val, 'Progress/STN_LearningRate': t_sched.get_last_lr()[0], 'Progress/Canon_LearningRate': canon_sched.get_last_lr()[0] if args.canon_lr > 0 else 0. }, i) if (i % args.ckpt_every == 0 or i in early_ckpt_iters): save_state_dict( 'checkpoint', c_module, t_module, c_ema, t_ema, canon_optim, canon_sched, t_optim, t_sched, args, i, True) if i % args.vis_every == 0 or i in early_vis_iters or i == 1: # Save visualizations to Tens orBoard if i in early_ckpt_iters: pbar.write(f'{i:07}: LR = {t_sched.get_last_lr()[0]}') pck_pairs, pck_cycles = log_visuals( c_ema, t_ema, train_dset, i, writer, vis_sample=args.n_sample, vis_denseres=args.vis_denseres) if has_gt_kp and best_pck_cycles[2][0] < pck_cycles[2][0]: best_pck_pairs = pck_pairs for k, pck_cycle in enumerate(pck_cycles): best_pck_cycles[k] = pck_cycle save_state_dict( 'best', c_module, t_module, c_ema, t_ema, canon_optim, canon_sched, t_optim, t_sched, args, i) pck_summary = {} if has_gt_kp: pck_summary.update({ 'Progress/PCK@0.10': pck_pairs[0] * 100, 'Progress/PCK@0.01': pck_pairs[-1] * 100, 'Progress/BestPCK@0.10': best_pck_pairs[0] * 100, 'Progress/BestPCK@0.01': best_pck_pairs[-1] * 100, }) for k, pck_cycle in enumerate(pck_cycles): pck_summary[f'Progress/{k+2}-PCK@0.10'] = pck_cycle[0] * 100 pck_summary[f'Progress/{k+2}-PCK@0.01'] = pck_cycle[-1] * 100 if has_gt_kp: pck_summary[f'Progress/Best{k+2}-PCK@0.10'] = best_pck_cycles[k][0] * 100 pck_summary[f'Progress/Best{k+2}-PCK@0.01'] = best_pck_cycles[k][-1] * 100 writer.add_scalars('', pck_summary, i) if __name__ == "__main__": device = "cuda" parser = base_training_argparse() args = parser.parse_args() # Setup distributed PyTorch and create results directory: args.distributed = setup_distributed() results_path = os.path.join(args.results, args.exp_name) if primary(): # exp_id = hashlib.md5(args.exp_name.encode('utf-8')).hexdigest() use_wandb = not args.disable_wandb if use_wandb: wandb.init(project="asic", entity="kampta", name=args.exp_name, reinit=True) wandb.config.update(args) writer = Logger(results_path, log_to_wandb=use_wandb) with open(f'{results_path}/opt.txt', 'w') as f: json.dump(args.__dict__, f, indent=2) else: writer = None # Seed RNG: torch.manual_seed(args.seed * get_world_size() + get_rank()) np.random.seed(args.seed * get_world_size() + get_rank()) # UNet output is same size as input by default # When input are SSL features, we want to upsample # the flow when loss is computed in the image space # not upsammple the flow when loss is computed in 
the # SSL featuremap space # Initialize U-Net for regressing flow if args.flow_ssl: # in_size = extractor.num_patches # in_ch = extractor.feat_dim # TODO: read from the file and modfiy accordingly raise NotImplementedError else: in_size = args.img_size in_ch = 3 stn = Asic( in_ch, in_size, mf=args.channel_multiplier, bilinear=args.bilinear, padding_mode=args.padding_mode, use_tanh=args.use_tanh).to(device) if args.stn_ema: t_ema = Asic( in_ch, in_size, mf=args.channel_multiplier, bilinear=args.bilinear, padding_mode=args.padding_mode).to(device) accumulate(t_ema, stn, 0) else: t_ema = stn if args.mask_weight > 0: num_ch = 4 else: num_ch = 3 if args.use_mlp: canon = CanonicalMLP( input_dim=2, output_dim=num_ch, hidden_dim=args.mlp_hidden_dim, skip_layers=args.mlp_skip_layers, num_layers=args.mlp_num_layers, resolution=args.canon_size).to(device) else: canon = Canonical((1, num_ch, args.canon_size, args.canon_size), clamp=args.clamp).to(device) if args.canon_ema: if args.use_mlp: c_ema = CanonicalMLP( input_dim=2, output_dim=num_ch, hidden_dim=args.mlp_hidden_dim, skip_layers=args.mlp_skip_layers, num_layers=args.mlp_num_layers, resolution=args.canon_size).to(device) else: c_ema = Canonical((1, num_ch, args.canon_size, args.canon_size), clamp=args.clamp).to(device) accumulate(c_ema, canon, 0) else: c_ema = canon # Setup the perceptual loss function: loss_fn = get_perceptual_loss(args.loss_fn, device) if args.nbb_weight > 0.: nbb_loss_fn = LossCorrsSparse(flow_size=in_size, T=args.sparse_temp) nbb_loss_fn = nbb_loss_fn.to(device) else: nbb_loss_fn = None if args.canon_lr == 0: requires_grad(canon, False) canon_optim = None canon_sched = None else: canon_optim = optim.Adam(canon.parameters(), lr=args.canon_lr, betas=(0.9, 0.999), eps=1e-8) canon_sched = DecayingCosineAnnealingWarmRestarts( canon_optim, T_0=1, T_mult=args.tm, decay=args.decay) if primary(): print(f"{count_parameters(stn)} parameters in STN") print(f"{count_parameters(canon)} parameters in Canonical") # Setup optimizers and learning rate schedulers: t_optim = optim.Adam(stn.parameters(), lr=args.stn_lr, betas=(0.9, 0.999), eps=1e-8) t_sched = DecayingCosineAnnealingWarmRestarts( t_optim, T_0=1, T_mult=args.tm, decay=args.decay) # See if the start iteration can be recovered when resuming training: args.start_iter = 0 # Load pre-trained generator (and optionally resume from a GANgealing checkpoint): ckpt_path = Path(args.results) / args.exp_name / 'checkpoint.pt' try: print(f"Loading model from {ckpt_path}") ckpt = torch.load(ckpt_path) canon.load_state_dict(ckpt["canon"]) c_ema.load_state_dict(ckpt["c_ema"]) stn.load_state_dict(ckpt["t"]) t_ema.load_state_dict(ckpt["t_ema"]) t_optim.load_state_dict(ckpt["t_optim"]) t_sched.load_state_dict(ckpt["t_sched"]) if canon_optim is not None: canon_optim.load_state_dict(ckpt["canon_optim"]) if canon_optim is not None: canon_sched.load_state_dict(ckpt["canon_sched"]) args.start_iter = ckpt['iter'] print(f"Checkpoint found. Resuming from {args.start_iter} iterations") except FileNotFoundError: print("No checkpoint found. 
Training from scratch.") except KeyError: raise Exception # Move models to DDP if distributed training is enabled: if args.distributed: local_rank = int(os.environ["LOCAL_RANK"]) stn = nn.parallel.DistributedDataParallel( stn, device_ids=[local_rank], output_device=local_rank, broadcast_buffers=False) canon = nn.parallel.DistributedDataParallel( canon, device_ids=[local_rank], output_device=local_rank, broadcast_buffers=False) # Setup data if args.dset.lower() == 'folder': interim_dir = Path(args.img_dir).stem flow_dir = Path(args.flow_dir) / interim_dir / f'{args.bb}_s{args.bb_stride}'
train_dset = InMemoryDataset(
9
2023-11-14 16:43:16+00:00
24k
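The training loop in the record above keeps EMA copies of the models via `accumulate(t_ema, t_module, accum)` with `accum = 0.5 ** (32 / (10 * 1000))`. A hedged, generic sketch of that kind of parameter averaging follows; the function name and signature here are illustrative and are not the repository's exact API:

import torch

def ema_accumulate(ema_model: torch.nn.Module, model: torch.nn.Module, decay: float) -> None:
    # In-place exponential moving average: ema <- decay * ema + (1 - decay) * current.
    ema_params = dict(ema_model.named_parameters())
    for name, param in model.named_parameters():
        ema_params[name].data.mul_(decay).add_(param.data, alpha=1.0 - decay)

decay = 0.5 ** (32 / (10 * 1000))   # the accumulation constant used in the record above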
atlantic-quantum/Shipyard
tests/printers/visualizer/test_visualize_pulse_sequences.py
[ { "identifier": "CoreType", "path": "shipyard/awg_core/awg_core.py", "snippet": "class CoreType(Enum):\n \"\"\"Enumeration of AWG Core types\"\"\"\n\n HD = \"HD\"\n QA = \"QA\"\n SG = \"SG\"" }, { "identifier": "ActivationRecord", "path": "shipyard/call_stack.py", "snippet": ...
import codecs import json import numpy as np import pytest from pathlib import Path from shipyard.awg_core.awg_core import CoreType from shipyard.call_stack import ActivationRecord, ARType from shipyard.compiler import Compiler from shipyard.duration import Duration, TimeUnits from shipyard.passes.duration_transformer import DurationTransformer from shipyard.passes.resolve_io_declaration import ResolveIODeclaration from shipyard.passes.semantic_analysis.semantic_analyzer import SemanticAnalyzer from shipyard.printers.visualizer.visualize_pulse_sequence import PulseVisualizer from shipyard.printers.zi import waveform_functions from shipyard.setup.internal import Frame, Instrument, Port, SetupInternal
17,486
final_call_stack = { "nested_subroutines": {"dummy": 16}, "complex_arrays": { "dummy": 4, "two_d": [[1, 2], [3, 4], [5, 6]], "my_arr": [complex(1, 0), complex(0, 1), complex(0.8, 0.6)], "second": [1, 2, 3, 4], }, } def files() -> list[str]: base_path = Path(__file__).parent.parent.parent / "qasm/visualize_pulse" plen = len(base_path.parts) FILES = list(base_path.glob("**/*.qasm")) return [str(Path(*path.parts[plen:])) for path in FILES] QASM_FILES = files() def common_files() -> list[str]: files = [] cut = -5 for q_file in QASM_FILES: files.append(q_file[:cut]) return files COMMON_FILES = common_files() @pytest.fixture(name="basic_setup") def fixture_basic_setup() -> SetupInternal: json_path = Path(__file__).parent.parent.parent / "setups/interpreter.json" return SetupInternal.from_json(json_path) def test_visit_ClassicalDeclaration(): setup_path = Path(__file__).parent.parent.parent / "setups/complex.json" qasm_path = Path(__file__).parent.parent.parent / "qasm/interpreter/phase_freq.qasm" compiler = Compiler(qasm_path, setup_path) qasm_ast = compiler.load_program(qasm_path) ResolveIODeclaration().visit(qasm_ast) SemanticAnalyzer().visit(qasm_ast) DurationTransformer().visit(qasm_ast) pv = PulseVisualizer( SetupInternal.from_json(setup_path), waveform_functions.__dict__ )
final_call_stack = {
    "nested_subroutines": {"dummy": 16},
    "complex_arrays": {
        "dummy": 4,
        "two_d": [[1, 2], [3, 4], [5, 6]],
        "my_arr": [complex(1, 0), complex(0, 1), complex(0.8, 0.6)],
        "second": [1, 2, 3, 4],
    },
}


def files() -> list[str]:
    base_path = Path(__file__).parent.parent.parent / "qasm/visualize_pulse"
    plen = len(base_path.parts)
    FILES = list(base_path.glob("**/*.qasm"))
    return [str(Path(*path.parts[plen:])) for path in FILES]


QASM_FILES = files()


def common_files() -> list[str]:
    files = []
    cut = -5
    for q_file in QASM_FILES:
        files.append(q_file[:cut])
    return files


COMMON_FILES = common_files()


@pytest.fixture(name="basic_setup")
def fixture_basic_setup() -> SetupInternal:
    json_path = Path(__file__).parent.parent.parent / "setups/interpreter.json"
    return SetupInternal.from_json(json_path)


def test_visit_ClassicalDeclaration():
    setup_path = Path(__file__).parent.parent.parent / "setups/complex.json"
    qasm_path = Path(__file__).parent.parent.parent / "qasm/interpreter/phase_freq.qasm"
    compiler = Compiler(qasm_path, setup_path)
    qasm_ast = compiler.load_program(qasm_path)
    ResolveIODeclaration().visit(qasm_ast)
    SemanticAnalyzer().visit(qasm_ast)
    DurationTransformer().visit(qasm_ast)
    pv = PulseVisualizer(
        SetupInternal.from_json(setup_path), waveform_functions.__dict__
    )
activation_record = ActivationRecord(
1
2023-11-16 17:37:29+00:00
24k
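For readability, here is the pass pipeline that test_visit_ClassicalDeclaration above exercises, condensed into a standalone sketch. Every call is taken verbatim from the record; the two paths are placeholders for the repository's setups/ and qasm/ fixtures.

from pathlib import Path

from shipyard.compiler import Compiler
from shipyard.passes.resolve_io_declaration import ResolveIODeclaration
from shipyard.passes.semantic_analysis.semantic_analyzer import SemanticAnalyzer
from shipyard.passes.duration_transformer import DurationTransformer
from shipyard.printers.visualizer.visualize_pulse_sequence import PulseVisualizer
from shipyard.printers.zi import waveform_functions
from shipyard.setup.internal import SetupInternal

setup_path = Path("setups/complex.json")              # placeholder path
qasm_path = Path("qasm/interpreter/phase_freq.qasm")  # placeholder path

# Parse the OpenQASM program, then run the analysis passes over the AST in order.
compiler = Compiler(qasm_path, setup_path)
qasm_ast = compiler.load_program(qasm_path)
ResolveIODeclaration().visit(qasm_ast)
SemanticAnalyzer().visit(qasm_ast)
DurationTransformer().visit(qasm_ast)

# The visualizer combines the hardware setup with the ZI waveform function table.
pv = PulseVisualizer(SetupInternal.from_json(setup_path), waveform_functions.__dict__)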
quantuminterface/qiclib
src/qiclib/code/qi_jobs.py
[ { "identifier": "TaskRunner", "path": "src/qiclib/hardware/taskrunner.py", "snippet": "class TaskRunner(PlatformComponent):\n \"\"\"Driver to control the Taskrunner on the Hardware Platform.\"\"\"\n\n def __init__(\n self,\n name: str,\n connection,\n controller,\n ...
import os
import json
import functools
import warnings
import numpy as np
import qiclib.packages.utility as util
from abc import abstractmethod
from typing import Dict, List, Callable, Optional, Union, Set, Any, Type
from ..hardware.taskrunner import TaskRunner
from ..experiment.qicode.data_provider import DataProvider
from ..experiment.qicode.data_handler import DataHandler
from .qi_seq_instructions import SequencerInstruction
from .qi_var_definitions import (
    _QiVariableBase,
    _QiCalcBase,
    _QiConstValue,
    QiCellProperty,
    QiExpression,
    QiVariableSet,
    QiCondition,
)
from .qi_pulse import QiPulse
from .qi_visitor import (
    QiCMContainedCellVisitor,
    QiResultCollector,
    QiVarInForRange,
)
from .qi_prog_builder import QiProgramBuilder
from .qi_types import (
    QiType,
    QiPostTypecheckVisitor,
    QiTypeFallbackVisitor,
    _TypeDefiningUse,
)
from .qi_types import _TypeDefiningUse
from .qi_types import _TypeDefiningUse
from .qi_types import (
    _TypeConstraintReasonQiCommand,
    _IllegalTypeReason,
    _add_equal_constraints,
)
from .qi_types import (
    _TypeConstraintReasonQiCommand,
    _IllegalTypeReason,
    _add_equal_constraints,
)
from .analysis.qi_insert_mem_parameters import (
    insert_recording_offset_store_commands,
    insert_manipulation_pulse_frequency_store_commands,
    insert_readout_pulse_frequency_store_commands,
)
from .qi_simulate import Simulator
from ..experiment.qicode.base import QiCodeExperiment
from qiclib.experiment.qicode.base import _TaskrunnerSettings
from .qi_visitor import QiStringifyJob
17,259
if cell_map is None: cell_map = list(range(len(self.cells))) str_map = ", ".join([f"q[{i}] -> sample[{m}]" for i, m in enumerate(cell_map)]) exp._job_representation = f"{self}\n\nmapped as {str_map} to\n\n{sample}" return exp def _prepare_experiment_params( self, controller, sample: Optional[QiSample] = None, averages: int = 1, cell_map: Optional[List[int]] = None, data_collection=None, use_taskrunner=False, ): if len(self.cells) > len(controller.cell): raise IndexError( f"This job requires {len(self.cells)} cells but only " f"{len(controller.cell)} are available in the QiController." ) if data_collection is None: if self._custom_processing is None: data_collection = "average" else: data_collection = "custom" # If float, convert averages to int averages = int(averages) if sample is None: sample = QiSample(len(controller.cell)) elif len(sample) < len(self.cells): raise ValueError( "Need to submit a QiSample with at least as many cells as the job " f"has ({len(self.cells)}), but only {len(sample)} provided." ) if cell_map is None: # Use the first cells of the sample cell_map = list(range(len(self.cells))) else: if len(cell_map) != len(self.cells): raise ValueError( "cell_map needs to have as many entries as the job has cells, but " f"{len(cell_map)} entries given and {len(self.cells)} required!" ) if len(set(cell_map)) != len(cell_map): raise ValueError("Duplicate values not allowed in cell_map!") if any(m < 0 or m >= len(sample) for m in cell_map): raise IndexError( "cell_map values can only point to valid indices within the passed" f" QiSample object, i.e. values between 0 and {len(sample) - 1}." ) # Translate cell_map from sample cells ("cells") to QiController cells cell_map = [sample.cell_map[c] for c in cell_map] if any(c < 0 or c >= len(controller.cell) for c in cell_map): raise ValueError( "The QiSample cell_map can only reference available QiController " f"cells, i.e. between 0 and {len(controller.cell) - 1}." ) self._build_program(sample, cell_map) for_range_list = [] for cell in self.cells: for_range_list.append(self.cell_seq_dict[cell]._for_range_list) return ( controller, self.cells, self._get_sequencer_codes(), averages, for_range_list, cell_map, self._var_reg_map, data_collection, use_taskrunner, ) def run( self, controller, sample: Optional[QiSample] = None, averages: int = 1, cell_map: Optional[List[int]] = None, data_collection=None, use_taskrunner=False, ): """executes the job and returns the results :param controller: the QiController on which the job should be executed :param sample: the QiSample object used for execution of pulses and extracts parameters for the experiment :param averages: the number of executions that should be averaged, by default 1 :param cell_map: A list containing the indices of the cells :param data_collection: the data_collection mode for the result, by default "average" :param use_taskrunner: if the execution should be handled by the Taskrunner Some advanced schemes and data_collection modes are currently only supported by the Taskrunner and not yet by a native control flow. """ exp = self.create_experiment( controller, sample, averages, cell_map, data_collection, use_taskrunner ) exp.run() def run_with_data_callback(self, on_new_data: Callable[[dict], None]): pass def run_streamed(self): pass def set_custom_data_processing( self, file: str, params: Optional[List] = None, converter: Optional[Callable[[List], List]] = None,
# Copyright © 2017-2023 Quantum Interface (quantuminterface@ipe.kit.edu) # Richard Gebauer, IPE, Karlsruhe Institute of Technology # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. """ This is the main module of QiCode. Here, all important commands write QiPrograms are defined. """ class QiResult: """Result of an experiment. Can be accessed via :python:`job.cells[cell_index].data("result name")`. Where :python:`cells` denotes a :class:`QiCells` object and :python:`cell_index` an integer. The actual data can be retrieved as a numpy array using the :meth:`get` Method Example ------- .. code-block:: python qic: QiController = ... sample: QiSample = ... with QiJob() as job: q = QiCells(1) Readout(q[0], save_to="result") job.run(qic, sample, averages=1000) data = job.cells[0].data("result") :param name: The name of the variable, by default None """ def __init__(self, name: Optional[str] = None) -> None: self._cell = None self.data = None self.recording_count = 0 self.name: str = "" if name is None else name def get(self) -> np.ndarray: """gets the data of the result as a numpy array :return: The data of the experiment """ return np.array(self.data) def __str__(self) -> str: return f'QiResult("{self.name}")' class QiCommand: """Base class of every Job command. Provides _relevant_cells, containing every cell used for the execution of the command. Provides _associated_variable_set, containing every variable needed for the execution of the command. """ def __init__(self) -> None: self._associated_variable_set = QiVariableSet() self._relevant_cells: Set[QiCell] = set() @abstractmethod def accept(self, visitor, *input): raise RuntimeError( f"{self.__class__} doesn't implement `accept`. This is a bug." ) def is_variable_relevant(self, variable: _QiVariableBase) -> bool: return variable in self._associated_variable_set def add_associated_variable(self, x): if isinstance(x, _QiVariableBase): self._associated_variable_set.add(x) def __str__(self) -> str: return "cQiCommand" def _stringify(self) -> str: raise NotImplementedError(f"_stringify not implemented for {repr(self)}") _QiJobReference = None def _add_cmd_to_job(cmd: QiCommand): if _QiJobReference is None: raise RuntimeError("Can not use command outside QiJob context manager.") _QiJobReference._add_command(cmd) def _set_job_reference(job): """Used for testing purposes""" # pylint: disable=global-statement global _QiJobReference _QiJobReference = job def _delete_job_reference(): """Used for testing purposes""" # pylint: disable=global-statement global _QiJobReference _QiJobReference = None class QiCell: """A QiCell is an abstract representation of the qubit/cell the program is run on. Usually, a single :python:`QiCell` is not instantiated, but instead a :class:`QiCells` object. For a single :python:`QiCell`, use instead :python:`QiCells(1)` A :python:`QiCell` must be instantiated inside within a :class:`QiJob` context. 
The :python:`QiCell` object can be used to get properties that are defined on :class:`QiSamples <QiSample>`. For this, index the :python:`QiCell` object using the name of the property: .. code-block:: python q: QiCell = ... t1_time = q["t1"] The actual value for the accessed property (in the example above, the T1 time) is filled in when executing a :class:`QiJob` and providing the actual sample. **Tasks of the QiCell**: - Saves the pulses needed for program execution. - Provides a dictionary functionality to define commonly used durations/properties. - Implements a Sequencer object, which contains the assembler program after compilation. :param cellID: A unique ID :raises RuntimeError: When the :python:`QiCell` is instantiated outside a `QiJob` """ def __init__(self, cellID: int): if not isinstance(_QiJobReference, QiJob): raise RuntimeError("QiCell can't be used outside of QiJob.") self.cellID = cellID self.manipulation_pulses: List[QiPulse] = [] self.flux_pulses: List[QiPulse] = [] self.readout_pulses: List[QiPulse] = [] self._result_container: Dict[str, QiResult] = {} # The order in which recorded values are assigned to which result container self._result_recording_order: List[QiResult] = [] self._unresolved_property: Set[QiCellProperty] = set() self._job_ref = _QiJobReference self._relevant_vars: Set[_QiVariableBase] = set() # These attributes are determined by dataflow analyses self._initial_manip_freq: float = None self._initial_readout_freq: float = None self._initial_rec_offset: float = None self._rec_length: Union[int, float, QiCellProperty] = None self._properties: Dict[QiCellProperty, Any] = {} def __getitem__(self, key): if _QiJobReference != self._job_ref: raise RuntimeError( "Tried getting values for cells registered to other QiJob" ) prop = self._properties.get(key, QiCellProperty(self, key)) if isinstance(prop, QiCellProperty): self._unresolved_property.add(key) return prop def __setitem__(self, key, value): if _QiJobReference != self._job_ref: raise RuntimeError( "Tried setting values for cells registered to other QiJob" ) self._properties[key] = value def __call__(self, qic): return qic.cell[self.qic_cell] def get_properties(self): return self._properties.copy() def add_pulse(self, pulse: QiPulse): if pulse not in self.manipulation_pulses: self.manipulation_pulses.append(pulse) if len(self.manipulation_pulses) > 13: raise RuntimeError("Too many pulses in use") return self.manipulation_pulses.index(pulse) + 1 # index 0 and 15 are reserved @property def initial_manipulation_frequency(self): if self._initial_manip_freq is None: if len(self.manipulation_pulses) > 0: warnings.warn( "Manipulation pulses without frequency given, using 90 MHz." ) return 90e6 # Default frequency freq = self._initial_manip_freq return freq() if isinstance(freq, QiCellProperty) else freq def add_recording_length(self, length): if self._rec_length is None: self._rec_length = length elif ( not self._rec_length._equal_syntax(length) if isinstance(self._rec_length, QiExpression) else self._rec_length != length ): raise RuntimeError( f"Cell {self.cellID}: Multiple definitions of recording length used." 
) def add_readout_pulse(self, pulse: QiPulse): if pulse not in self.readout_pulses: self.readout_pulses.append(pulse) if len(self.readout_pulses) > 13: raise RuntimeError("Too many pulses in use") return self.readout_pulses.index(pulse) + 1 # index 0 and 15 are reserved @property def initial_readout_frequency(self): if self._initial_readout_freq is None: if len(self.readout_pulses) > 0: warnings.warn("Readout pulses without frequency given, using 30 MHz.") return 30e6 # Default frequency freq = self._initial_readout_freq return freq() if isinstance(freq, QiCellProperty) else freq @property def recording_length(self): """the length of the recording pulse""" if self._rec_length is not None: return ( self._rec_length() if isinstance(self._rec_length, QiCellProperty) else self._rec_length ) return 0 @property def initial_recording_offset(self): """the recording offset in seconds""" if self._initial_rec_offset is not None: return ( self._initial_rec_offset() if isinstance(self._initial_rec_offset, QiCellProperty) else self._initial_rec_offset ) return 0 def get_result_container(self, result: str) -> QiResult: if result in self._result_container: return self._result_container[result] # was already added else: box = QiResult(result) box._cell = self self._result_container[result] = box return box def add_variable(self, var: _QiVariableBase): self._relevant_vars.add(var) def get_number_of_recordings(self): return len(self._result_recording_order) def set_default_readout(self, pulse): pass def reset(self): for container in self._result_container.values(): container.data = [] def data( self, name: Optional[str] = None ) -> Union[Dict[str, np.ndarray], np.ndarray]: """ Returns the data after running an experiment. When calling this function without a name, i.e., calling :python:`cell.data()`, returns a dictionary containing the results as numpy arrays. When calling this function with a name, i.e., calling :python:`cell.data("result_name")`, returns the whole dictionary. :param name: The name of the data :return: A single result, or a dictionary of result names mapped to results. """ if name is None: result_dict = {} for key, container in self._result_container.items(): result_dict.update({key: container.get()}) return result_dict else: return self._result_container[name].get() def _resolve_properties(self, len_dict: Dict[QiCellProperty, Any]): keys = list(self._unresolved_property) missing_keys = self._unresolved_property.difference(len_dict.keys()) if missing_keys: raise RuntimeError( f"Cell {self.cellID}: Not all properties for job could be resolved. " f"Missing properties: {missing_keys}" ) for key in keys: self._properties[key] = len_dict[key] @property def has_unresolved_properties(self): return len(self._unresolved_property) > 0 def _get_unresolved_properties(self): return [ key for key in list(self._unresolved_property) if self._properties.get(key) is None ] def __str__(self) -> str: return f"QiCell({self.cellID})" class QiCells: """ QiCells encapsulates multiple :class`QiCell` objects. It is a list-like object where the individual cells can be accessed using the index operator, i.e. .. code-block:: python cells = QiCells(5) cell0: QiCell = cells[0] cell3: QiCell = cells[3] :param num: The number of cells to create :raises RuntimeError: When the :python:`QiCells` object is instantiated outside a :python:`QiJob` """ def __init__(self, num: int) -> None: if not isinstance(_QiJobReference, QiJob): raise RuntimeError( "QiCells can only be used within QiJob description. 
" + "If you try to create a sample object, use the new QiSample instead." ) self.cells = [QiCell(x) for x in range(num)] _QiJobReference._register_cells(self.cells) def __getitem__(self, key): return self.cells[key] def __len__(self): return len(self.cells) class QiSampleCell: """QiSampleCell is the representation of a single qubit/cell and its properties. All necessary parameters to perform experiments can be stored here. For this purpose, the QiSampleCell can be utilized as a dictionary with user-defined keys. """ def __init__(self, cellID: int, cells_ref: "QiSample"): self.cellID = cellID self._cells_ref = cells_ref self._relevant_vars: Set[_QiVariableBase] = set() self._properties: Dict[str, Any] = {} def __getitem__(self, key): return self._properties[key] def __setitem__(self, key, value): self._properties[key] = value def __call__(self, qic): return qic.cell[self.qic_cell] @property def qic_cell(self): return self._cells_ref.cell_map[self.cellID] def get_properties(self): return self._properties.copy() def __str__(self) -> str: return f"QiSampleCell({self.cellID})" def _export(self): return {"properties": self.get_properties()} def _import(self, prop_dict, index): if prop_dict is None: warnings.warn( f"Imported JSON string does not contain 'properties' for cell[{index}]." ) return self._properties.update(prop_dict) class QiSample: """Representation of an experiment sample and its properties. Property keys can be arbitrary strings, and property values can be anything. Set the keys using :python:`sample["property_key"] = property_value` and get the values the same way, i.e., :python:`property_value = sample["property_key"]`. Note that this class **cannot** be instantiated within a :class:`QiJob`. Instead, it must be defined outside one. Accessing samples defined here within a QiJob is still possible, however, using the :class:`QiCell` object: .. code-block:: python sample: QiSample = ... qic: QiController = ... sample["t1"] = 100e-6 with QiJob() as job: q = QiCells(1) Wait(q[0], q[0]["t1"]) job.run(qic, sample) # Note that we pass the sample object here to make the value available in the job The :python:`QiSample` object is serializable to `JSON <https://www.json.org/>`_. Have a look at the :meth:`save` and :meth:`load` methods for more :param num: The number of cells/qubits this sample has. :param cell_map: On which QiController cells these are mapped, by default [0, 1, ..., num-1] :raises RuntimeError: When the Sample is used within a :class:`QiJob` """ def __init__(self, num: int, cell_map: Optional[List[int]] = None) -> None: self._cell_map = None if _QiJobReference is not None: raise RuntimeError( "QiSample can only be used outside of QiJob to define sample " "properties. Inside a QiJob, use QiCells as placeholder for the " "qubits/cells instead." 
) self.cells: List[QiSampleCell] = [] for x in range(num): self.cells.append(QiSampleCell(cellID=x, cells_ref=self)) self.cell_map = cell_map or list(range(num)) def __getitem__(self, key): return self.cells[key] def __len__(self): return len(self.cells) def __str__(self): return ( f"QiSample({len(self.cells)}, cell_map=[{','.join(map(str, self.cell_map))}]):\n" + "\n".join( [ f"[{i}]: {json.dumps(props['properties'], indent=2)}" for i, props in enumerate(self._export()["cells"]) ] ) ) def _arrange_for_controller(self) -> List[Optional[QiSampleCell]]: inverse: List[Optional[QiSampleCell]] = [None] * (max(self.cell_map) + 1) for cell, qi_cell_index in enumerate(self.cell_map): inverse[qi_cell_index] = self[cell] return inverse @property def cell_map(self): return self._cell_map @cell_map.setter def cell_map(self, cell_map): if len(cell_map) != len(self): raise ValueError( "cell_map needs to have as many entries as the there are cells, but " f"{len(cell_map)} entries given and {len(self)} required!" ) if len(set(cell_map)) != len(cell_map): raise ValueError("Duplicate values not allowed in cell_map!") if any(c < 0 for c in cell_map): raise ValueError("Cell indices inside cell_map cannot be negative!") self._cell_map = cell_map def _export(self): properties = [cell._export() for cell in self.cells] return {"cells": properties, "cell_map": self.cell_map} def _import(self, jsn_string): jsn_loaded = json.loads(jsn_string) self._evaluate_import(jsn_loaded.get("cells", None)) self.cell_map = jsn_loaded.get("cell_map", self.cell_map) def save(self, file_path: Union[str, os.PathLike], overwrite: bool = False): """ Save the sample to a file denoted by the :python:`file_path` argument in JSON format. :param file_path: Where to store the file :param overwrite: When true, allow overwriting an existing file. :raise FileExistsError: When overwrite is False and the file exists. """ mode = "w" if overwrite is True else "x" with open(file_path, mode, encoding="utf-8") as file: json.dump(self._export(), file) def load(self, file_path: Union[str, os.PathLike]): """ Loads the file at :python:`file_path` and assigns all properties of the loaded file to this :class:`QiSample` object. :param file_path: Where to look for the file """ with open(file_path, "r", encoding="utf-8") as file: self._import(file.read()) def _evaluate_import(self, sample): if sample is None: warnings.warn("Imported JSON string does not contain 'cells'.") return if len(sample) != len(self): raise ValueError( f"Imported JSON contains {len(sample)} sample cells but {len(self)} " "expected." 
) for i in range(0, len(self)): self.cells[i]._import(sample[i].get("properties", None), i) class _JobDescription: """Saves experiment descriptions and handles storage of commands""" def __init__(self): self._commands: List[QiCommand] = [] self._ContextStack: List[List[QiCommand]] = [] def __getitem__(self, key): return self._commands[key] def __len__(self): return len(self._commands) def add_command(self, command): """Checks current command for used cells and raises error, if cells are not defined for current QiJob""" if isinstance(command, QiCellCommand): if _QiJobReference != command.cell._job_ref: raise RuntimeError("Cell not defined for current job") self._commands.append(command) def open_new_context(self): """Saves current commands in a stack and clears command list""" self._ContextStack.append(self._commands.copy()) self._commands = [] def close_context(self) -> List[QiCommand]: """returns the current command list, and loads the commands from top of stack""" current_commands = self._commands.copy() self._commands = self._ContextStack.pop() return current_commands def reset(self): self._commands = [] self._ContextStack = [] class QiCellCommand(QiCommand): """ Cell commands are commands using only one cell, such as Play and Wait commands. :param cell: The target cell """ def __init__(self, cell: QiCell): super().__init__() self.cell = cell self._relevant_cells.add(cell) def accept(self, visitor, *input): return visitor.visit_cell_command(self, *input) class QiVariableCommand(QiCommand): """Base class of variable commands cQiDeclare and cQiAssign""" def __init__(self, var: _QiVariableBase): super().__init__() self.var = var def accept(self, visitor, *input): return visitor.visit_variable_command(self, *input) class cQiWait(QiCellCommand): """Command generated by :meth:`Wait`""" def __init__(self, cell, length: Union[QiExpression, QiCellProperty]): super().__init__(cell) self._length = length if isinstance(length, _QiVariableBase): self.add_associated_variable(length) elif isinstance(length, _QiCalcBase): for variable in length.contained_variables: self.add_associated_variable(variable) if isinstance(length, QiExpression): length._type_info.set_type(QiType.TIME, _TypeDefiningUse.WAIT_COMMAND) @property def length(self): return ( self._length() if isinstance(self._length, QiCellProperty) else self._length ) def _stringify(self) -> str: return f"Wait({self.cell}, {self._length})" class _cQiPlay_base(QiCellCommand): """Base class of Play commands. 
Saves pulses, trigger_index and adds pulse variables to associated variable set """ def __init__(self, cell, pulse: QiPulse): super().__init__(cell) self.pulse = pulse # default False; Set True for certain commands when unrolling a loop with TimingVariable == 1 cycle self._var_single_cycle_trigger = False for variable in self.pulse.variables: self.add_associated_variable(variable) # length of command might differ from pulse length self._length: Union[float, _QiVariableBase, QiCellProperty] = self.pulse.length self.trigger_index = 0 @property def length(self): return ( self._length if not isinstance(self._length, QiCellProperty) else self._length() ) @length.setter def length(self, value): self._length = value class cQiPlay(_cQiPlay_base): """Command generated by Play()""" def __init__(self, cell, pulse: QiPulse): super().__init__(cell, pulse) self.trigger_index = cell.add_pulse(pulse) def _stringify(self) -> str: return f"Play({self.cell}, {self.pulse._stringify()})" class cQiPlayFlux(_cQiPlay_base): pass class cQiPlayReadout(_cQiPlay_base): """Command generated by :meth:`PlayReadout`""" def __init__(self, cell, pulse) -> None: super().__init__(cell, pulse) self.recording: Union[None, cQiRecording] = None self.trigger_index = cell.add_readout_pulse(pulse) @property def length(self): length = ( self._length if not isinstance(self._length, QiCellProperty) else self._length() ) # if Recording is defined and length is not defined by variable, compare both lengths if isinstance(self.recording, cQiRecording) and not isinstance( self._length, _QiVariableBase ): return max(length, self.recording.length) return length @length.setter def length(self, value): self._length = value if isinstance(self.recording, cQiRecording): self.recording.length = value @property def uses_state(self): return self.recording is not None and self.recording.uses_state def _stringify(self) -> str: return f"PlayReadout({self.cell}, {self.pulse._stringify()})" class cQiRotateFrame(_cQiPlay_base): """Command generated by :meth:`RotateFrame`""" def __init__(self, cell, angle: float): # Negate phase because frame needs to be shifted in the opposite direction # than pulses -> want to shift the state on bloch sphere but shift the frame pulse = QiPulse(0, phase=-1 * angle) pulse.shift_phase = True # Special property to make phase offset persistant super().__init__(cell, pulse) self.trigger_index = cell.add_pulse(pulse) self.length = util.conv_cycles_to_time(1) # command needs exactly one cycle self.angle = angle def _stringify(self) -> str: return f"RotateFrame({self.cell}, {self.angle})" class cQiSync(QiCommand): """Command generated by :meth:`Sync`""" def __init__(self, cells: List[QiCell]): super().__init__() self._relevant_cells.update(cells) def accept(self, visitor, *input): return visitor.visit_sync_command(self, *input) def _stringify(self) -> str: return ( "Sync(" + ", ".join( [ f"{cell}" for cell in sorted(self._relevant_cells, key=lambda c: c.cellID) ] ) + ")" ) class cQiRecording(QiCellCommand): """Command generated by Recording()""" def __init__( self, cell: QiCell, save_to: Union[str, _QiVariableBase, None], state_to: Union[_QiVariableBase, None], length: Union[int, float, QiCellProperty], offset: Union[int, float, QiExpression], toggleContinuous: Optional[bool] = None, ): super().__init__(cell) self.result_box = None self.var = None if ( isinstance(length, QiExpression) and length.type == QiType.STATE or isinstance(offset, QiExpression) and offset.type == QiType.STATE ): raise RuntimeError("State variable can only 
be used at save_to parameter.") if isinstance(state_to, _QiVariableBase): state_to._type_info.set_type( QiType.STATE, _TypeDefiningUse.RECORDING_SAVE_TO ) self.add_associated_variable(state_to) self.var = state_to self.save_to = save_to assert not isinstance( save_to, QiResult ) # support for QiResult as parameter was removed. if isinstance(save_to, _QiVariableBase): # TODO This should be deprecated and turned into new result variable # to handle I/Q values instead if necessary -> consistency if self.var is not None: raise RuntimeError("Cannot pass variable to state_to and save_to.") save_to._type_info.set_type( QiType.STATE, _TypeDefiningUse.RECORDING_SAVE_TO ) self.add_associated_variable(save_to) self.var = save_to elif isinstance(save_to, str): self.result_box = cell.get_result_container( save_to ) # container might have been added to cell before self.save_to = save_to cell.add_recording_length(length) self._length = length if isinstance(self._length, QiExpression): self._length._type_info.set_type( QiType.TIME, _TypeDefiningUse.RECORDING_OFFSET_EXPRESSION ) self._offset: QiExpression = QiExpression._from(offset) self._offset._type_info.set_type( QiType.TIME, _TypeDefiningUse.RECORDING_OFFSET_EXPRESSION ) for var in self._offset.contained_variables: var._relevant_cells.add(cell) self.toggleContinuous = toggleContinuous self.follows_readout = False try: cmd = _QiJobReference.commands[-1] if ( isinstance(cmd, cQiPlayReadout) and cmd.cell == self.cell ): # Warning if previous cmd is readout but different cell self.follows_readout = True cmd.recording = self cmd._associated_variable_set.update(self._associated_variable_set) except IndexError: pass @property def uses_state(self): return len(self._associated_variable_set) > 0 @property def length(self): return ( self._length() if isinstance(self._length, QiCellProperty) else self._length ) @length.setter def length(self, value): self._length = value @property def offset(self): return ( self._offset() if isinstance(self._offset, QiCellProperty) else self._offset ) def _stringify_args(self) -> str: """Determines non-default args to explicitly stringify""" arg_strings = [str(self.cell), str(self._length)] if not ( isinstance(self._offset, _QiConstValue) and self._offset._given_value == 0 ): arg_strings.append(f"offset={self._offset}") if self.result_box is not None: arg_strings.append(f'save_to="{self.result_box.name}"') if self.var is not None: arg_strings.append(f"state_to={self.var}") if self.toggleContinuous is not None: arg_strings.append(f"toggleContinuous={self.toggleContinuous}") return ", ".join(arg_strings) def _stringify(self) -> str: return f"Recording({self._stringify_args()})" class cQiStore(QiCellCommand): """Command generated by :meth:`Store`""" def __init__(self, cell, store_var: _QiVariableBase, save_to: QiResult): super().__init__(cell) self.store_var = store_var self.save_to = save_to self.add_associated_variable(store_var) def _stringify(self) -> str: return f"Store({self.cell}, {self.store_var}, {self.save_to})" class cQiAssign(QiVariableCommand): """Command generated by :meth:`Assign`""" def __init__(self, dst: _QiVariableBase, value: Union[QiExpression, int, float]): if not isinstance(dst, _QiVariableBase): raise TypeError("Target of Assign can only be a QiVariable.") super().__init__(dst) self._value = QiExpression._from(value) dst._type_info.add_illegal_type(QiType.STATE, _IllegalTypeReason.ASSIGN) _add_equal_constraints( QiType.NORMAL, _TypeConstraintReasonQiCommand(cQiAssign), self._value, dst ) 
_add_equal_constraints( QiType.TIME, _TypeConstraintReasonQiCommand(cQiAssign), self._value, dst ) for variable in self.value.contained_variables: self.add_associated_variable(variable) @property def value(self): return self._value def accept(self, visitor, *input): return visitor.visit_assign_command(self, *input) def _stringify(self) -> str: return f"Assign({self.var}, {self._value})" class cQiDeclare(QiVariableCommand): """Command generated by initialization of new QiVariable""" def __init__(self, dst: _QiVariableBase) -> None: super().__init__(var=dst) def accept(self, visitor, *input): return visitor.visit_declare_command(self, *input) def _stringify(self) -> str: return f"v{self.var.str_id} = {self.var}" class cQiASM(QiCommand): def __init__(self, cells: QiCell, instr: SequencerInstruction, cycles: int): super().__init__() self._relevant_cells.add(cells) self.asm_instruction = instr self.cycles = cycles def accept(self, visitor, *input): return visitor.visit_asm_command(self, *input) def _stringify(self) -> str: return f"ASM({self.asm_instruction.get_riscv_instruction()})" class cQiMemStore(QiCommand): def __init__(self, cell: QiCell, addr: int, value): super().__init__() self._relevant_cells.add(cell) self.addr = addr self.value = value def accept(self, visitor, *input): return visitor.visit_mem_store_command(self, *input) def _stringify(self): cell_str = ", ".join(list(map(lambda x: f"{x}", self._relevant_cells))) return f"cQiMemStore({cell_str}, {self.addr}, {self.value})" class QiContextManager(QiCommand): """Base Class for If, Else, ForRange and Parallel. Defines functions for storing commands.""" def __init__(self) -> None: super().__init__() self.body: List[QiCommand] = [] def __enter__(self): _QiJobReference._open_new_context() return self def __exit__(self, exception_type, exception_value, traceback): self.body = _QiJobReference._close_context() _QiJobReference._add_command(self) def accept(self, visitor, *input): return visitor.visit_context_manager(self, *input) class If(QiContextManager): """ Add conditional logic to the program. If multiple cells are used inside the body, a synchronization between the cells takes place before the If. :param condition: The condition to check Example ------- .. code-block:: python with QiJob() as job: q = QiCells(1) x = QiIntVariable(1) with If(x > 1): ... # won't be executed The If statement is most commonly used to react to qubit states in real-time: .. code-block:: python from qiclib import jobs with QiJob() as job: q = QiCells(1) state = QiStateVariable() jobs.Readout(q[0], state_to=state) with If(state = 0): ... # Apply some conditional logic based on the qubit state """ def __init__(self, condition: Optional[QiCondition] = None): super().__init__() self._else_body: List[QiCommand] = [] if condition is None: raise RuntimeError("No QiCondition given") self.condition = condition for variable in condition.contained_variables: self.add_associated_variable(variable) def add_else_body(self, else_body): self._else_body = else_body.copy() def is_followed_by_else(self) -> bool: return len(self._else_body) != 0 def accept(self, visitor, *input): return visitor.visit_if(self, *input) def _stringify(self) -> str: return f"If({self.condition})" class Else(QiContextManager): """ Adds Conditional logic if the preceding :class:`If` command evaluates to false. :raises RuntimeError: When the preceeding command is not an :python:`If` command Example ------- .. 
code-block:: python from qiclib import jobs with QiJob() as job: q = QiCells(1) state = QiStateVariable() jobs.Readout(q[0], state_to=state) with If(state = 0): ... # Apply some conditional logic based on the qubit state with Else(): ... # State is 1 """ def __enter__(self): self.if_cmd = _QiJobReference.commands[-1] if not isinstance(self.if_cmd, If): raise RuntimeError("Else is not preceded by If") _QiJobReference._open_new_context() return self def __exit__(self, exception_type, exception_value, traceback): self.if_cmd.add_else_body(_QiJobReference._close_context()) class Parallel(QiContextManager): """Pulses defined in body are united in one trigger command.""" def __init__(self): super().__init__() self.entries: List[List[QiCommand]] = [] def __exit__(self, exception_type, exception_value, traceback): temp = _QiJobReference._close_context() self.body += temp # So visitors also find commands in Parallel blocks. self.entries.append(temp) containing_cells = QiCMContainedCellVisitor() for command in temp: if not isinstance( command, ( cQiPlay, cQiPlayReadout, cQiPlayFlux, cQiRotateFrame, cQiRecording, cQiWait, ), ): raise TypeError("Type not allowed inside Parallel()", command) if ( isinstance(command, (cQiRecording, cQiPlayReadout)) and command.uses_state ): raise RuntimeError("Can not save to state variable inside Parallel") try: if isinstance(command.length, _QiVariableBase): self._associated_variable_set.add(command.length) except KeyError: pass # length was QiCellProperty command.accept(containing_cells) self._relevant_cells.update(containing_cells.contained_cells) # If previous command is also parallel, combine by adding another parallel entry at previous command try: cmd = _QiJobReference.commands[-1] if isinstance(cmd, Parallel) and len(cmd.entries) < 2: cmd.entries.append(temp) cmd._associated_variable_set.update(self._associated_variable_set) else: _QiJobReference._add_command(self) except IndexError: _QiJobReference._add_command(self) class CmdTuple: def __init__(self, cmd: QiCommand, start: int, end: int, choke: bool = False): self.cmd = cmd self.start = start self.end = end self.choke_cmd = choke class TimeSlot: def __init__(self, cmd_tuples: List[Any], start, end): self.cmd_tuples: List[Parallel.CmdTuple] = cmd_tuples self.start: int = start self.end: int = end self.duration: float = 0.0 def _clear_wait_commands(self, cmd_tuples: List[CmdTuple]): """Clears cQiWait commands from cmd_tuples, if any trigger command is also in cmd_tuples""" contains_pulse = False for cmd_tuple in cmd_tuples: if isinstance(cmd_tuple.cmd, _cQiPlay_base): contains_pulse = True break return [ cmd_tuple for cmd_tuple in cmd_tuples if isinstance(cmd_tuple.cmd, _cQiPlay_base) or contains_pulse is False ] def _clear_choke_commands(self, cmd_tuples: List[CmdTuple]): """Clears choke commands, if at the same slot another Play or Readout command is present.""" contains_play = False contains_readout = False for cmd_tuple in cmd_tuples: if isinstance(cmd_tuple.cmd, cQiPlay) and cmd_tuple.choke_cmd is False: contains_play = True elif ( isinstance(cmd_tuple.cmd, cQiPlayReadout) and cmd_tuple.choke_cmd is False ): contains_readout = True if contains_play is False and contains_readout is False: return cmd_tuples cleared_tuples = [] for cmd_tuple in cmd_tuples: # if play command is present skip choke command for play if isinstance(cmd_tuple.cmd, cQiPlay): if cmd_tuple.choke_cmd is True and contains_play: continue # if PlayReadout command is present skip choke command for PlayReadout elif isinstance(cmd_tuple.cmd, 
cQiPlayReadout): if cmd_tuple.choke_cmd is True and contains_readout: continue cleared_tuples.append(cmd_tuple) return cleared_tuples def _create_time_slots(self, annotated_bodies: List[List[CmdTuple]], max_end: int): time_slot_list: List[Parallel.TimeSlot] = [] for start in range(0, max_end): time_slot = self.TimeSlot([], start, start) # find tuples with start time == start for cmd_list in annotated_bodies: for cmd_tuple in cmd_list: if cmd_tuple.start == start: time_slot.cmd_tuples.append(cmd_tuple) time_slot.end = max(cmd_tuple.end, time_slot.end) cmd_list.remove(cmd_tuple) break # next cmd_list # next start value, if nothing was found if len(time_slot.cmd_tuples) == 0: continue time_slot.cmd_tuples = self._clear_wait_commands(time_slot.cmd_tuples) time_slot.cmd_tuples = self._clear_choke_commands(time_slot.cmd_tuples) # Add Wait command, if previous end value < start try: prev_time_slot = time_slot_list[-1] if prev_time_slot.end < start: length = util.conv_cycles_to_time(start - prev_time_slot.end) new_wait = self.CmdTuple( cQiWait(list(self._relevant_cells)[0], length), start=prev_time_slot.end, end=start, ) time_slot_list.append( self.TimeSlot([new_wait], prev_time_slot.end, start) ) except IndexError: pass # Adjust previous end time, if previous.end > start try: prev_time_slot = time_slot_list[-1] prev_time_slot.end = min(prev_time_slot.end, start) except IndexError: pass time_slot_list.append(time_slot) # Add final wait, if previous.end != max_end try: prev_time_slot = time_slot_list[-1] if prev_time_slot.end < max_end: length = util.conv_cycles_to_time(max_end - prev_time_slot.end) new_wait = self.CmdTuple( cQiWait(list(self._relevant_cells)[0], length), start=prev_time_slot.end, end=max_end, ) time_slot_list.append( self.TimeSlot([new_wait], prev_time_slot.end, max_end) ) except IndexError: pass # calculate duration of time slot for slot in time_slot_list: slot.duration = util.conv_cycles_to_time(slot.end - slot.start) return time_slot_list def _generate_command_body(self, cell, sequencer): """Combines the parallel sequences to one command body.""" parallel_bodies: List[List[Parallel.CmdTuple]] = [] max_end = 0 # Generate annotated list of commands with start and end cycle for cmd_list in self.entries: commands: List[Parallel.CmdTuple] = [] start: int = 0 end: int = 0 for cmd in cmd_list: var_pulse = False if cell not in cmd._relevant_cells: continue # skip commands for other cells if isinstance(cmd.length, _QiVariableBase): reg = sequencer.get_var_register(cmd.length) if reg.valid is False or reg.value is None: raise RuntimeError( "Variable inside parallel not initialised or invalidated" ) length = reg.value if isinstance(cmd, (cQiPlay, cQiPlayReadout)): var_pulse = True else: length = util.conv_time_to_cycles(cmd.length, "ceil") if length == 0: continue # skip commands with length 0 if isinstance(cmd, cQiRecording) or ( isinstance(cmd, cQiPlayReadout) and isinstance(cmd.recording, cQiRecording) ): end += length + util.conv_time_to_cycles( sequencer.recording_delay, "ceil" ) else: end += length cmd_duration = self.CmdTuple(cmd, start, end) commands.append(cmd_duration) if var_pulse: # Add parallel choke command after current command, if variable length is used parallel_choke = [self.CmdTuple(cmd, end, end + 1, choke=True)] parallel_bodies.append(parallel_choke) max_end = max(end + 1, max_end) # +1 to account for choke command else: max_end = max(end, max_end) start = end parallel_bodies.append(commands) return self._create_time_slots(parallel_bodies, max_end) def accept(self, 
visitor, *input): return visitor.visit_parallel(self, *input) def _stringify(self) -> str: return "Parallel" class ForRange(QiContextManager): """Adds ForRange to program. If multiple cells are used inside body, a synchronisation between the cells is done before the ForRange as well as after the end of the body. If QiTimeVariable is used as var, loops starting at 0 are unrolled, to skip pulses/waits inside body using var as length. Raises exception if start, end and step are not set up properly.""" def __init__( self, var: _QiVariableBase, start: Union[_QiVariableBase, int, float], end: Union[_QiVariableBase, int, float], step: Union[int, float] = 1, ): super().__init__() if not isinstance(var, _QiVariableBase): raise RuntimeError( "Can only use QiVariables as control variable in ForRanges." ) start_expr = QiExpression._from(start) end_expr = QiExpression._from(end) step_expr = QiExpression._from(step) var._type_info.add_illegal_type(QiType.STATE, _IllegalTypeReason.FOR_RANGE) start_expr._type_info.add_illegal_type( QiType.STATE, _IllegalTypeReason.FOR_RANGE ) end_expr._type_info.add_illegal_type(QiType.STATE, _IllegalTypeReason.FOR_RANGE) step_expr._type_info.add_illegal_type( QiType.STATE, _IllegalTypeReason.FOR_RANGE ) _add_equal_constraints( QiType.TIME, _TypeConstraintReasonQiCommand(ForRange), var, start_expr, end_expr, step_expr, ) _add_equal_constraints( QiType.FREQUENCY, _TypeConstraintReasonQiCommand(ForRange), var, start_expr, end_expr, step_expr, ) _add_equal_constraints( QiType.NORMAL, _TypeConstraintReasonQiCommand(ForRange), var, start_expr, end_expr, step_expr, ) if not isinstance(start, _QiVariableBase) and not isinstance( end, _QiVariableBase ): if (start > end and step >= 0) or (start < end and step <= 0): raise ValueError("Definition of ForRange faulty") self.var = var self.start = start_expr self.end = end_expr self.step = step_expr self.add_associated_variable(var) if isinstance(start, _QiVariableBase): self.add_associated_variable(start) if start.id == var.id: raise RuntimeError("Loop variable can not be used as start value") if isinstance(end, _QiVariableBase): self.add_associated_variable(end) if end.id == var.id: raise RuntimeError("Loop variable can not be used as end value") def __exit__(self, exception_type, exception_value, traceback): super().__exit__(exception_type, exception_value, traceback) check_variable = QiVarInForRange(self.var) self.accept(check_variable) def accept(self, visitor, *input): return visitor.visit_for_range(self, *input) @property def is_step_positive(self) -> bool: return self.step > 0 def _stringify(self) -> str: return f"ForRange({self.var}, {self.start}, {self.end}, {self.step})" class QiVariable(_QiVariableBase): """Used as variables for use in program. If no type is provided as an argument, it will infer its type. """ def __init__( self, type: Union[QiType, Type[int], Type[float]] = QiType.UNKNOWN, value=None, name=None, ) -> None: if type == int: type = QiType.NORMAL elif type == float: type = QiType.TIME super().__init__(type, value, name=name) _add_cmd_to_job(cQiDeclare(self)) if self.value is not None: val = _QiConstValue(value) val._type_info.set_type(type, _TypeDefiningUse.VARIABLE_DEFINITION) _add_cmd_to_job(cQiAssign(self, val)) class QiJob: """ Container holding program, cells and qi_result containers for execution of program. 
Builds the job with its properties :param skip_nco_sync: if the NCO synchronization at the beginning should be skipped :param nco_sync_length: how long to wait after the nco synchronization """ def __init__( self, skip_nco_sync=False, nco_sync_length=0, ): self.qi_results: List[QiResult] = [] self.cells = [] self.skip_nco_sync = skip_nco_sync self.nco_sync_length = nco_sync_length self._description = _JobDescription() # Build self._performed_analyses = False self._build_done = False self._arranged_cells: List[Optional[QiCell]] = [] self._var_reg_map: Dict[_QiVariableBase, Dict[QiCell, int]] = {} # Run self._custom_processing = None self._custom_data_handler = None def __enter__(self): # pylint: disable=global-statement global _QiJobReference _QiJobReference = self return self def __exit__(self, exception_type, exception_value, traceback): for cmd in self.commands: cmd.accept(QiTypeFallbackVisitor()) for cmd in self.commands: cmd.accept(QiPostTypecheckVisitor()) _QiVariableBase.reset_str_id() # pylint: disable=global-statement global _QiJobReference _QiJobReference = None def _open_new_context(self): self._description.open_new_context() def _close_context(self): return self._description.close_context() def _add_command(self, command): self._description.add_command(command) @property def commands(self): """returns the commands of the job""" return self._description._commands def _register_cells(self, cells: List[QiCell]): if len(self.cells) > 0: raise RuntimeError("Can only register one set of cells at a QiJob.") self.cells = cells def _run_analyses(self): """ Executes needed (dataflow) analyses. These mutate the commands in QiJob by inserting additional instructions, therefore they should only run once, in order to avoid duplicate instructions. """ if not self._performed_analyses: insert_recording_offset_store_commands(self) insert_manipulation_pulse_frequency_store_commands(self) insert_readout_pulse_frequency_store_commands(self) self._performed_analyses = True def _simulate_recordings(self) -> Dict[Any, List[cQiRecording]]: """ Simulates the order cQiRecording executions. The result of this simulation is used to disentangle the recordings buffer and reassociate the individual recording results with their corresponding Recording commands. It might return more elements than are recorded during the real execution. """ # We first check if there are Recording commands at positions which we can not simulate. # i.e. If-Else, ForRanges with start or end that are neither constant nor other loop variables. # If this is the case we cannot simulate the order. visitor = QiResultCollector() for cmd in self.commands: cmd.accept(visitor) if len(visitor.found_qi_results) == 0: return {cell: [] for cell in self.cells} elif visitor.recording_in_if: raise RuntimeError("Recording command within If-Else statement.") # Next we simulate all loops and collect the respective Recording commands inside. 
simulator = Simulator(self.cells) simulator._simulate(self.commands) return simulator.cell_recordings def _build_program( self, sample: Optional[QiSample] = None, cell_map: Optional[List[int]] = None ): if sample is not None and cell_map is not None: sample = sample._arrange_for_controller() sample = [sample[m] if m < len(sample) else None for m in cell_map] if cell_map is None: cell_map = list(range(len(self.cells))) # TODO Check that this works with None and right order now self._resolve_properties(sample) for cell in self.cells: if len(cell._get_unresolved_properties()) > 0: raise RuntimeError( f"Unresolved properties {cell._get_unresolved_properties()} at cell {cell}" ) self._run_analyses() sim_result = self._simulate_recordings() for cell in self.cells: cell._result_recording_order = list( map( lambda x: x.result_box, filter(lambda x: x.result_box is not None, sim_result[cell]), ) ) prog_builder = QiProgramBuilder( self.cells, cell_map, self._description._commands.copy(), self.skip_nco_sync, self.nco_sync_length, ) self.cell_seq_dict = prog_builder.build_program() self._var_reg_map = prog_builder.get_all_variables() self._build_done = True def _get_sequencer_codes(self): return [ [ instr.get_riscv_instruction() for instr in self.cell_seq_dict[cell].instruction_list ] for cell in self.cells ] def create_experiment( self, controller, sample: Optional[QiSample] = None, averages: int = 1, cell_map: Optional[List[int]] = None, data_collection=None, use_taskrunner=False, ): exp = QiCodeExperiment( *self._prepare_experiment_params( controller, sample, averages, cell_map, data_collection, use_taskrunner ) ) if data_collection is None: if self._custom_processing is not None: exp._taskrunner.update(self._custom_processing) if self._custom_data_handler is not None: exp._data_handler_factory = DataHandler.get_custom_wrapper_factory( self._custom_data_handler ) # Provide a human-readable description of the execution if cell_map is None: cell_map = list(range(len(self.cells))) str_map = ", ".join([f"q[{i}] -> sample[{m}]" for i, m in enumerate(cell_map)]) exp._job_representation = f"{self}\n\nmapped as {str_map} to\n\n{sample}" return exp def _prepare_experiment_params( self, controller, sample: Optional[QiSample] = None, averages: int = 1, cell_map: Optional[List[int]] = None, data_collection=None, use_taskrunner=False, ): if len(self.cells) > len(controller.cell): raise IndexError( f"This job requires {len(self.cells)} cells but only " f"{len(controller.cell)} are available in the QiController." ) if data_collection is None: if self._custom_processing is None: data_collection = "average" else: data_collection = "custom" # If float, convert averages to int averages = int(averages) if sample is None: sample = QiSample(len(controller.cell)) elif len(sample) < len(self.cells): raise ValueError( "Need to submit a QiSample with at least as many cells as the job " f"has ({len(self.cells)}), but only {len(sample)} provided." ) if cell_map is None: # Use the first cells of the sample cell_map = list(range(len(self.cells))) else: if len(cell_map) != len(self.cells): raise ValueError( "cell_map needs to have as many entries as the job has cells, but " f"{len(cell_map)} entries given and {len(self.cells)} required!" ) if len(set(cell_map)) != len(cell_map): raise ValueError("Duplicate values not allowed in cell_map!") if any(m < 0 or m >= len(sample) for m in cell_map): raise IndexError( "cell_map values can only point to valid indices within the passed" f" QiSample object, i.e. 
values between 0 and {len(sample) - 1}." ) # Translate cell_map from sample cells ("cells") to QiController cells cell_map = [sample.cell_map[c] for c in cell_map] if any(c < 0 or c >= len(controller.cell) for c in cell_map): raise ValueError( "The QiSample cell_map can only reference available QiController " f"cells, i.e. between 0 and {len(controller.cell) - 1}." ) self._build_program(sample, cell_map) for_range_list = [] for cell in self.cells: for_range_list.append(self.cell_seq_dict[cell]._for_range_list) return ( controller, self.cells, self._get_sequencer_codes(), averages, for_range_list, cell_map, self._var_reg_map, data_collection, use_taskrunner, ) def run( self, controller, sample: Optional[QiSample] = None, averages: int = 1, cell_map: Optional[List[int]] = None, data_collection=None, use_taskrunner=False, ): """executes the job and returns the results :param controller: the QiController on which the job should be executed :param sample: the QiSample object used for execution of pulses and extracts parameters for the experiment :param averages: the number of executions that should be averaged, by default 1 :param cell_map: A list containing the indices of the cells :param data_collection: the data_collection mode for the result, by default "average" :param use_taskrunner: if the execution should be handled by the Taskrunner Some advanced schemes and data_collection modes are currently only supported by the Taskrunner and not yet by a native control flow. """ exp = self.create_experiment( controller, sample, averages, cell_map, data_collection, use_taskrunner ) exp.run() def run_with_data_callback(self, on_new_data: Callable[[dict], None]): pass def run_streamed(self): pass def set_custom_data_processing( self, file: str, params: Optional[List] = None, converter: Optional[Callable[[List], List]] = None,
mode: Union[TaskRunner.DataMode, str] = TaskRunner.DataMode.INT32,
0
2023-11-10 10:26:10+00:00
24k
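The docstrings embedded in qi_jobs.py above (QiResult, QiSample, If/Else) already sketch the intended workflow: define sample properties outside a job, describe the pulse program inside a QiJob context, then run it on a QiController. The snippet below merely assembles those docstring fragments in one place; the import path for QiJob/QiCells/QiSample/Wait is assumed, and qic stands in for an already connected QiController.

from qiclib import jobs                                  # used for jobs.Readout, as in the If example
from qiclib.code import QiJob, QiCells, QiSample, Wait   # import path assumed

qic = ...  # placeholder: a connected QiController instance

# Sample properties are defined outside the job and resolved when it runs.
sample = QiSample(1)
sample["t1"] = 100e-6

with QiJob() as job:
    q = QiCells(1)
    Wait(q[0], q[0]["t1"])                # waits for the sample's "t1" property
    jobs.Readout(q[0], save_to="result")  # records into the "result" container

job.run(qic, sample, averages=1000)
data = job.cells[0].data("result")        # numpy array with the averaged data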
fg320/DEASC
examples/12C_5x1_farm_dyn_tuning_wso_grouping_looping.py
[ { "identifier": "WfModel", "path": "deasc/wf_model.py", "snippet": "class WfModel:\n \"\"\"\n Class for wind farm modelling (Interface setup but not limited to FLORIS\n framework).\n \"\"\"\n\n def __init__(self, input_file, path):\n \"\"\"\n Initialise wind farm object by p...
import numpy as np
from deasc import WfModel
from deasc import WSOpt
from deasc import Tuning
from deasc import GPWrap
from deasc import TuningDyn_Grouping
from deasc import TuningDyn_Looping_Turbine
from deasc.utils_floris import (
    floris_extract_object_dict,
    floris_extract_parameter,
    floris_param_change_object_dict,
    floris_param_change_object
)
15,170
""" This example shows wake steering optimisation on a 5x1 wind farm of NREL 5 MW turbines. Dynamic parameter tuning with the looping approach is implemented to refine the results achieved with grouping. Tuning is introduced in the optimisation for the wake expansion parameter k of the Jensen wake model. The tuning variables are the yaw angles of all wind turbines in the farm, excluding the most downstream one. """ # %% Initial wake steering optimisation - Grouping approach for dynamic parameter tuning # Initialise and set layout for wind farm model path = "./inputs/" input_file = "jensen.yaml" wf_model = WfModel(input_file, path) wf_model.set_aligned_layout(5, 1, 7, 5) # Set kd deflection parameter wf_model_dict = floris_extract_object_dict(wf_model)
""" This example shows wake steering optimisation on a 5x1 wind farm of NREL 5 MW turbines. Dynamic parameter tuning with the looping approach is implemented to refine the results achieved with grouping. Tuning is introduced in the optimisation for the wake expansion parameter k of the Jensen wake model. The tuning variables are the yaw angles of all wind turbines in the farm, excluding the most downstream one. """ # %% Initial wake steering optimisation - Grouping approach for dynamic parameter tuning # Initialise and set layout for wind farm model path = "./inputs/" input_file = "jensen.yaml" wf_model = WfModel(input_file, path) wf_model.set_aligned_layout(5, 1, 7, 5) # Set kd deflection parameter wf_model_dict = floris_extract_object_dict(wf_model)
wf_model_dict = floris_param_change_object_dict(wf_model_dict,
8
2023-11-10 18:13:27+00:00
24k
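The record's next_line shows that the example continues by editing the extracted FLORIS dictionary with floris_param_change_object_dict before re-applying it to the wind farm model. The exact arguments are not part of the record, so the sketch below only illustrates the extract -> modify -> re-apply pattern implied by the imported helpers; the argument order, the parameter path string and the kd value are assumptions, not the example's real continuation.

from deasc import WfModel
from deasc.utils_floris import (
    floris_extract_object_dict,
    floris_param_change_object_dict,
    floris_param_change_object,
)

# Same 5x1 aligned farm as in the example above.
wf_model = WfModel("jensen.yaml", "./inputs/")
wf_model.set_aligned_layout(5, 1, 7, 5)

# Extract the FLORIS model as a dict, change one parameter, push the dict back.
wf_model_dict = floris_extract_object_dict(wf_model)
wf_model_dict = floris_param_change_object_dict(
    wf_model_dict,
    "wake.wake_deflection_parameters.jimenez.kd",  # hypothetical parameter path
    0.05,                                          # hypothetical kd value
)
wf_model = floris_param_change_object(wf_model, wf_model_dict)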
PlaxtonFlarion/NexaFlow
nexaflow/skills/alynex.py
[ { "identifier": "toolbox", "path": "nexaflow/toolbox.py", "snippet": "def video_capture(video_path: str):\ndef video_jump(video_cap: cv2.VideoCapture, frame_id: int):\ndef compare_ssim(pic1: np.ndarray, pic2: np.ndarray) -> float:\ndef multi_compare_ssim(\n pic1_list: typing.List, pic2_list: typing.L...
import os
import cv2
import time
import random
import asyncio
from loguru import logger
from typing import List, Union, Optional
from concurrent.futures import ThreadPoolExecutor
from nexaflow import toolbox
from nexaflow.skills.report import Report
from nexaflow.skills.record import Record
from nexaflow.skills.player import Player
from nexaflow.skills.switch import Switch
from nexaflow.cutter.cutter import VideoCutter
from nexaflow.video import VideoObject, Frame
from nexaflow.classifier.keras_classifier import KerasClassifier
from nexaflow.hook import BaseHook, CropHook, OmitHook, FrameSaveHook
from nexaflow.classifier.base import ClassifierResult, SingleClassifierResult
16,763
class Alynex(object): target_size: tuple = (350, 700) fps: int = 60 step: int = 1 block: int = 6 threshold: Union[int | float] = 0.97 offset: int = 3 compress_rate: float = 0.5 window_size: int = 1 window_coefficient: int = 2 kc: KerasClassifier = KerasClassifier( target_size=target_size, data_size=target_size ) def __init__(self): self.__report: Optional[Report] = None self.__record: Optional[Record] = Record() self.__player: Optional[Player] = Player() self.__ffmpeg: Optional[Switch] = Switch() self.__filmer: Optional[Alynex._Filmer] = Alynex._Filmer() self.__framix: Optional[Alynex._Framix] = None def __str__(self): return (f""" <Alynex for NexaFlow Target Size: {self.target_size} Fps: {self.fps} Step: {self.step} Block: {self.block} Threshold: {self.threshold} Offset: {self.offset} Compress Rate: {self.compress_rate} Window Size: {self.window_size} Window Coefficient: {self.window_coefficient} > """) __repr__ = __str__ def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): pass @property def report(self) -> "Report": assert self.__report, f"{self.activate.__name__} first ..." return self.__report @property def record(self) -> "Record": return self.__record @property def player(self) -> "Player": return self.__player @property def ffmpeg(self) -> "Switch": return self.__ffmpeg @property def filmer(self) -> "Alynex._Filmer": return self.__filmer @property def framix(self) -> "Alynex._Framix": assert self.__framix, f"{self.activate.__name__} first ..." return self.__framix @staticmethod def only_video(folder: str) -> List: class Entry(object): def __init__(self, title: str, place: str, sheet: list): self.title = title self.place = place self.sheet = sheet return [ Entry( os.path.basename(root), root, [os.path.join(root, f) for f in sorted(file)] ) for root, _, file in os.walk(folder) if file ] def activate(self, models: str, total_path: str): if not self.__report: self.__report = Report(total_path) self.__framix = Alynex._Framix(self.report) Alynex.kc.load_model(models) class _Filmer(object): @staticmethod def train_model(video_file: str) -> None: model_path = os.path.join( os.path.dirname(video_file), f"Model_{time.strftime('%Y%m%d%H%M%S')}_{os.getpid()}" ) if not os.path.exists(model_path): os.makedirs(model_path, exist_ok=True) # 将视频切分成帧
class Alynex(object): target_size: tuple = (350, 700) fps: int = 60 step: int = 1 block: int = 6 threshold: Union[int | float] = 0.97 offset: int = 3 compress_rate: float = 0.5 window_size: int = 1 window_coefficient: int = 2 kc: KerasClassifier = KerasClassifier( target_size=target_size, data_size=target_size ) def __init__(self): self.__report: Optional[Report] = None self.__record: Optional[Record] = Record() self.__player: Optional[Player] = Player() self.__ffmpeg: Optional[Switch] = Switch() self.__filmer: Optional[Alynex._Filmer] = Alynex._Filmer() self.__framix: Optional[Alynex._Framix] = None def __str__(self): return (f""" <Alynex for NexaFlow Target Size: {self.target_size} Fps: {self.fps} Step: {self.step} Block: {self.block} Threshold: {self.threshold} Offset: {self.offset} Compress Rate: {self.compress_rate} Window Size: {self.window_size} Window Coefficient: {self.window_coefficient} > """) __repr__ = __str__ def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): pass @property def report(self) -> "Report": assert self.__report, f"{self.activate.__name__} first ..." return self.__report @property def record(self) -> "Record": return self.__record @property def player(self) -> "Player": return self.__player @property def ffmpeg(self) -> "Switch": return self.__ffmpeg @property def filmer(self) -> "Alynex._Filmer": return self.__filmer @property def framix(self) -> "Alynex._Framix": assert self.__framix, f"{self.activate.__name__} first ..." return self.__framix @staticmethod def only_video(folder: str) -> List: class Entry(object): def __init__(self, title: str, place: str, sheet: list): self.title = title self.place = place self.sheet = sheet return [ Entry( os.path.basename(root), root, [os.path.join(root, f) for f in sorted(file)] ) for root, _, file in os.walk(folder) if file ] def activate(self, models: str, total_path: str): if not self.__report: self.__report = Report(total_path) self.__framix = Alynex._Framix(self.report) Alynex.kc.load_model(models) class _Filmer(object): @staticmethod def train_model(video_file: str) -> None: model_path = os.path.join( os.path.dirname(video_file), f"Model_{time.strftime('%Y%m%d%H%M%S')}_{os.getpid()}" ) if not os.path.exists(model_path): os.makedirs(model_path, exist_ok=True) # 将视频切分成帧
video = VideoObject(video_file, fps=Alynex.fps)
6
2023-11-13 05:27:34+00:00
24k
microsoft/SoM
task_adapter/semantic_sam/tasks/inference_semsam_m2m_auto.py
[ { "identifier": "Visualizer", "path": "task_adapter/utils/visualizer.py", "snippet": "class Visualizer:\n \"\"\"\n Visualizer that draws data about detection/segmentation on images.\n\n It contains methods like `draw_{text,box,circle,line,binary_mask,polygon}`\n that draw primitive objects t...
import torch import numpy as np import matplotlib.pyplot as plt import cv2 import io import cv2 # type: ignore from torchvision import transforms from task_adapter.utils.visualizer import Visualizer from typing import Tuple from PIL import Image from detectron2.data import MetadataCatalog from .automatic_mask_generator import SemanticSamAutomaticMaskGenerator from task_adapter.utils.visualizer import Visualizer
15,586
# -------------------------------------------------------- # Semantic-SAM: Segment and Recognize Anything at Any Granularity # Copyright (c) 2023 Microsoft # Licensed under The MIT License [see LICENSE for details] # Written by Hao Zhang (hzhangcx@connect.ust.hk) # -------------------------------------------------------- metadata = MetadataCatalog.get('coco_2017_train_panoptic') def inference_semsam_m2m_auto(model, image, level, all_classes, all_parts, thresh, text_size, hole_scale, island_scale, semantic, refimg=None, reftxt=None, audio_pth=None, video_pth=None, label_mode='1', alpha=0.1, anno_mode=['Mask']): t = [] t.append(transforms.Resize(int(text_size), interpolation=Image.BICUBIC)) transform1 = transforms.Compose(t) image_ori = transform1(image) image_ori = np.asarray(image_ori) images = torch.from_numpy(image_ori.copy()).permute(2,0,1).cuda() mask_generator = SemanticSamAutomaticMaskGenerator(model,points_per_side=32, pred_iou_thresh=0.88, stability_score_thresh=0.92, min_mask_region_area=10, level=level, ) outputs = mask_generator.generate(images)
# -------------------------------------------------------- # Semantic-SAM: Segment and Recognize Anything at Any Granularity # Copyright (c) 2023 Microsoft # Licensed under The MIT License [see LICENSE for details] # Written by Hao Zhang (hzhangcx@connect.ust.hk) # -------------------------------------------------------- metadata = MetadataCatalog.get('coco_2017_train_panoptic') def inference_semsam_m2m_auto(model, image, level, all_classes, all_parts, thresh, text_size, hole_scale, island_scale, semantic, refimg=None, reftxt=None, audio_pth=None, video_pth=None, label_mode='1', alpha=0.1, anno_mode=['Mask']): t = [] t.append(transforms.Resize(int(text_size), interpolation=Image.BICUBIC)) transform1 = transforms.Compose(t) image_ori = transform1(image) image_ori = np.asarray(image_ori) images = torch.from_numpy(image_ori.copy()).permute(2,0,1).cuda() mask_generator = SemanticSamAutomaticMaskGenerator(model,points_per_side=32, pred_iou_thresh=0.88, stability_score_thresh=0.92, min_mask_region_area=10, level=level, ) outputs = mask_generator.generate(images)
visual = Visualizer(image_ori, metadata=metadata)
0
2023-10-16 03:39:26+00:00
24k
hkchengrex/Cutie
gui/main_controller.py
[ { "identifier": "CUTIE", "path": "cutie/model/cutie.py", "snippet": "class CUTIE(nn.Module):\n def __init__(self, cfg: DictConfig, *, single_object=False):\n super().__init__()\n model_cfg = cfg.model\n self.ms_dims = model_cfg.pixel_encoder.ms_dims\n self.key_dim = model_...
import os import logging import cv2 import torch import numpy as np from os import path from typing import Literal from torch import mps from torch import autocast from torchvision.transforms.functional import to_tensor from omegaconf import DictConfig, open_dict from cutie.model.cutie import CUTIE from cutie.inference.inference_core import InferenceCore from gui.interaction import * from gui.interactive_utils import * from gui.resource_manager import ResourceManager from gui.gui import GUI from gui.click_controller import ClickController from gui.reader import PropagationReader, get_data_loader from gui.exporter import convert_frames_to_video, convert_mask_to_binary from scripts.download_models import download_models_if_needed
15,849
# fix conflicts between qt5 and cv2 os.environ.pop("QT_QPA_PLATFORM_PLUGIN_PATH") try: except: print('torch.MPS not available.') log = logging.getLogger() class MainController(): def __init__(self, cfg: DictConfig) -> None: super().__init__() self.initialized = False # setting up the workspace if cfg["workspace"] is None: if cfg["images"] is not None: basename = path.basename(cfg["images"]) elif cfg["video"] is not None: basename = path.basename(cfg["video"])[:-4] else: raise NotImplementedError('Either images, video, or workspace has to be specified') cfg["workspace"] = path.join(cfg['workspace_root'], basename) # reading arguments self.cfg = cfg self.num_objects = cfg['num_objects'] self.device = cfg['device'] self.amp = cfg['amp'] # initializing the network(s) self.initialize_networks() # main components self.res_man = ResourceManager(cfg) self.processor = InferenceCore(self.cutie, self.cfg) self.gui = GUI(self, self.cfg) # initialize control info self.length: int = self.res_man.length self.interaction: Interaction = None self.interaction_type: str = 'Click' self.curr_ti: int = 0 self.curr_object: int = 1 self.propagating: bool = False self.propagate_direction: Literal['forward', 'backward', 'none'] = 'none' self.last_ex = self.last_ey = 0 # current frame info self.curr_frame_dirty: bool = False self.curr_image_np: np.ndarray = np.zeros((self.h, self.w, 3), dtype=np.uint8) self.curr_image_torch: torch.Tensor = None self.curr_mask: np.ndarray = np.zeros((self.h, self.w), dtype=np.uint8) self.curr_prob: torch.Tensor = torch.zeros((self.num_objects + 1, self.h, self.w), dtype=torch.float).to(self.device) self.curr_prob[0] = 1 # visualization info self.vis_mode: str = 'davis' self.vis_image: np.ndarray = None self.save_visualization: bool = False self.save_soft_mask: bool = False self.interacted_prob: torch.Tensor = None self.overlay_layer: np.ndarray = None self.overlay_layer_torch: torch.Tensor = None # the object id used for popup/layer overlay self.vis_target_objects = list(range(1, self.num_objects + 1)) self.load_current_image_mask() self.show_current_frame() # initialize stuff self.update_memory_gauges() self.update_gpu_gauges() self.gui.work_mem_min.setValue(self.processor.memory.min_mem_frames) self.gui.work_mem_max.setValue(self.processor.memory.max_mem_frames) self.gui.long_mem_max.setValue(self.processor.memory.max_long_tokens) self.gui.mem_every_box.setValue(self.processor.mem_every) # for exporting videos self.output_fps = cfg['output_fps'] self.output_bitrate = cfg['output_bitrate'] # set callbacks self.gui.on_mouse_motion_xy = self.on_mouse_motion_xy self.gui.click_fn = self.click_fn self.gui.show() self.gui.text('Initialized.') self.initialized = True # try to load the default overlay self._try_load_layer('./docs/uiuc.png') self.gui.set_object_color(self.curr_object) self.update_config() def initialize_networks(self) -> None: download_models_if_needed() self.cutie = CUTIE(self.cfg).eval().to(self.device) model_weights = torch.load(self.cfg.weights, map_location=self.device) self.cutie.load_weights(model_weights)
# fix conflicts between qt5 and cv2 os.environ.pop("QT_QPA_PLATFORM_PLUGIN_PATH") try: except: print('torch.MPS not available.') log = logging.getLogger() class MainController(): def __init__(self, cfg: DictConfig) -> None: super().__init__() self.initialized = False # setting up the workspace if cfg["workspace"] is None: if cfg["images"] is not None: basename = path.basename(cfg["images"]) elif cfg["video"] is not None: basename = path.basename(cfg["video"])[:-4] else: raise NotImplementedError('Either images, video, or workspace has to be specified') cfg["workspace"] = path.join(cfg['workspace_root'], basename) # reading arguments self.cfg = cfg self.num_objects = cfg['num_objects'] self.device = cfg['device'] self.amp = cfg['amp'] # initializing the network(s) self.initialize_networks() # main components self.res_man = ResourceManager(cfg) self.processor = InferenceCore(self.cutie, self.cfg) self.gui = GUI(self, self.cfg) # initialize control info self.length: int = self.res_man.length self.interaction: Interaction = None self.interaction_type: str = 'Click' self.curr_ti: int = 0 self.curr_object: int = 1 self.propagating: bool = False self.propagate_direction: Literal['forward', 'backward', 'none'] = 'none' self.last_ex = self.last_ey = 0 # current frame info self.curr_frame_dirty: bool = False self.curr_image_np: np.ndarray = np.zeros((self.h, self.w, 3), dtype=np.uint8) self.curr_image_torch: torch.Tensor = None self.curr_mask: np.ndarray = np.zeros((self.h, self.w), dtype=np.uint8) self.curr_prob: torch.Tensor = torch.zeros((self.num_objects + 1, self.h, self.w), dtype=torch.float).to(self.device) self.curr_prob[0] = 1 # visualization info self.vis_mode: str = 'davis' self.vis_image: np.ndarray = None self.save_visualization: bool = False self.save_soft_mask: bool = False self.interacted_prob: torch.Tensor = None self.overlay_layer: np.ndarray = None self.overlay_layer_torch: torch.Tensor = None # the object id used for popup/layer overlay self.vis_target_objects = list(range(1, self.num_objects + 1)) self.load_current_image_mask() self.show_current_frame() # initialize stuff self.update_memory_gauges() self.update_gpu_gauges() self.gui.work_mem_min.setValue(self.processor.memory.min_mem_frames) self.gui.work_mem_max.setValue(self.processor.memory.max_mem_frames) self.gui.long_mem_max.setValue(self.processor.memory.max_long_tokens) self.gui.mem_every_box.setValue(self.processor.mem_every) # for exporting videos self.output_fps = cfg['output_fps'] self.output_bitrate = cfg['output_bitrate'] # set callbacks self.gui.on_mouse_motion_xy = self.on_mouse_motion_xy self.gui.click_fn = self.click_fn self.gui.show() self.gui.text('Initialized.') self.initialized = True # try to load the default overlay self._try_load_layer('./docs/uiuc.png') self.gui.set_object_color(self.curr_object) self.update_config() def initialize_networks(self) -> None: download_models_if_needed() self.cutie = CUTIE(self.cfg).eval().to(self.device) model_weights = torch.load(self.cfg.weights, map_location=self.device) self.cutie.load_weights(model_weights)
self.click_ctrl = ClickController(self.cfg.ritm_weights, device=self.device)
4
2023-10-19 17:49:24+00:00
24k
ZhengyiLuo/PerpetualHumanoidControl
poselib/poselib/skeleton/tests/test_skeleton.py
[ { "identifier": "SkeletonTree", "path": "poselib/poselib/skeleton/skeleton3d.py", "snippet": "class SkeletonTree(Serializable):\n \"\"\"\n A skeleton tree gives a complete description of a rigid skeleton. It describes a tree structure\n over a list of nodes with their names indicated by strings...
from ...core import * from ..skeleton3d import SkeletonTree, SkeletonState, SkeletonMotion from ...visualization.common import ( plot_skeleton_state, plot_skeleton_motion_interactive, ) from ...visualization.plt_plotter import Matplotlib3DPlotter from ...visualization.skeleton_plotter_tasks import ( Draw3DSkeletonMotion, Draw3DSkeletonState, ) import numpy as np import torch
18,285
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. def test_skel_tree(): skel_tree = SkeletonTree.from_mjcf( "/home/serfcx/DL_Animation/rl_mimic/data/skeletons/humanoid_mimic_mod_2_noind.xml", backend="pytorch", ) skel_tree_rec = SkeletonTree.from_dict(skel_tree.to_dict(), backend="pytorch") # assert skel_tree.to_str() == skel_tree_rec.to_str() print(skel_tree.node_names) print(skel_tree.local_translation) print(skel_tree.parent_indices) skel_state = SkeletonState.zero_pose(skeleton_tree=skel_tree) plot_skeleton_state(task_name="draw_skeleton", skeleton_state=skel_state) skel_state = skel_state.drop_nodes_by_names(["right_hip", "left_hip"]) plot_skeleton_state(task_name="draw_skeleton", skeleton_state=skel_state) def test_skel_motion(): skel_motion = SkeletonMotion.from_file( "/tmp/tmp.npy", backend="pytorch", load_context=True ) plot_skeleton_motion_interactive(skel_motion) def test_grad(): source_motion = SkeletonMotion.from_file( "c:\\Users\\bmatusch\\carbmimic\\data\\motions\\JogFlatTerrain_01_ase.npy", backend="pytorch", device="cuda:0", ) source_tpose = SkeletonState.from_file( "c:\\Users\\bmatusch\\carbmimic\\data\\skeletons\\fox_tpose.npy", backend="pytorch", device="cuda:0", ) target_tpose = SkeletonState.from_file( "c:\\Users\\bmatusch\\carbmimic\\data\\skeletons\\flex_tpose.npy", backend="pytorch", device="cuda:0", ) target_skeleton_tree = target_tpose.skeleton_tree joint_mapping = { "upArm_r": "right_shoulder", "upArm_l": "left_shoulder", "loArm_r": "right_elbow", "loArm_l": "left_elbow", "upLeg_r": "right_hip", "upLeg_l": "left_hip", "loLeg_r": "right_knee", "loLeg_l": "left_knee", "foot_r": "right_ankle", "foot_l": "left_ankle", "hips": "pelvis", "neckA": "neck", "spineA": "abdomen", } rotation_to_target_skeleton = quat_from_angle_axis( angle=torch.tensor(90.0).float(), axis=torch.tensor([1, 0, 0]).float(), degree=True, ) target_motion = source_motion.retarget_to( joint_mapping=joint_mapping, source_tpose_local_rotation=source_tpose.local_rotation, source_tpose_root_translation=source_tpose.root_translation, target_skeleton_tree=target_skeleton_tree, target_tpose_local_rotation=target_tpose.local_rotation, target_tpose_root_translation=target_tpose.root_translation, rotation_to_target_skeleton=rotation_to_target_skeleton, scale_to_target_skeleton=0.01, ) target_state = SkeletonState( target_motion.tensor[800, :], target_motion.skeleton_tree, target_motion.is_local, ) skeleton_tree = target_state.skeleton_tree root_translation = target_state.root_translation global_translation = target_state.global_translation q = np.zeros((len(skeleton_tree), 4), dtype=np.float32) q[..., 3] = 1.0 q = torch.from_numpy(q) max_its = 10000 task = Draw3DSkeletonState(task_name="", skeleton_state=target_state)
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. def test_skel_tree(): skel_tree = SkeletonTree.from_mjcf( "/home/serfcx/DL_Animation/rl_mimic/data/skeletons/humanoid_mimic_mod_2_noind.xml", backend="pytorch", ) skel_tree_rec = SkeletonTree.from_dict(skel_tree.to_dict(), backend="pytorch") # assert skel_tree.to_str() == skel_tree_rec.to_str() print(skel_tree.node_names) print(skel_tree.local_translation) print(skel_tree.parent_indices) skel_state = SkeletonState.zero_pose(skeleton_tree=skel_tree) plot_skeleton_state(task_name="draw_skeleton", skeleton_state=skel_state) skel_state = skel_state.drop_nodes_by_names(["right_hip", "left_hip"]) plot_skeleton_state(task_name="draw_skeleton", skeleton_state=skel_state) def test_skel_motion(): skel_motion = SkeletonMotion.from_file( "/tmp/tmp.npy", backend="pytorch", load_context=True ) plot_skeleton_motion_interactive(skel_motion) def test_grad(): source_motion = SkeletonMotion.from_file( "c:\\Users\\bmatusch\\carbmimic\\data\\motions\\JogFlatTerrain_01_ase.npy", backend="pytorch", device="cuda:0", ) source_tpose = SkeletonState.from_file( "c:\\Users\\bmatusch\\carbmimic\\data\\skeletons\\fox_tpose.npy", backend="pytorch", device="cuda:0", ) target_tpose = SkeletonState.from_file( "c:\\Users\\bmatusch\\carbmimic\\data\\skeletons\\flex_tpose.npy", backend="pytorch", device="cuda:0", ) target_skeleton_tree = target_tpose.skeleton_tree joint_mapping = { "upArm_r": "right_shoulder", "upArm_l": "left_shoulder", "loArm_r": "right_elbow", "loArm_l": "left_elbow", "upLeg_r": "right_hip", "upLeg_l": "left_hip", "loLeg_r": "right_knee", "loLeg_l": "left_knee", "foot_r": "right_ankle", "foot_l": "left_ankle", "hips": "pelvis", "neckA": "neck", "spineA": "abdomen", } rotation_to_target_skeleton = quat_from_angle_axis( angle=torch.tensor(90.0).float(), axis=torch.tensor([1, 0, 0]).float(), degree=True, ) target_motion = source_motion.retarget_to( joint_mapping=joint_mapping, source_tpose_local_rotation=source_tpose.local_rotation, source_tpose_root_translation=source_tpose.root_translation, target_skeleton_tree=target_skeleton_tree, target_tpose_local_rotation=target_tpose.local_rotation, target_tpose_root_translation=target_tpose.root_translation, rotation_to_target_skeleton=rotation_to_target_skeleton, scale_to_target_skeleton=0.01, ) target_state = SkeletonState( target_motion.tensor[800, :], target_motion.skeleton_tree, target_motion.is_local, ) skeleton_tree = target_state.skeleton_tree root_translation = target_state.root_translation global_translation = target_state.global_translation q = np.zeros((len(skeleton_tree), 4), dtype=np.float32) q[..., 3] = 1.0 q = torch.from_numpy(q) max_its = 10000 task = Draw3DSkeletonState(task_name="", skeleton_state=target_state)
plotter = Matplotlib3DPlotter(task)
5
2023-10-15 19:05:47+00:00
24k
e4s2023/E4S2023
our_swap_face_video_pipeline2.py
[ { "identifier": "Net3", "path": "models/networks.py", "snippet": "class Net3(nn.Module):\n \"\"\" FSEncoder + styleGAN2 \"\"\"\n\n def __init__(self,opts,):\n super(Net3, self).__init__()\n self.opts=opts\n assert self.opts.fsencoder_type in [\"psp\",\"sean\"]\n if self...
import copy import cv2 import torch import numpy as np import torchvision.transforms as transforms import os import glob import torch.nn as nn from PIL import Image from models.networks import Net3 from datasets.dataset import get_transforms, TO_TENSOR, NORMALIZE from utils import torch_utils from tqdm import tqdm from torch.nn import functional as F from training.video_swap_ft_coach import VideoSwapPTICoach from tqdm import trange from options.our_swap_face_pipeline_options import OurSwapFacePipelineOptions from swap_face_fine.swap_face_mask import swap_head_mask_revisit, swap_head_mask_hole_first from utils.morphology import dilation, erosion from training.video_swap_ft_coach import dialate_mask, erode_mask from swap_face_fine.multi_band_blending import blending from utils.alignment import crop_faces, calc_alignment_coefficients # 这一句有 import的 bug,大概率 dlib的问题 from skimage.transform import resize # 这一句有点问题 from swap_face_fine.face_vid2vid.drive_demo import init_facevid2vid_pretrained_model, drive_source_demo # 这一行有点问题 from swap_face_fine.gpen.gpen_demo import init_gpen_pretrained_model, GPEN_demo from swap_face_fine.face_parsing.face_parsing_demo import init_faceParsing_pretrained_model, faceParsing_demo, vis_parsing_maps
14,816
# from training.video_swap_st_constraint import VideoSwapPTICoach # from training.video_swap_stich_coach import VideoSwapStichingCoach def create_masks(mask, operation='dilation', radius=0): temp = copy.deepcopy(mask) if operation == 'dilation': full_mask = dilation(temp, torch.ones(2 * radius + 1, 2 * radius + 1, device=mask.device), engine='convolution') border_mask = full_mask - temp elif operation == 'erosion':
# from training.video_swap_st_constraint import VideoSwapPTICoach # from training.video_swap_stich_coach import VideoSwapStichingCoach def create_masks(mask, operation='dilation', radius=0): temp = copy.deepcopy(mask) if operation == 'dilation': full_mask = dilation(temp, torch.ones(2 * radius + 1, 2 * radius + 1, device=mask.device), engine='convolution') border_mask = full_mask - temp elif operation == 'erosion':
full_mask = erosion(temp, torch.ones(2 * radius + 1, 2 * radius + 1, device=mask.device), engine='convolution')
10
2023-10-15 12:15:01+00:00
24k
sotopia-lab/sotopia
sotopia/server.py
[ { "identifier": "Agents", "path": "sotopia/agents/llm_agent.py", "snippet": "class Agents(dict[str, BaseAgent[Observation, AgentAction]]):\n def reset(self) -> None:\n for agent in self.values():\n agent.reset()\n\n def act(self, obs: dict[str, Observation]) -> dict[str, AgentAct...
import asyncio import functools import itertools import logging import gin import rich from typing import Callable, Literal, Sequence, Type, cast from beartype import beartype from tqdm.asyncio import tqdm_asyncio from sotopia.agents import ( Agents, HumanAgent, LLMAgent, RedisAgent, ScriptWritingAgent, SpeakAgent, ) from sotopia.agents.base_agent import BaseAgent from sotopia.database import EpisodeLog from sotopia.database.persistent_profile import ( AgentProfile, EnvironmentProfile, ) from sotopia.envs import ParallelSotopiaEnv from sotopia.envs.evaluators import ( ReachGoalLLMEvaluator, RuleBasedTerminatedEvaluator, unweighted_aggregate_evaluate, ) from sotopia.generation_utils.generate import LLM_Name, agenerate_script from sotopia.messages import AgentAction, Message, Observation from sotopia.messages.message_classes import ( ScriptBackground, ScriptEnvironmentResponse, ScriptInteraction, ) from sotopia.samplers import ( BaseSampler, ConstraintBasedSampler, EnvAgentCombo, UniformSampler, )
18,296
@beartype def run_sync_server( model_name_dict: dict[str, LLM_Name], action_order: Literal["simutaneous", "round-robin", "random"], agents_info: dict[str, dict[str, str]] | None = None, partial_background_file: str | None = None, full_background_file: str | None = None, mode: str | None = None, ) -> list[tuple[str, str, Message]]: # Create Environment and agents # This step will be moved to outside this function env = ParallelSotopiaEnv( model_name=model_name_dict["env"], action_order=action_order, evaluators=[ RuleBasedTerminatedEvaluator(), ], ) if partial_background_file: environment_messages = env.reset( options={"partial_background_file": partial_background_file} ) elif full_background_file: environment_messages = env.reset( options={"full_background_file": full_background_file} ) else: environment_messages = env.reset() agents = Agents() agents_model_names = [model_name_dict["agent1"], model_name_dict["agent2"]] for agent_name, agent_model in zip(env.agents, agents_model_names): if agent_model == "human": agents[agent_name] = HumanAgent(agent_name) elif mode == "speak": agents[agent_name] = SpeakAgent(agent_name, model_name=agent_model) else:
@beartype def run_sync_server( model_name_dict: dict[str, LLM_Name], action_order: Literal["simutaneous", "round-robin", "random"], agents_info: dict[str, dict[str, str]] | None = None, partial_background_file: str | None = None, full_background_file: str | None = None, mode: str | None = None, ) -> list[tuple[str, str, Message]]: # Create Environment and agents # This step will be moved to outside this function env = ParallelSotopiaEnv( model_name=model_name_dict["env"], action_order=action_order, evaluators=[ RuleBasedTerminatedEvaluator(), ], ) if partial_background_file: environment_messages = env.reset( options={"partial_background_file": partial_background_file} ) elif full_background_file: environment_messages = env.reset( options={"full_background_file": full_background_file} ) else: environment_messages = env.reset() agents = Agents() agents_model_names = [model_name_dict["agent1"], model_name_dict["agent2"]] for agent_name, agent_model in zip(env.agents, agents_model_names): if agent_model == "human": agents[agent_name] = HumanAgent(agent_name) elif mode == "speak": agents[agent_name] = SpeakAgent(agent_name, model_name=agent_model) else:
agents[agent_name] = LLMAgent(agent_name, model_name=agent_model)
2
2023-10-23 19:47:26+00:00
24k
f0uriest/interpax
tests/test_interpolate.py
[ { "identifier": "fft_interp1d", "path": "interpax/_fourier.py", "snippet": "@partial(jit, static_argnames=\"n\")\ndef fft_interp1d(f: jax.Array, n: int, sx: jax.Array = None, dx: float = 1.0):\n \"\"\"Interpolation of a 1d periodic function via FFT.\n\n Parameters\n ----------\n f : ndarray,...
import jax import jax.numpy as jnp import numpy as np import pytest from jax import config as jax_config from interpax import ( Interpolator1D, Interpolator2D, Interpolator3D, fft_interp1d, fft_interp2d, interp1d, interp2d, interp3d, )
16,854
y = np.linspace(0, 2 * np.pi, 200) xxp, yyp = np.meshgrid(xp, yp, indexing="ij") f = lambda x, y: np.array([np.sin(x) * np.cos(y), np.sin(x) + np.cos(y)]) fp = f(xxp.T, yyp.T).T fq = interp2d(x, y, xp, yp, fp, method="nearest") np.testing.assert_allclose(fq, f(x, y).T, rtol=1e-2, atol=1.2e-1) fq = interp2d(x, y, xp, yp, fp, method="linear") np.testing.assert_allclose(fq, f(x, y).T, rtol=1e-3, atol=1e-2) fq = interp2d(x, y, xp, yp, fp, method="cubic") np.testing.assert_allclose(fq, f(x, y).T, rtol=1e-5, atol=2e-3) class TestInterp3D: """Tests for interp3d function.""" @pytest.mark.unit @pytest.mark.parametrize( "x, y, z", [ ( np.linspace(0, np.pi, 1000), np.linspace(0, 2 * np.pi, 1000), np.linspace(0, 3, 1000), ), (0.0, 0.0, 0.0), ], ) def test_interp3d(self, x, y, z): """Test accuracy of different 3d interpolation methods.""" xp = np.linspace(0, np.pi, 20) yp = np.linspace(0, 2 * np.pi, 30) zp = np.linspace(0, 3, 25) xxp, yyp, zzp = np.meshgrid(xp, yp, zp, indexing="ij") f = lambda x, y, z: np.sin(x) * np.cos(y) * z**2 fp = f(xxp, yyp, zzp) interp1 = lambda xq, yq, zq, *args, **kwargs: interp3d( xq, yq, zq, *args, **kwargs ) interp2 = lambda xq, yq, zq, *args, **kwargs: Interpolator3D(*args, **kwargs)( xq, yq, zq ) for interp in [interp1, interp2]: fq = interp(x, y, z, xp, yp, zp, fp) np.testing.assert_allclose(fq, f(x, y, z), rtol=1e-5, atol=1e-2) fq = interp(x, y, z, xp, yp, zp, fp, method="nearest") np.testing.assert_allclose(fq, f(x, y, z), rtol=1e-2, atol=1) fq = interp(x, y, z, xp, yp, zp, fp, method="linear") np.testing.assert_allclose(fq, f(x, y, z), rtol=1e-3, atol=1e-1) atol = 5.5e-3 rtol = 1e-5 fq = interp(x, y, z, xp, yp, zp, fp, method="cubic") np.testing.assert_allclose(fq, f(x, y, z), rtol=rtol, atol=atol) fq = interp(x, y, z, xp, yp, zp, fp, method="cubic2") np.testing.assert_allclose(fq, f(x, y, z), rtol=rtol, atol=atol) fq = interp(x, y, z, xp, yp, zp, fp, method="catmull-rom") np.testing.assert_allclose(fq, f(x, y, z), rtol=rtol, atol=atol) fq = interp(x, y, z, xp, yp, zp, fp, method="cardinal") np.testing.assert_allclose(fq, f(x, y, z), rtol=rtol, atol=atol) @pytest.mark.unit def test_interp3d_vector_valued(self): """Test for interpolating vector valued function.""" x = np.linspace(0, np.pi, 1000) y = np.linspace(0, 2 * np.pi, 1000) z = np.linspace(0, 3, 1000) xp = np.linspace(0, np.pi, 20) yp = np.linspace(0, 2 * np.pi, 30) zp = np.linspace(0, 3, 25) xxp, yyp, zzp = np.meshgrid(xp, yp, zp, indexing="ij") f = lambda x, y, z: np.array( [np.sin(x) * np.cos(y) * z**2, 0.1 * (x + y - z)] ) fp = f(xxp.T, yyp.T, zzp.T).T fq = interp3d(x, y, z, xp, yp, zp, fp, method="nearest") np.testing.assert_allclose(fq, f(x, y, z).T, rtol=1e-2, atol=1) fq = interp3d(x, y, z, xp, yp, zp, fp, method="linear") np.testing.assert_allclose(fq, f(x, y, z).T, rtol=1e-3, atol=1e-1) fq = interp3d(x, y, z, xp, yp, zp, fp, method="cubic") np.testing.assert_allclose(fq, f(x, y, z).T, rtol=1e-5, atol=5e-3) @pytest.mark.unit def test_fft_interp1d(): """Test for 1d Fourier interpolation.""" def fun(x): return 2 * np.sin(1 * x) + 4 * np.cos(3 * x) + 1 x = {"o": {}, "e": {}} x["o"][1] = np.linspace(0, 2 * np.pi, 33, endpoint=False) x["e"][1] = np.linspace(0, 2 * np.pi, 32, endpoint=False) x["o"][2] = np.linspace(0, 2 * np.pi, 133, endpoint=False) x["e"][2] = np.linspace(0, 2 * np.pi, 132, endpoint=False) f1 = {} for p in ["o", "e"]: f1[p] = {} for i in [1, 2]: f1[p][i] = fun(x[p][i]) for sp in ["o", "e"]: # source parity fi = f1[sp][1] fs = fun(x[sp][1] + 0.2) np.testing.assert_allclose(
"""Tests for interpolation functions.""" jax_config.update("jax_enable_x64", True) class TestInterp1D: """Tests for interp1d function.""" @pytest.mark.unit @pytest.mark.parametrize( "x", [ np.linspace(0, 2 * np.pi, 10000), 0.0, ], ) def test_interp1d(self, x): """Test accuracy of different 1d interpolation methods.""" xp = np.linspace(0, 2 * np.pi, 100) f = lambda x: np.sin(x) fp = f(xp) interp1 = lambda xq, *args, **kwargs: interp1d(xq, *args, **kwargs) interp2 = lambda xq, *args, **kwargs: Interpolator1D(*args, **kwargs)(xq) for interp in [interp1, interp2]: fq = interp(x, xp, fp, method="nearest") np.testing.assert_allclose(fq, f(x), rtol=1e-2, atol=1e-1) fq = interp(x, xp, fp, method="linear") np.testing.assert_allclose(fq, f(x), rtol=1e-4, atol=1e-3) fq = interp(x, xp, fp, method="cubic") np.testing.assert_allclose(fq, f(x), rtol=1e-6, atol=1e-5) fq = interp(x, xp, fp, method="cubic2") np.testing.assert_allclose(fq, f(x), rtol=1e-6, atol=1e-5) fq = interp(x, xp, fp, method="cardinal") np.testing.assert_allclose(fq, f(x), rtol=1e-6, atol=1e-5) fq = interp(x, xp, fp, method="catmull-rom") np.testing.assert_allclose(fq, f(x), rtol=1e-6, atol=1e-5) fq = interp(x, xp, fp, method="monotonic") np.testing.assert_allclose(fq, f(x), rtol=1e-4, atol=1e-3) fq = interp(x, xp, fp, method="monotonic-0") np.testing.assert_allclose(fq, f(x), rtol=1e-4, atol=1e-2) @pytest.mark.unit def test_interp1d_vector_valued(self): """Test for interpolating vector valued function.""" xp = np.linspace(0, 2 * np.pi, 100) x = np.linspace(0, 2 * np.pi, 300)[10:-10] f = lambda x: np.array([np.sin(x), np.cos(x)]) fp = f(xp).T fq = interp1d(x, xp, fp, method="nearest") np.testing.assert_allclose(fq, f(x).T, rtol=1e-2, atol=1e-1) fq = interp1d(x, xp, fp, method="linear") np.testing.assert_allclose(fq, f(x).T, rtol=1e-4, atol=1e-3) fq = interp1d(x, xp, fp, method="cubic") np.testing.assert_allclose(fq, f(x).T, rtol=1e-6, atol=1e-5) fq = interp1d(x, xp, fp, method="cubic2") np.testing.assert_allclose(fq, f(x).T, rtol=1e-6, atol=1e-5) fq = interp1d(x, xp, fp, method="cardinal") np.testing.assert_allclose(fq, f(x).T, rtol=1e-6, atol=1e-5) fq = interp1d(x, xp, fp, method="catmull-rom") np.testing.assert_allclose(fq, f(x).T, rtol=1e-6, atol=1e-5) fq = interp1d(x, xp, fp, method="monotonic") np.testing.assert_allclose(fq, f(x).T, rtol=1e-4, atol=1e-3) fq = interp1d(x, xp, fp, method="monotonic-0") np.testing.assert_allclose(fq, f(x).T, rtol=1e-4, atol=1e-2) @pytest.mark.unit def test_interp1d_extrap_periodic(self): """Test extrapolation and periodic BC of 1d interpolation.""" xp = np.linspace(0, 2 * np.pi, 200) x = np.linspace(-1, 2 * np.pi + 1, 10000) f = lambda x: np.sin(x) fp = f(xp) fq = interp1d(x, xp, fp, method="cubic", extrap=False) assert np.isnan(fq[0]) assert np.isnan(fq[-1]) fq = interp1d(x, xp, fp, method="cubic", extrap=True) assert not np.isnan(fq[0]) assert not np.isnan(fq[-1]) fq = interp1d(x, xp, fp, method="cubic", period=2 * np.pi) np.testing.assert_allclose(fq, f(x), rtol=1e-6, atol=1e-2) @pytest.mark.unit def test_interp1d_monotonic(self): """Ensure monotonic interpolation is actually monotonic.""" # true function is just linear with a jump discontinuity at x=1.5 x = np.linspace(-4, 5, 10) f = np.heaviside(x - 1.5, 0) + 0.1 * x xq = np.linspace(-4, 5, 1000) dfc = interp1d(xq, x, f, derivative=1, method="cubic") dfm = interp1d(xq, x, f, derivative=1, method="monotonic") dfm0 = interp1d(xq, x, f, derivative=1, method="monotonic-0") assert dfc.min() < 0 # cubic interpolation undershoots, giving negative slope 
assert dfm.min() > 0 # monotonic interpolation doesn't assert dfm0.min() >= 0 # monotonic-0 doesn't overshoot either # ensure monotonic-0 has 0 slope at end points np.testing.assert_allclose(dfm0[np.array([0, -1])], 0, atol=1e-12) class TestInterp2D: """Tests for interp2d function.""" @pytest.mark.unit @pytest.mark.parametrize( "x, y", [ (np.linspace(0, 3 * np.pi, 1000), np.linspace(0, 2 * np.pi, 1000)), (0.0, 0.0), ], ) def test_interp2d(self, x, y): """Test accuracy of different 2d interpolation methods.""" xp = np.linspace(0, 3 * np.pi, 99) yp = np.linspace(0, 2 * np.pi, 40) xxp, yyp = np.meshgrid(xp, yp, indexing="ij") f = lambda x, y: np.sin(x) * np.cos(y) fp = f(xxp, yyp) interp1 = lambda xq, yq, *args, **kwargs: interp2d(xq, yq, *args, **kwargs) interp2 = lambda xq, yq, *args, **kwargs: Interpolator2D(*args, **kwargs)( xq, yq ) for interp in [interp1, interp2]: fq = interp( x, y, xp, yp, fp, method="nearest", period=(2 * np.pi, 2 * np.pi) ) np.testing.assert_allclose(fq, f(x, y), rtol=1e-2, atol=1) fq = interp( x, y, xp, yp, fp, method="linear", period=(2 * np.pi, 2 * np.pi) ) np.testing.assert_allclose(fq, f(x, y), rtol=1e-4, atol=1e-2) atol = 2e-3 rtol = 1e-5 fq = interp(x, y, xp, yp, fp, method="cubic", period=(2 * np.pi, 2 * np.pi)) np.testing.assert_allclose(fq, f(x, y), rtol=rtol, atol=atol) fq = interp( x, y, xp, yp, fp, method="cubic2", period=(2 * np.pi, 2 * np.pi) ) np.testing.assert_allclose(fq, f(x, y), rtol=rtol, atol=atol) fq = interp( x, y, xp, yp, fp, method="catmull-rom", period=(2 * np.pi, 2 * np.pi) ) np.testing.assert_allclose(fq, f(x, y), rtol=rtol, atol=atol) fq = interp( x, y, xp, yp, fp, method="cardinal", period=(2 * np.pi, 2 * np.pi) ) np.testing.assert_allclose(fq, f(x, y), rtol=rtol, atol=atol) @pytest.mark.unit def test_interp2d_vector_valued(self): """Test for interpolating vector valued function.""" xp = np.linspace(0, 3 * np.pi, 99) yp = np.linspace(0, 2 * np.pi, 40) x = np.linspace(0, 3 * np.pi, 200) y = np.linspace(0, 2 * np.pi, 200) xxp, yyp = np.meshgrid(xp, yp, indexing="ij") f = lambda x, y: np.array([np.sin(x) * np.cos(y), np.sin(x) + np.cos(y)]) fp = f(xxp.T, yyp.T).T fq = interp2d(x, y, xp, yp, fp, method="nearest") np.testing.assert_allclose(fq, f(x, y).T, rtol=1e-2, atol=1.2e-1) fq = interp2d(x, y, xp, yp, fp, method="linear") np.testing.assert_allclose(fq, f(x, y).T, rtol=1e-3, atol=1e-2) fq = interp2d(x, y, xp, yp, fp, method="cubic") np.testing.assert_allclose(fq, f(x, y).T, rtol=1e-5, atol=2e-3) class TestInterp3D: """Tests for interp3d function.""" @pytest.mark.unit @pytest.mark.parametrize( "x, y, z", [ ( np.linspace(0, np.pi, 1000), np.linspace(0, 2 * np.pi, 1000), np.linspace(0, 3, 1000), ), (0.0, 0.0, 0.0), ], ) def test_interp3d(self, x, y, z): """Test accuracy of different 3d interpolation methods.""" xp = np.linspace(0, np.pi, 20) yp = np.linspace(0, 2 * np.pi, 30) zp = np.linspace(0, 3, 25) xxp, yyp, zzp = np.meshgrid(xp, yp, zp, indexing="ij") f = lambda x, y, z: np.sin(x) * np.cos(y) * z**2 fp = f(xxp, yyp, zzp) interp1 = lambda xq, yq, zq, *args, **kwargs: interp3d( xq, yq, zq, *args, **kwargs ) interp2 = lambda xq, yq, zq, *args, **kwargs: Interpolator3D(*args, **kwargs)( xq, yq, zq ) for interp in [interp1, interp2]: fq = interp(x, y, z, xp, yp, zp, fp) np.testing.assert_allclose(fq, f(x, y, z), rtol=1e-5, atol=1e-2) fq = interp(x, y, z, xp, yp, zp, fp, method="nearest") np.testing.assert_allclose(fq, f(x, y, z), rtol=1e-2, atol=1) fq = interp(x, y, z, xp, yp, zp, fp, method="linear") np.testing.assert_allclose(fq, f(x, y, 
z), rtol=1e-3, atol=1e-1) atol = 5.5e-3 rtol = 1e-5 fq = interp(x, y, z, xp, yp, zp, fp, method="cubic") np.testing.assert_allclose(fq, f(x, y, z), rtol=rtol, atol=atol) fq = interp(x, y, z, xp, yp, zp, fp, method="cubic2") np.testing.assert_allclose(fq, f(x, y, z), rtol=rtol, atol=atol) fq = interp(x, y, z, xp, yp, zp, fp, method="catmull-rom") np.testing.assert_allclose(fq, f(x, y, z), rtol=rtol, atol=atol) fq = interp(x, y, z, xp, yp, zp, fp, method="cardinal") np.testing.assert_allclose(fq, f(x, y, z), rtol=rtol, atol=atol) @pytest.mark.unit def test_interp3d_vector_valued(self): """Test for interpolating vector valued function.""" x = np.linspace(0, np.pi, 1000) y = np.linspace(0, 2 * np.pi, 1000) z = np.linspace(0, 3, 1000) xp = np.linspace(0, np.pi, 20) yp = np.linspace(0, 2 * np.pi, 30) zp = np.linspace(0, 3, 25) xxp, yyp, zzp = np.meshgrid(xp, yp, zp, indexing="ij") f = lambda x, y, z: np.array( [np.sin(x) * np.cos(y) * z**2, 0.1 * (x + y - z)] ) fp = f(xxp.T, yyp.T, zzp.T).T fq = interp3d(x, y, z, xp, yp, zp, fp, method="nearest") np.testing.assert_allclose(fq, f(x, y, z).T, rtol=1e-2, atol=1) fq = interp3d(x, y, z, xp, yp, zp, fp, method="linear") np.testing.assert_allclose(fq, f(x, y, z).T, rtol=1e-3, atol=1e-1) fq = interp3d(x, y, z, xp, yp, zp, fp, method="cubic") np.testing.assert_allclose(fq, f(x, y, z).T, rtol=1e-5, atol=5e-3) @pytest.mark.unit def test_fft_interp1d(): """Test for 1d Fourier interpolation.""" def fun(x): return 2 * np.sin(1 * x) + 4 * np.cos(3 * x) + 1 x = {"o": {}, "e": {}} x["o"][1] = np.linspace(0, 2 * np.pi, 33, endpoint=False) x["e"][1] = np.linspace(0, 2 * np.pi, 32, endpoint=False) x["o"][2] = np.linspace(0, 2 * np.pi, 133, endpoint=False) x["e"][2] = np.linspace(0, 2 * np.pi, 132, endpoint=False) f1 = {} for p in ["o", "e"]: f1[p] = {} for i in [1, 2]: f1[p][i] = fun(x[p][i]) for sp in ["o", "e"]: # source parity fi = f1[sp][1] fs = fun(x[sp][1] + 0.2) np.testing.assert_allclose(
fs, fft_interp1d(fi, *fi.shape, sx=0.2, dx=np.diff(x[sp][1])[0]).squeeze()
0
2023-10-18 13:12:20+00:00
24k
city96/ComfyUI_ExtraModels
PixArt/sampler.py
[ { "identifier": "gaussian_diffusion", "path": "PixArt/sampling/gaussian_diffusion.py", "snippet": "def mean_flat(tensor):\n def is_vb(self):\ndef _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, warmup_frac):\ndef get_beta_schedule(beta_schedule, *, beta_start, beta_end, num_diffusion_time...
import torch import comfy.utils import latent_preview from .sampling import gaussian_diffusion as gd from .sampling.dpm_solver import model_wrapper, DPM_Solver, NoiseScheduleVP from comfy.sample import prepare_sampling, prepare_noise, cleanup_additional_models, get_models_from_cond
20,027
def sample_pixart(model, seed, steps, cfg, noise_schedule, noise_schedule_vp, positive, negative, latent_image): """ Mostly just a wrapper around the reference code. """ # prepare model noise = prepare_noise(latent_image, seed) real_model, _, _, _, models = prepare_sampling(model, noise.shape, positive, negative, noise_mask=None) # negative cond cond = positive[0][0] raw_uncond = negative[0][0] # Sampler seems to want the same dim for cond and uncond # truncate uncond to the length of cond # if shorter, pad uncond with y_null null_y = real_model.diffusion_model.y_embedder.y_embedding[None].repeat(latent_image.shape[0], 1, 1) uncond = null_y[:, :cond.shape[1], :] uncond[:, :raw_uncond.shape[1], :] = raw_uncond[:, :cond.shape[1], :] if raw_uncond.shape[1] > cond.shape[1]: print("PixArt: Warning. Your negative prompt is too long.") uncond[:, -1, :] = raw_uncond[:, -1, :] # add back EOS token # Move inputs cond = cond.to(model.load_device).to(real_model.diffusion_model.dtype) uncond = uncond.to(model.load_device).to(real_model.diffusion_model.dtype) noise = noise.to(model.load_device).to(real_model.diffusion_model.dtype) # preview pbar = comfy.utils.ProgressBar(steps) previewer = latent_preview.get_previewer(model.load_device, model.model.latent_format) ## Noise schedule.
def sample_pixart(model, seed, steps, cfg, noise_schedule, noise_schedule_vp, positive, negative, latent_image): """ Mostly just a wrapper around the reference code. """ # prepare model noise = prepare_noise(latent_image, seed) real_model, _, _, _, models = prepare_sampling(model, noise.shape, positive, negative, noise_mask=None) # negative cond cond = positive[0][0] raw_uncond = negative[0][0] # Sampler seems to want the same dim for cond and uncond # truncate uncond to the length of cond # if shorter, pad uncond with y_null null_y = real_model.diffusion_model.y_embedder.y_embedding[None].repeat(latent_image.shape[0], 1, 1) uncond = null_y[:, :cond.shape[1], :] uncond[:, :raw_uncond.shape[1], :] = raw_uncond[:, :cond.shape[1], :] if raw_uncond.shape[1] > cond.shape[1]: print("PixArt: Warning. Your negative prompt is too long.") uncond[:, -1, :] = raw_uncond[:, -1, :] # add back EOS token # Move inputs cond = cond.to(model.load_device).to(real_model.diffusion_model.dtype) uncond = uncond.to(model.load_device).to(real_model.diffusion_model.dtype) noise = noise.to(model.load_device).to(real_model.diffusion_model.dtype) # preview pbar = comfy.utils.ProgressBar(steps) previewer = latent_preview.get_previewer(model.load_device, model.model.latent_format) ## Noise schedule.
betas = torch.tensor(gd.get_named_beta_schedule(noise_schedule, 1000))
3
2023-10-20 21:19:44+00:00
24k
amitfin/oref_alert
custom_components/oref_alert/area_utils.py
[ { "identifier": "CITY_ALL_AREAS", "path": "custom_components/oref_alert/metadata/city_all_areas.py", "snippet": "CITY_ALL_AREAS = {\n \"אשדוד - כל האזורים\": [\n \"אשדוד - א,ב,ד,ה\",\n \"אשדוד - איזור תעשייה צפוני\",\n \"אשדוד - ג,ו,ז\",\n \"אשדוד - ח,ט,י,יג,יד,טז\",\n ...
from .metadata.city_all_areas import CITY_ALL_AREAS from .metadata.district_to_areas import DISTRICT_AREAS
18,992
"""Utilities for metadata information.""" def expand_areas_and_groups(areas_and_groups: list[str]) -> list[str]: """Expand groups (if exists) to areas.""" areas = [] for area_or_group in areas_and_groups: if area_or_group in CITY_ALL_AREAS: areas.extend(CITY_ALL_AREAS[area_or_group])
"""Utilities for metadata information.""" def expand_areas_and_groups(areas_and_groups: list[str]) -> list[str]: """Expand groups (if exists) to areas.""" areas = [] for area_or_group in areas_and_groups: if area_or_group in CITY_ALL_AREAS: areas.extend(CITY_ALL_AREAS[area_or_group])
elif area_or_group in DISTRICT_AREAS:
1
2023-10-18 11:16:41+00:00
24k
RobertCsordas/moe
tasks/simple/language_model/transformer_lm_mixin.py
[ { "identifier": "TransformerLanguageModel", "path": "models/transformer_language_model.py", "snippet": "class TransformerLanguageModel(LoggingLayer, torch.nn.Module):\n def __init__(self, voc_size: int, embedding_size: Optional[int], state_size: int, dropout: float,\n tied_embedding: ...
import framework import torch import torch.nn import torch.nn.functional as F import torch.utils.data import math from typing import List, Tuple, Dict, Any from models import TransformerLanguageModel from ... import task, args from layers.transformer import RelativeTransformerEncoderLayer, PrelnRelativeTransformerEncoderLayer from layers.transformer.relative_preln_kvmem_transformer import PrelnRelativeKVMemTransformerEncoderLayer from layers.transformer.relative_moe_transformer import RelativeMoeTransformerEncoderLayer from layers.transformer.topk_transformer import TopkTransformer from layers.moe_layer import MoE from interfaces import Result
18,710
parser.add_argument("-moe.perplexity_reg_mode", default="step", choice=["step", "global", "time", "global_time"]) parser.add_argument("-moe.reg_type", default="entropy", choice=["perplexity", "variance", "entropy", "l2", "switch", "normal"]) parser.add_argument("-moe.key_mode", default="moe", choice=["moe", "both", "shared"]) parser.add_argument("-moe.half_key", default=False) parser.add_argument("-moe.norm_keys", default=False) parser.add_argument("-moe.kmeans_distance", default='cosine', choice=['cosine', 'euclidean']) parser.add_argument("-moe.n_random", default=0) parser.add_argument("-moe.std_correction", default=False) parser.add_argument("-moe.topk_mode", default="full", choice=["full", "l1_approx", "approx"]) parser.add_argument("-moe.activation_after_topk", default=False) parser.add_argument("-moe.weight_grouping", default="none", choice=["none", "keys_only", "keys_and_experts"]) parser.add_argument("-moe.drop_parallel", default=True) parser.add_argument("-moe.mlp_selection", default=False) parser.add_argument("-moe.block_expert_sel_in_grad", default=False) parser.add_argument("-moe.classification_target", default="sum", choice=["sum", "max"]) parser.add_argument("-moe.recluster_steps", default="", parser=parser.int_list_parser) parser.add_argument("-moe.norm_key_init", default=False) parser.add_argument("-moe.norm_value_init", default=False) parser.add_argument("-moe.norm_expert_sel_init", default=False) parser.add_argument("-moe.norm_standard_parallel_values", default=False) parser.add_argument("-moe.identical_init", default=False) parser.add_argument("-moe.topological_sel_reg", default=0.0) parser.add_argument("-moe.topological_expert_reg", default=0.0) parser.add_argument("-moe.sel_lr_multipler", default=1.0) parser.add_argument("-moe.expert_lr_multipler", default=1.0) parser.add_argument("-moe.gumbel_select_only", default=False) parser.add_argument("-moe.topk_value_norm_compensation", default=False) parser.add_argument("-moe.norm_expert_scores", default=False) parser.add_argument("-moe.sel_input_cluster_init", default=False) parser.add_argument("-moe.init_norm_mode", default="full") parser.add_argument("-moe.bias", default=False) parser.add_argument("-moe.sel_bias", default=False) parser.add_argument("-moe.rescale_normed", default=False) parser.add_argument("-moe.sel_norm", default="none", choice=["none", "cos", "input", "weights"]) parser.add_argument("-moe.rescale_grads", default=False) parser.add_argument("-moe.gumbel_decay", default=0) parser.add_argument("-moe.sinkhorn_local", default=False) parser.add_argument("-moe.sinkhron_n_iters", default=3) parser.add_argument("-moe.dropout_factor", default=1.0) parser.add_argument("-moe.drop_expert", default=0.0) parser.add_argument("-moe.expert_size_init", default=False) parser.add_argument("-moe.sync_distributed", default=True) parser.add_argument("-moe.modulation_amplitude", default=0.5) parser.add_argument("-moe.invisible_selection", default=False) parser.add_argument("-moe.slope_multiplier", default=1.0) parser.add_argument("-moe.init_scale", default=1.0) parser.add_argument("-kvmem.linproj", default=False) parser.add_argument("-kvmem.head_merge_topk", default=False) parser.add_argument("-kvmem.load_balance", default=False) parser.add_argument("-kvmem.dropout", default="none", choice=["none", "early", "late", "weight", "score"]) parser.add_argument("-kvmem.randomize_indices", default=False) parser.add_argument("-kvmem.standard_parallel", default=False) parser.add_argument("-kvmem.query_bias", default=False) 
parser.add_argument("-kvmem.approx_topk", default=False) parser.add_argument("-kvmem.norm_values", default=False) parser.add_argument("-kvmem.factorize", default=False) parser.add_argument("-kvmem.full_key", default=False) parser.add_argument("-kvmem.key_redundancy_factor", default=1) parser.add_argument("-kvmem.two_stage", default=False) parser.add_argument("-kvmem.head_exclusive", default=False) parser.add_argument("-transformer.topk_value", default=32) parser.add_argument("-transformer.universal.nonshared", default=0) parser.add_argument("-transformer.topk_use_norm", default=True) parser.add_argument("-transformer.activation", default="relu", choice=["relu", "topk", "gelu", "identity", "sigmoid", "softmax"]) parser.add_argument("-transformer.p_drop_layer", default=0.0) parser.add_argument("-transformer.head_projection_size", default="none", parser=parser.int_or_none_parser) parser.add_argument("-transformer.ln_affine", default=True) parser.add_argument("-transformer.ln_after_attention", default=True) parser.add_argument("-transformer.output_mode", default="normal", choice=["normal", "sum", "geometric", "sigmoid"]) @task() class TransformerLMMixin: helper: framework.helpers.TrainingHelper def is_preln(self) -> bool: return "preln" in self.helper.args.transformer.variant def topk_activation(self, x: torch.Tensor) -> torch.Tensor: nx = -x return torch.masked_fill(x, nx <= nx.kthvalue(self.helper.args.transformer.topk_value, keepdim=True)[0], 0) def get_layers(self) -> List[torch.nn.Module]: # pyright: reportOptionalMemberAccess=false if self.helper.args.transformer.activation == "relu": activation = F.relu elif self.helper.args.transformer.activation == "topk": activation = self.topk_activation elif self.helper.args.transformer.activation == "identity": activation = lambda x: x elif self.helper.args.transformer.activation == "sigmoid": activation = torch.sigmoid elif self.helper.args.transformer.activation == "gelu": activation = F.gelu elif self.helper.args.transformer.activation == "softmax": activation = lambda x: F.softmax(x, dim=-1) else: raise ValueError(f"Invalid activation: {self.helper.args.transformer.activation}") base_args = dict( d_model=self.helper.args.state_size, nhead=self.helper.args.transformer.n_heads, dim_feedforward=int(self.helper.args.state_size * self.helper.args.transformer.ff_multiplier), dropout=self.helper.args.dropout, activation=activation ) extra_args = {} if not self.helper.args.transformer.variant.endswith("_gelu") else { "activation": F.gelu, "drop_expand": False } if self.helper.args.transformer.variant in {"preln_relative"}: mklayer = lambda: PrelnRelativeTransformerEncoderLayer( **base_args, **extra_args, test_pos_clamp=self.helper.args.lm.trafo.test_pos_clamp, n_layers=self.helper.args.transformer.encoder_n_layers, head_projection_size=self.helper.args.transformer.head_projection_size,) elif self.helper.args.transformer.variant in {"preln_topk"}:
@args def a(parser: framework.helpers.ArgumentParser): parser.add_argument("-lm.trafo.context_blocks", default=1) parser.add_argument("-lm.trafo.test_context_blocks", default="none", parser=parser.int_or_none_parser) parser.add_argument("-lm.trafo.test_pos_clamp", default="none", parser=parser.int_or_none_parser) parser.add_argument("-lm.trafo.same_length_eval", default=False) parser.add_argument("-lm.trafo.same_length", default=False) parser.add_argument("-lm.trafo.last_layer_context", default=False) parser.add_argument("-lm.trafo.xl_init", default=False) parser.add_argument("-lm.trafo.embedding_mode_init", default="default", choice=["default", "scale_to_sqrt_dmodel", "init_to_sqrt_dmodel", "one_and_scale_to_sqrt_dmodel", "like_preln"]) parser.add_argument("-pkm.n_keys", default="128", parser=parser.int_list_parser) parser.add_argument("-pkm.n_heads", default=1) parser.add_argument("-pkm.knn", default=32) parser.add_argument("-pkm.stochastic", default=False) parser.add_argument("-pkm.query_batchnorm", default=False) parser.add_argument("-pkm.custom_init", default=0) parser.add_argument("-pkm.slice_values", default=False) parser.add_argument("-pkm.slice_proj", default=False) parser.add_argument("-pkm.sample_smallest", default=False) parser.add_argument("-moe.n_experts", default=128) parser.add_argument("-moe.expert_size", default=128) parser.add_argument("-moe.selection_mode", default="add", choice=["add", "gate", "sigmoid", "gumbel", "hard_gumbel", "predict", "predict_mlp", "classify", "gumbel_sigmoid", "sinkhorn", "sinkhorn2", "sinkmoid", "sinkmax", "moe", "mul", "random", "sinkmoid2", "sinkmax2", "modulate"]) parser.add_argument("-moe.perplexity_reg", default=0.0) parser.add_argument("-moe.perplexity_reg_mode", default="step", choice=["step", "global", "time", "global_time"]) parser.add_argument("-moe.reg_type", default="entropy", choice=["perplexity", "variance", "entropy", "l2", "switch", "normal"]) parser.add_argument("-moe.key_mode", default="moe", choice=["moe", "both", "shared"]) parser.add_argument("-moe.half_key", default=False) parser.add_argument("-moe.norm_keys", default=False) parser.add_argument("-moe.kmeans_distance", default='cosine', choice=['cosine', 'euclidean']) parser.add_argument("-moe.n_random", default=0) parser.add_argument("-moe.std_correction", default=False) parser.add_argument("-moe.topk_mode", default="full", choice=["full", "l1_approx", "approx"]) parser.add_argument("-moe.activation_after_topk", default=False) parser.add_argument("-moe.weight_grouping", default="none", choice=["none", "keys_only", "keys_and_experts"]) parser.add_argument("-moe.drop_parallel", default=True) parser.add_argument("-moe.mlp_selection", default=False) parser.add_argument("-moe.block_expert_sel_in_grad", default=False) parser.add_argument("-moe.classification_target", default="sum", choice=["sum", "max"]) parser.add_argument("-moe.recluster_steps", default="", parser=parser.int_list_parser) parser.add_argument("-moe.norm_key_init", default=False) parser.add_argument("-moe.norm_value_init", default=False) parser.add_argument("-moe.norm_expert_sel_init", default=False) parser.add_argument("-moe.norm_standard_parallel_values", default=False) parser.add_argument("-moe.identical_init", default=False) parser.add_argument("-moe.topological_sel_reg", default=0.0) parser.add_argument("-moe.topological_expert_reg", default=0.0) parser.add_argument("-moe.sel_lr_multipler", default=1.0) parser.add_argument("-moe.expert_lr_multipler", default=1.0) 
parser.add_argument("-moe.gumbel_select_only", default=False) parser.add_argument("-moe.topk_value_norm_compensation", default=False) parser.add_argument("-moe.norm_expert_scores", default=False) parser.add_argument("-moe.sel_input_cluster_init", default=False) parser.add_argument("-moe.init_norm_mode", default="full") parser.add_argument("-moe.bias", default=False) parser.add_argument("-moe.sel_bias", default=False) parser.add_argument("-moe.rescale_normed", default=False) parser.add_argument("-moe.sel_norm", default="none", choice=["none", "cos", "input", "weights"]) parser.add_argument("-moe.rescale_grads", default=False) parser.add_argument("-moe.gumbel_decay", default=0) parser.add_argument("-moe.sinkhorn_local", default=False) parser.add_argument("-moe.sinkhron_n_iters", default=3) parser.add_argument("-moe.dropout_factor", default=1.0) parser.add_argument("-moe.drop_expert", default=0.0) parser.add_argument("-moe.expert_size_init", default=False) parser.add_argument("-moe.sync_distributed", default=True) parser.add_argument("-moe.modulation_amplitude", default=0.5) parser.add_argument("-moe.invisible_selection", default=False) parser.add_argument("-moe.slope_multiplier", default=1.0) parser.add_argument("-moe.init_scale", default=1.0) parser.add_argument("-kvmem.linproj", default=False) parser.add_argument("-kvmem.head_merge_topk", default=False) parser.add_argument("-kvmem.load_balance", default=False) parser.add_argument("-kvmem.dropout", default="none", choice=["none", "early", "late", "weight", "score"]) parser.add_argument("-kvmem.randomize_indices", default=False) parser.add_argument("-kvmem.standard_parallel", default=False) parser.add_argument("-kvmem.query_bias", default=False) parser.add_argument("-kvmem.approx_topk", default=False) parser.add_argument("-kvmem.norm_values", default=False) parser.add_argument("-kvmem.factorize", default=False) parser.add_argument("-kvmem.full_key", default=False) parser.add_argument("-kvmem.key_redundancy_factor", default=1) parser.add_argument("-kvmem.two_stage", default=False) parser.add_argument("-kvmem.head_exclusive", default=False) parser.add_argument("-transformer.topk_value", default=32) parser.add_argument("-transformer.universal.nonshared", default=0) parser.add_argument("-transformer.topk_use_norm", default=True) parser.add_argument("-transformer.activation", default="relu", choice=["relu", "topk", "gelu", "identity", "sigmoid", "softmax"]) parser.add_argument("-transformer.p_drop_layer", default=0.0) parser.add_argument("-transformer.head_projection_size", default="none", parser=parser.int_or_none_parser) parser.add_argument("-transformer.ln_affine", default=True) parser.add_argument("-transformer.ln_after_attention", default=True) parser.add_argument("-transformer.output_mode", default="normal", choice=["normal", "sum", "geometric", "sigmoid"]) @task() class TransformerLMMixin: helper: framework.helpers.TrainingHelper def is_preln(self) -> bool: return "preln" in self.helper.args.transformer.variant def topk_activation(self, x: torch.Tensor) -> torch.Tensor: nx = -x return torch.masked_fill(x, nx <= nx.kthvalue(self.helper.args.transformer.topk_value, keepdim=True)[0], 0) def get_layers(self) -> List[torch.nn.Module]: # pyright: reportOptionalMemberAccess=false if self.helper.args.transformer.activation == "relu": activation = F.relu elif self.helper.args.transformer.activation == "topk": activation = self.topk_activation elif self.helper.args.transformer.activation == "identity": activation = lambda x: x elif 
self.helper.args.transformer.activation == "sigmoid": activation = torch.sigmoid elif self.helper.args.transformer.activation == "gelu": activation = F.gelu elif self.helper.args.transformer.activation == "softmax": activation = lambda x: F.softmax(x, dim=-1) else: raise ValueError(f"Invalid activation: {self.helper.args.transformer.activation}") base_args = dict( d_model=self.helper.args.state_size, nhead=self.helper.args.transformer.n_heads, dim_feedforward=int(self.helper.args.state_size * self.helper.args.transformer.ff_multiplier), dropout=self.helper.args.dropout, activation=activation ) extra_args = {} if not self.helper.args.transformer.variant.endswith("_gelu") else { "activation": F.gelu, "drop_expand": False } if self.helper.args.transformer.variant in {"preln_relative"}: mklayer = lambda: PrelnRelativeTransformerEncoderLayer( **base_args, **extra_args, test_pos_clamp=self.helper.args.lm.trafo.test_pos_clamp, n_layers=self.helper.args.transformer.encoder_n_layers, head_projection_size=self.helper.args.transformer.head_projection_size,) elif self.helper.args.transformer.variant in {"preln_topk"}:
mklayer = lambda: TopkTransformer(
7
2023-10-16 11:26:45+00:00
24k
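Illustrative note on the record above: its topk_activation keeps only the k largest entries of each row (k comes from -transformer.topk_value, default 32) and zeroes the rest. Below is a minimal standalone sketch of that idea in PyTorch; the helper name and the toy shapes in the usage lines are my own, and tie-breaking may differ slightly from the kthvalue-based original.

import torch

def topk_activation(x: torch.Tensor, k: int = 32) -> torch.Tensor:
    # Keep the k largest values along the last dimension and zero out the rest.
    values, indices = torch.topk(x, k, dim=-1)
    out = torch.zeros_like(x)
    # Scatter the surviving values back into their original positions.
    out.scatter_(-1, indices, values)
    return out

# Usage: sparsify a batch of feed-forward pre-activations.
h = torch.randn(4, 128)
sparse_h = topk_activation(h, k=32)
assert (sparse_h != 0).sum(dim=-1).max() <= 32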
boppreh/hello_tls
src/hello_tls/scan.py
[ { "identifier": "ClientHello", "path": "src/hello_tls/protocol.py", "snippet": "class ScanError(Exception):\nclass ServerAlertError(ScanError):\nclass BadServerResponse(ScanError):\nclass ServerHello:\nclass ClientHello:\n def __init__(self, level: AlertLevel, description: AlertDescription):\ndef _ma...
from enum import Enum from multiprocessing.pool import ThreadPool from typing import Iterable, Union, List, Optional, Iterator, Callable, Any from urllib.parse import urlparse from datetime import datetime, timezone from .protocol import ClientHello, ScanError, make_client_hello, parse_server_hello, ServerAlertError, BadServerResponse, ServerHello, logger from .names_and_numbers import AlertDescription, CipherSuite, Group, Protocol, CompressionMethod from OpenSSL import SSL, crypto import socket import re import dataclasses import ssl, select
14,595
# Default number of workers/threads/concurrent connections to use. DEFAULT_MAX_WORKERS: int = 6 # Default socket connection timeout, in seconds. DEFAULT_TIMEOUT: float = 2 class DowngradeError(ScanError): """ Error for servers that attempt to downgrade beyond supported versions. """ pass class ConnectionError(ScanError): """ Class for error in resolving or connecting to a server. """ pass class ProxyError(ConnectionError): """ Class for errors in connecting through a proxy. """ pass @dataclasses.dataclass class ConnectionSettings: """ Settings for a connection to a server, including the host, port, and proxy. """ host: str port: int = 443 proxy: Optional[str] = None timeout_in_seconds: Optional[float] = DEFAULT_TIMEOUT date: datetime = dataclasses.field(default_factory=lambda: datetime.now(tz=timezone.utc).replace(microsecond=0)) def make_socket(settings: ConnectionSettings) -> socket.socket: """ Creates and connects a socket to the target server, through the chosen proxy if any. """ socket_host, socket_port = None, None # To appease the type checker. try: if not settings.proxy: socket_host, socket_port = settings.host, settings.port return socket.create_connection((socket_host, socket_port), timeout=settings.timeout_in_seconds) if not settings.proxy.startswith('http://'): raise ProxyError("Only HTTP proxies are supported at the moment.", settings.proxy) socket_host, socket_port = parse_target(settings.proxy, 80) sock = socket.create_connection((socket_host, socket_port), timeout=settings.timeout_in_seconds) sock.send(f"CONNECT {settings.host}:{settings.port} HTTP/1.1\r\nhost:{socket_host}\r\n\r\n".encode('utf-8')) sock_file = sock.makefile('r', newline='\r\n') line = sock_file.readline() if not re.fullmatch(r'HTTP/1\.[01] 200 Connection [Ee]stablished\r\n', line): sock_file.close() sock.close() raise ProxyError("Proxy refused the connection: ", line) while True: if sock_file.readline() == '\r\n': break return sock except TimeoutError as e: raise ConnectionError(f"Connection to {socket_host}:{socket_port} timed out after {settings.timeout_in_seconds} seconds") from e except socket.gaierror as e: raise ConnectionError(f"Could not resolve host {socket_host}") from e except socket.error as e: raise ConnectionError(f"Could not connect to {socket_host}:{socket_port}") from e def send_hello(connection_settings: ConnectionSettings, client_hello: ClientHello) -> ServerHello: """ Sends a Client Hello to the server, and returns the parsed ServerHello. Raises exceptions for the different alert messages the server can send. """ sock = make_socket(connection_settings) sock.send(make_client_hello(client_hello)) packet_stream = iter(lambda: sock.recv(4096), b'') server_hello = parse_server_hello(packet_stream) if server_hello.version not in client_hello.protocols: # Server picked a protocol we didn't ask for. logger.info(f"Server attempted to downgrade protocol to unsupported version {server_hello.version}") raise DowngradeError(f"Server attempted to downgrade from {client_hello.protocols} to {server_hello.version}") return server_hello def _iterate_server_option(connection_settings: ConnectionSettings, client_hello: ClientHello, request_option: str, response_option: str, on_response: Callable[[ServerHello], None] = lambda s: None) -> Iterator[Any]: """ Continually sends Client Hello packets to the server, removing the `response_option` from the list of options each time, until the server rejects the handshake. """ # We'll be mutating the list of options, so make a copy. 
options_to_test = list(getattr(client_hello, request_option)) # TODO: figure out how to have mypy accept this line. client_hello = dataclasses.replace(client_hello, **{request_option: options_to_test}) # type: ignore logger.info(f"Enumerating server {response_option} with {len(options_to_test)} options and protocols {client_hello.protocols}") while options_to_test: try: logger.debug(f"Offering {len(options_to_test)} {response_option} over {client_hello.protocols}: {options_to_test}") server_hello = send_hello(connection_settings, client_hello) on_response(server_hello) except DowngradeError: break
# Default number of workers/threads/concurrent connections to use. DEFAULT_MAX_WORKERS: int = 6 # Default socket connection timeout, in seconds. DEFAULT_TIMEOUT: float = 2 class DowngradeError(ScanError): """ Error for servers that attempt to downgrade beyond supported versions. """ pass class ConnectionError(ScanError): """ Class for error in resolving or connecting to a server. """ pass class ProxyError(ConnectionError): """ Class for errors in connecting through a proxy. """ pass @dataclasses.dataclass class ConnectionSettings: """ Settings for a connection to a server, including the host, port, and proxy. """ host: str port: int = 443 proxy: Optional[str] = None timeout_in_seconds: Optional[float] = DEFAULT_TIMEOUT date: datetime = dataclasses.field(default_factory=lambda: datetime.now(tz=timezone.utc).replace(microsecond=0)) def make_socket(settings: ConnectionSettings) -> socket.socket: """ Creates and connects a socket to the target server, through the chosen proxy if any. """ socket_host, socket_port = None, None # To appease the type checker. try: if not settings.proxy: socket_host, socket_port = settings.host, settings.port return socket.create_connection((socket_host, socket_port), timeout=settings.timeout_in_seconds) if not settings.proxy.startswith('http://'): raise ProxyError("Only HTTP proxies are supported at the moment.", settings.proxy) socket_host, socket_port = parse_target(settings.proxy, 80) sock = socket.create_connection((socket_host, socket_port), timeout=settings.timeout_in_seconds) sock.send(f"CONNECT {settings.host}:{settings.port} HTTP/1.1\r\nhost:{socket_host}\r\n\r\n".encode('utf-8')) sock_file = sock.makefile('r', newline='\r\n') line = sock_file.readline() if not re.fullmatch(r'HTTP/1\.[01] 200 Connection [Ee]stablished\r\n', line): sock_file.close() sock.close() raise ProxyError("Proxy refused the connection: ", line) while True: if sock_file.readline() == '\r\n': break return sock except TimeoutError as e: raise ConnectionError(f"Connection to {socket_host}:{socket_port} timed out after {settings.timeout_in_seconds} seconds") from e except socket.gaierror as e: raise ConnectionError(f"Could not resolve host {socket_host}") from e except socket.error as e: raise ConnectionError(f"Could not connect to {socket_host}:{socket_port}") from e def send_hello(connection_settings: ConnectionSettings, client_hello: ClientHello) -> ServerHello: """ Sends a Client Hello to the server, and returns the parsed ServerHello. Raises exceptions for the different alert messages the server can send. """ sock = make_socket(connection_settings) sock.send(make_client_hello(client_hello)) packet_stream = iter(lambda: sock.recv(4096), b'') server_hello = parse_server_hello(packet_stream) if server_hello.version not in client_hello.protocols: # Server picked a protocol we didn't ask for. logger.info(f"Server attempted to downgrade protocol to unsupported version {server_hello.version}") raise DowngradeError(f"Server attempted to downgrade from {client_hello.protocols} to {server_hello.version}") return server_hello def _iterate_server_option(connection_settings: ConnectionSettings, client_hello: ClientHello, request_option: str, response_option: str, on_response: Callable[[ServerHello], None] = lambda s: None) -> Iterator[Any]: """ Continually sends Client Hello packets to the server, removing the `response_option` from the list of options each time, until the server rejects the handshake. """ # We'll be mutating the list of options, so make a copy. 
options_to_test = list(getattr(client_hello, request_option)) # TODO: figure out how to have mypy accept this line. client_hello = dataclasses.replace(client_hello, **{request_option: options_to_test}) # type: ignore logger.info(f"Enumerating server {response_option} with {len(options_to_test)} options and protocols {client_hello.protocols}") while options_to_test: try: logger.debug(f"Offering {len(options_to_test)} {response_option} over {client_hello.protocols}: {options_to_test}") server_hello = send_hello(connection_settings, client_hello) on_response(server_hello) except DowngradeError: break
except ServerAlertError as error:
0
2023-10-21 02:00:13+00:00
24k
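Illustrative note on the hello_tls record above: make_socket tunnels through an HTTP proxy by sending a raw CONNECT request and accepting the socket only after a 200 status line. The sketch below shows that handshake under the same assumptions (plain HTTP proxy, no authentication); the function name and the error handling are mine, not the library's API.

import re
import socket

def open_via_http_proxy(proxy_host: str, proxy_port: int,
                        target_host: str, target_port: int,
                        timeout: float = 2.0) -> socket.socket:
    # Connect to the proxy, then ask it to open a raw TCP tunnel to the target.
    sock = socket.create_connection((proxy_host, proxy_port), timeout=timeout)
    request = f"CONNECT {target_host}:{target_port} HTTP/1.1\r\nHost: {target_host}\r\n\r\n"
    sock.sendall(request.encode("ascii"))

    # Read the proxy's response headers up to the blank line that ends them.
    reader = sock.makefile("r", newline="\r\n")
    status_line = reader.readline()
    if not re.fullmatch(r"HTTP/1\.[01] 200 .*\r\n", status_line):
        reader.close()
        sock.close()
        raise OSError(f"Proxy refused CONNECT: {status_line.strip()}")
    line = reader.readline()
    while line not in ("\r\n", ""):
        line = reader.readline()
    return sock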
zhaojw1998/AccoMontage-3
arrangement_utils.py
[ { "identifier": "split_phrases", "path": "piano_arranger/acc_utils.py", "snippet": "def split_phrases(segmentation):\n \"\"\"Split a phrase label string into individual phrase meta info\"\"\"\n if '\\n' not in segmentation:\n segmentation += '\\n'\n phrases = []\n lengths = []\n cu...
import os import pretty_midi as pyd import numpy as np import torch import piano_arranger.format_converter as cvt from torch.utils.data import DataLoader from scipy.interpolate import interp1d from tqdm import tqdm from piano_arranger.acc_utils import split_phrases from piano_arranger.models import DisentangleVAE from piano_arranger.AccoMontage import find_by_length, dp_search, re_harmonization, get_texture_filter, ref_spotlight from orchestrator import Slakh2100_Pop909_Dataset, collate_fn, compute_pr_feat, EMBED_PROGRAM_MAPPING, Prior from orchestrator.QA_dataset import SLAKH_CLASS_PROGRAMS from orchestrator.utils import grid2pr, pr2grid, matrix2midi, midi2matrix from orchestrator.prior_dataset import TOTAL_LEN_BIN, ABS_POS_BIN, REL_POS_BIN
18,914
SLAKH_CLASS_MAPPING = {v: k for k, v in EMBED_PROGRAM_MAPPING.items()} def load_premise(DATA_FILE_ROOT, DEVICE): """Load AccoMontage Search Space""" print('Loading AccoMontage piano texture search space. This may take 1 or 2 minutes ...') data = np.load(os.path.join(DATA_FILE_ROOT, 'phrase_data.npz'), allow_pickle=True) melody = data['melody'] acc = data['acc'] chord = data['chord'] vel = data['velocity'] cc = data['cc'] acc_pool = {} for LEN in tqdm(range(2, 13)): (mel, acc_, chord_, vel_, cc_, song_reference) = find_by_length(melody, acc, chord, vel, cc, LEN) acc_pool[LEN] = (mel, acc_, chord_, vel_, cc_, song_reference) texture_filter = get_texture_filter(acc_pool) edge_weights=np.load(os.path.join(DATA_FILE_ROOT, 'edge_weights.npz'), allow_pickle=True) """Load Q&A Prompt Search Space""" print('loading orchestration prompt search space ...') slakh_dir = os.path.join(DATA_FILE_ROOT, 'Slakh2100_inference_set') dataset = Slakh2100_Pop909_Dataset(slakh_dir=slakh_dir, pop909_dir=None, debug_mode=False, split='validation', mode='train') loader = DataLoader(dataset, batch_size=1, shuffle=True, collate_fn=lambda b:collate_fn(b, DEVICE)) REF = [] REF_PROG = [] REF_MIX = [] for (_, prog, function, _, _, _) in loader: prog = prog[0, :] REF.extend([batch for batch in function]) REF_PROG.extend([prog for _ in range(len(function))]) REF_MIX.append(torch.sum(function, dim=1)) REF_MIX = torch.cat(REF_MIX, dim=0) """Initialize orchestration model (Prior + Q&A)""" print('Initialize model ...') prior_model_path = os.path.join(DATA_FILE_ROOT, 'params_prior.pt') QaA_model_path = os.path.join(DATA_FILE_ROOT, 'params_qa.pt') orchestrator = Prior.init_inference_model(prior_model_path, QaA_model_path, DEVICE=DEVICE) orchestrator.to(DEVICE) orchestrator.eval() piano_arranger = DisentangleVAE.init_model(torch.device('cuda')).cuda() piano_arranger.load_state_dict(torch.load(os.path.join(DATA_FILE_ROOT, 'params_reharmonizer.pt'))) print('Finished.') return piano_arranger, orchestrator, (acc_pool, edge_weights, texture_filter), (REF, REF_PROG, REF_MIX) def read_lead_sheet(DEMO_ROOT, SONG_NAME, SEGMENTATION, NOTE_SHIFT, melody_track_ID=0): melody_roll, chord_roll = cvt.leadsheet2matrix(os.path.join(DEMO_ROOT, SONG_NAME, 'lead sheet.mid'), melody_track_ID) assert(len(melody_roll == len(chord_roll))) if NOTE_SHIFT != 0: melody_roll = melody_roll[int(NOTE_SHIFT*4):, :] chord_roll = chord_roll[int(NOTE_SHIFT*4):, :] if len(melody_roll) % 16 != 0: pad_len = (len(melody_roll)//16+1)*16-len(melody_roll) melody_roll = np.pad(melody_roll, ((0, pad_len), (0, 0))) melody_roll[-pad_len:, -1] = 1 chord_roll = np.pad(chord_roll, ((0, pad_len), (0, 0))) chord_roll[-pad_len:, 0] = -1 chord_roll[-pad_len:, -1] = -1 CHORD_TABLE = np.stack([cvt.expand_chord(chord) for chord in chord_roll[::4]], axis=0) LEADSHEET = np.concatenate((melody_roll, chord_roll[:, 1: -1]), axis=-1) #T*142, quantized at 16th query_phrases = split_phrases(SEGMENTATION) #[('A', 8, 0), ('A', 8, 8), ('B', 8, 16), ('B', 8, 24)] midi_len = len(LEADSHEET)//16 anno_len = sum([item[1] for item in query_phrases]) if midi_len > anno_len: LEADSHEET = LEADSHEET[: anno_len*16] CHORD_TABLE = CHORD_TABLE[: anno_len*4] print(f'Mismatch warning: Detect {midi_len} bars in the lead sheet (MIDI) and {anno_len} bars in the provided phrase annotation. 
The lead sheet is truncated to {anno_len} bars.') elif midi_len < anno_len: pad_len = (anno_len - midi_len)*16 LEADSHEET = np.pad(LEADSHEET, ((0, pad_len), (0, 0))) LEADSHEET[-pad_len:, 129] = 1 CHORD_TABLE = np.pad(CHORD_TABLE, ((0, pad_len//4), (0, 0))) CHORD_TABLE[-pad_len//4:, 11] = -1 CHORD_TABLE[-pad_len//4:, -1] = -1 print(f'Mismatch warning: Detect {midi_len} bars in the lead sheet (MIDI) and {anno_len} bars in the provided phrase annotation. The lead sheet is padded to {anno_len} bars.') melody_queries = [] for item in query_phrases: start_bar = item[-1] length = item[-2] segment = LEADSHEET[start_bar*16: (start_bar+length)*16] melody_queries.append(segment) #melody queries: list of T16*142, segmented by phrases return (LEADSHEET, CHORD_TABLE, melody_queries, query_phrases) def piano_arrangement(pianoRoll, chord_table, melody_queries, query_phrases, acc_pool, edge_weights, texture_filter, piano_arranger, PREFILTER, tempo=100): print('Phrasal Unit selection begins:\n\t', f'{len(query_phrases)} phrases in the lead sheet;\n\t', f'set note density filter: {PREFILTER}.') phrase_indice, chord_shift = dp_search( melody_queries, query_phrases, acc_pool, edge_weights, texture_filter, filter_id=PREFILTER) path = phrase_indice[0] shift = chord_shift[0] print('Re-harmonization begins ...') midi_recon, acc = re_harmonization(pianoRoll, chord_table, query_phrases, path, shift, acc_pool, model=piano_arranger, get_est=True, tempo=tempo) acc = np.array([grid2pr(matrix) for matrix in acc]) print('Piano accompaiment generated!') return midi_recon, acc def prompt_sampling(acc_piano, REF, REF_PROG, REF_MIX, DEVICE='cuda:0'):
SLAKH_CLASS_MAPPING = {v: k for k, v in EMBED_PROGRAM_MAPPING.items()} def load_premise(DATA_FILE_ROOT, DEVICE): """Load AccoMontage Search Space""" print('Loading AccoMontage piano texture search space. This may take 1 or 2 minutes ...') data = np.load(os.path.join(DATA_FILE_ROOT, 'phrase_data.npz'), allow_pickle=True) melody = data['melody'] acc = data['acc'] chord = data['chord'] vel = data['velocity'] cc = data['cc'] acc_pool = {} for LEN in tqdm(range(2, 13)): (mel, acc_, chord_, vel_, cc_, song_reference) = find_by_length(melody, acc, chord, vel, cc, LEN) acc_pool[LEN] = (mel, acc_, chord_, vel_, cc_, song_reference) texture_filter = get_texture_filter(acc_pool) edge_weights=np.load(os.path.join(DATA_FILE_ROOT, 'edge_weights.npz'), allow_pickle=True) """Load Q&A Prompt Search Space""" print('loading orchestration prompt search space ...') slakh_dir = os.path.join(DATA_FILE_ROOT, 'Slakh2100_inference_set') dataset = Slakh2100_Pop909_Dataset(slakh_dir=slakh_dir, pop909_dir=None, debug_mode=False, split='validation', mode='train') loader = DataLoader(dataset, batch_size=1, shuffle=True, collate_fn=lambda b:collate_fn(b, DEVICE)) REF = [] REF_PROG = [] REF_MIX = [] for (_, prog, function, _, _, _) in loader: prog = prog[0, :] REF.extend([batch for batch in function]) REF_PROG.extend([prog for _ in range(len(function))]) REF_MIX.append(torch.sum(function, dim=1)) REF_MIX = torch.cat(REF_MIX, dim=0) """Initialize orchestration model (Prior + Q&A)""" print('Initialize model ...') prior_model_path = os.path.join(DATA_FILE_ROOT, 'params_prior.pt') QaA_model_path = os.path.join(DATA_FILE_ROOT, 'params_qa.pt') orchestrator = Prior.init_inference_model(prior_model_path, QaA_model_path, DEVICE=DEVICE) orchestrator.to(DEVICE) orchestrator.eval() piano_arranger = DisentangleVAE.init_model(torch.device('cuda')).cuda() piano_arranger.load_state_dict(torch.load(os.path.join(DATA_FILE_ROOT, 'params_reharmonizer.pt'))) print('Finished.') return piano_arranger, orchestrator, (acc_pool, edge_weights, texture_filter), (REF, REF_PROG, REF_MIX) def read_lead_sheet(DEMO_ROOT, SONG_NAME, SEGMENTATION, NOTE_SHIFT, melody_track_ID=0): melody_roll, chord_roll = cvt.leadsheet2matrix(os.path.join(DEMO_ROOT, SONG_NAME, 'lead sheet.mid'), melody_track_ID) assert(len(melody_roll == len(chord_roll))) if NOTE_SHIFT != 0: melody_roll = melody_roll[int(NOTE_SHIFT*4):, :] chord_roll = chord_roll[int(NOTE_SHIFT*4):, :] if len(melody_roll) % 16 != 0: pad_len = (len(melody_roll)//16+1)*16-len(melody_roll) melody_roll = np.pad(melody_roll, ((0, pad_len), (0, 0))) melody_roll[-pad_len:, -1] = 1 chord_roll = np.pad(chord_roll, ((0, pad_len), (0, 0))) chord_roll[-pad_len:, 0] = -1 chord_roll[-pad_len:, -1] = -1 CHORD_TABLE = np.stack([cvt.expand_chord(chord) for chord in chord_roll[::4]], axis=0) LEADSHEET = np.concatenate((melody_roll, chord_roll[:, 1: -1]), axis=-1) #T*142, quantized at 16th query_phrases = split_phrases(SEGMENTATION) #[('A', 8, 0), ('A', 8, 8), ('B', 8, 16), ('B', 8, 24)] midi_len = len(LEADSHEET)//16 anno_len = sum([item[1] for item in query_phrases]) if midi_len > anno_len: LEADSHEET = LEADSHEET[: anno_len*16] CHORD_TABLE = CHORD_TABLE[: anno_len*4] print(f'Mismatch warning: Detect {midi_len} bars in the lead sheet (MIDI) and {anno_len} bars in the provided phrase annotation. 
The lead sheet is truncated to {anno_len} bars.') elif midi_len < anno_len: pad_len = (anno_len - midi_len)*16 LEADSHEET = np.pad(LEADSHEET, ((0, pad_len), (0, 0))) LEADSHEET[-pad_len:, 129] = 1 CHORD_TABLE = np.pad(CHORD_TABLE, ((0, pad_len//4), (0, 0))) CHORD_TABLE[-pad_len//4:, 11] = -1 CHORD_TABLE[-pad_len//4:, -1] = -1 print(f'Mismatch warning: Detect {midi_len} bars in the lead sheet (MIDI) and {anno_len} bars in the provided phrase annotation. The lead sheet is padded to {anno_len} bars.') melody_queries = [] for item in query_phrases: start_bar = item[-1] length = item[-2] segment = LEADSHEET[start_bar*16: (start_bar+length)*16] melody_queries.append(segment) #melody queries: list of T16*142, segmented by phrases return (LEADSHEET, CHORD_TABLE, melody_queries, query_phrases) def piano_arrangement(pianoRoll, chord_table, melody_queries, query_phrases, acc_pool, edge_weights, texture_filter, piano_arranger, PREFILTER, tempo=100): print('Phrasal Unit selection begins:\n\t', f'{len(query_phrases)} phrases in the lead sheet;\n\t', f'set note density filter: {PREFILTER}.') phrase_indice, chord_shift = dp_search( melody_queries, query_phrases, acc_pool, edge_weights, texture_filter, filter_id=PREFILTER) path = phrase_indice[0] shift = chord_shift[0] print('Re-harmonization begins ...') midi_recon, acc = re_harmonization(pianoRoll, chord_table, query_phrases, path, shift, acc_pool, model=piano_arranger, get_est=True, tempo=tempo) acc = np.array([grid2pr(matrix) for matrix in acc]) print('Piano accompaiment generated!') return midi_recon, acc def prompt_sampling(acc_piano, REF, REF_PROG, REF_MIX, DEVICE='cuda:0'):
ref_mix = torch.from_numpy(compute_pr_feat(acc_piano[0:1])[-1]).to(DEVICE)
9
2023-10-23 12:36:57+00:00
24k
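Illustrative note on the AccoMontage-3 record above: read_lead_sheet pads the melody and chord rolls so their length is a whole number of bars (16 sixteenth-note steps per bar) before splitting them into phrases. A minimal numpy sketch of that padding step follows; the function name and the 130-column example roll are assumptions, and unlike the original it does not additionally mark the padded steps as rests or empty chords.

import numpy as np

def pad_to_bar_multiple(roll: np.ndarray, steps_per_bar: int = 16) -> np.ndarray:
    # Pad a (T, D) piano-roll with zero rows so T becomes a multiple of steps_per_bar.
    remainder = len(roll) % steps_per_bar
    if remainder == 0:
        return roll
    pad_len = steps_per_bar - remainder
    return np.pad(roll, ((0, pad_len), (0, 0)))

# Usage: a 100-step roll with 130 columns becomes 112 steps, i.e. 7 full bars.
roll = np.zeros((100, 130))
padded = pad_to_bar_multiple(roll)
assert padded.shape == (112, 130)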
liuqidong07/MOELoRA-peft
src/MLoRA/peft/peft_model.py
[ { "identifier": "PeftConfig", "path": "src/MLoRA/peft/utils/config.py", "snippet": "class PeftConfig(PeftConfigMixin):\n \"\"\"\n This is the base configuration class to store the configuration of a [`PeftModel`].\n\n Args:\n peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The...
import inspect import os import warnings import torch import torch.nn as nn from contextlib import contextmanager from accelerate import dispatch_model, infer_auto_device_map from accelerate.hooks import AlignDevicesHook, add_hook_to_module, remove_hook_from_submodules from accelerate.utils import get_balanced_memory from huggingface_hub import hf_hub_download from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from transformers import PreTrainedModel from transformers.modeling_outputs import SequenceClassifierOutput, TokenClassifierOutput from transformers.utils import PushToHubMixin from .utils import PeftConfig from .shared import Gate, GateN from .tuners import ( AdaLoraModel, AdaptionPromptModel, LoraModel, PrefixEncoder, PromptEmbedding, PromptEncoder, MMOELoraModelS, ) from .utils import ( TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING, WEIGHTS_NAME, PeftConfig, PeftType, PromptLearningConfig, TaskType, _set_adapter, _set_trainable, get_peft_model_state_dict, set_peft_model_state_dict, shift_tokens_right, ) from .mapping import MODEL_TYPE_TO_PEFT_MODEL_MAPPING, PEFT_TYPE_TO_CONFIG_MAPPING from .mapping import PEFT_TYPE_TO_CONFIG_MAPPING
14,482
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. PEFT_TYPE_TO_MODEL_MAPPING = { PeftType.LORA: LoraModel, PeftType.PROMPT_TUNING: PromptEmbedding, PeftType.P_TUNING: PromptEncoder, PeftType.PREFIX_TUNING: PrefixEncoder,
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. PEFT_TYPE_TO_MODEL_MAPPING = { PeftType.LORA: LoraModel, PeftType.PROMPT_TUNING: PromptEmbedding, PeftType.P_TUNING: PromptEncoder, PeftType.PREFIX_TUNING: PrefixEncoder,
PeftType.ADALORA: AdaLoraModel,
5
2023-10-19 10:55:50+00:00
24k
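Illustrative note on the MOELoRA record above: PEFT_TYPE_TO_MODEL_MAPPING is a dictionary from the PeftType enum to tuner classes, so the right wrapper can be picked by a lookup instead of an if/elif chain. Below is a generic sketch of that dispatch pattern with made-up adapter names; it is not the peft API.

from enum import Enum

class AdapterType(str, Enum):
    LORA = "LORA"
    PREFIX_TUNING = "PREFIX_TUNING"

class LoraAdapter:
    def __init__(self, base_model):
        self.base_model = base_model

class PrefixAdapter:
    def __init__(self, base_model):
        self.base_model = base_model

# Single source of truth mapping each adapter type to the class that implements it.
ADAPTER_TYPE_TO_CLS = {
    AdapterType.LORA: LoraAdapter,
    AdapterType.PREFIX_TUNING: PrefixAdapter,
}

def build_adapter(adapter_type: AdapterType, base_model):
    # Dispatch on the enum value instead of a chain of if/elif branches.
    return ADAPTER_TYPE_TO_CLS[adapter_type](base_model)

adapter = build_adapter(AdapterType.LORA, base_model=object())
assert isinstance(adapter, LoraAdapter)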
YuroFR/freqtrade-modded-crypto-trading-bot
tests/test_integration.py
[ { "identifier": "ExitCheckTuple", "path": "freqtrade/enums/exitchecktuple.py", "snippet": "class ExitCheckTuple:\n \"\"\"\n NamedTuple for Exit type + reason\n \"\"\"\n exit_type: ExitType\n exit_reason: str = ''\n\n def __init__(self, exit_type: ExitType, exit_reason: str = ''):\n ...
from unittest.mock import MagicMock from sqlalchemy import select from freqtrade.enums import ExitCheckTuple, ExitType, TradingMode from freqtrade.persistence import Trade from freqtrade.persistence.models import Order from freqtrade.rpc.rpc import RPC from tests.conftest import EXMS, get_patched_freqtradebot, log_has_re, patch_get_signal import pytest
19,516
def test_may_execute_exit_stoploss_on_exchange_multi(default_conf, ticker, fee, limit_buy_order, mocker) -> None: """ Tests workflow of selling stoploss_on_exchange. Sells * first trade as stoploss * 2nd trade is kept * 3rd trade is sold via sell-signal """ default_conf['max_open_trades'] = 3 default_conf['exchange']['name'] = 'binance' stoploss = { 'id': 123, 'info': {} } stoploss_order_open = { "id": "123", "timestamp": 1542707426845, "datetime": "2018-11-20T09:50:26.845Z", "lastTradeTimestamp": None, "symbol": "BTC/USDT", "type": "stop_loss_limit", "side": "sell", "price": 1.08801, "amount": 91.07468123, "cost": 0.0, "average": 0.0, "filled": 0.0, "remaining": 0.0, "status": "open", "fee": None, "trades": None } stoploss_order_closed = stoploss_order_open.copy() stoploss_order_closed['status'] = 'closed' stoploss_order_closed['filled'] = stoploss_order_closed['amount'] # Sell first trade based on stoploss, keep 2nd and 3rd trade open stop_orders = [stoploss_order_closed, stoploss_order_open, stoploss_order_open] stoploss_order_mock = MagicMock( side_effect=stop_orders) # Sell 3rd trade (not called for the first trade) should_sell_mock = MagicMock(side_effect=[ [], [ExitCheckTuple(exit_type=ExitType.EXIT_SIGNAL)]] ) cancel_order_mock = MagicMock() mocker.patch.multiple( EXMS, create_stoploss=stoploss, fetch_ticker=ticker, get_fee=fee, amount_to_precision=lambda s, x, y: y, price_to_precision=lambda s, x, y: y, fetch_stoploss_order=stoploss_order_mock, cancel_stoploss_order_with_result=cancel_order_mock, ) mocker.patch.multiple( 'freqtrade.freqtradebot.FreqtradeBot', create_stoploss_order=MagicMock(return_value=True), _notify_exit=MagicMock(), ) mocker.patch("freqtrade.strategy.interface.IStrategy.should_exit", should_sell_mock) wallets_mock = mocker.patch("freqtrade.wallets.Wallets.update") mocker.patch("freqtrade.wallets.Wallets.get_free", return_value=1000) mocker.patch("freqtrade.wallets.Wallets.check_exit_amount", return_value=True)
def test_may_execute_exit_stoploss_on_exchange_multi(default_conf, ticker, fee, limit_buy_order, mocker) -> None: """ Tests workflow of selling stoploss_on_exchange. Sells * first trade as stoploss * 2nd trade is kept * 3rd trade is sold via sell-signal """ default_conf['max_open_trades'] = 3 default_conf['exchange']['name'] = 'binance' stoploss = { 'id': 123, 'info': {} } stoploss_order_open = { "id": "123", "timestamp": 1542707426845, "datetime": "2018-11-20T09:50:26.845Z", "lastTradeTimestamp": None, "symbol": "BTC/USDT", "type": "stop_loss_limit", "side": "sell", "price": 1.08801, "amount": 91.07468123, "cost": 0.0, "average": 0.0, "filled": 0.0, "remaining": 0.0, "status": "open", "fee": None, "trades": None } stoploss_order_closed = stoploss_order_open.copy() stoploss_order_closed['status'] = 'closed' stoploss_order_closed['filled'] = stoploss_order_closed['amount'] # Sell first trade based on stoploss, keep 2nd and 3rd trade open stop_orders = [stoploss_order_closed, stoploss_order_open, stoploss_order_open] stoploss_order_mock = MagicMock( side_effect=stop_orders) # Sell 3rd trade (not called for the first trade) should_sell_mock = MagicMock(side_effect=[ [], [ExitCheckTuple(exit_type=ExitType.EXIT_SIGNAL)]] ) cancel_order_mock = MagicMock() mocker.patch.multiple( EXMS, create_stoploss=stoploss, fetch_ticker=ticker, get_fee=fee, amount_to_precision=lambda s, x, y: y, price_to_precision=lambda s, x, y: y, fetch_stoploss_order=stoploss_order_mock, cancel_stoploss_order_with_result=cancel_order_mock, ) mocker.patch.multiple( 'freqtrade.freqtradebot.FreqtradeBot', create_stoploss_order=MagicMock(return_value=True), _notify_exit=MagicMock(), ) mocker.patch("freqtrade.strategy.interface.IStrategy.should_exit", should_sell_mock) wallets_mock = mocker.patch("freqtrade.wallets.Wallets.update") mocker.patch("freqtrade.wallets.Wallets.get_free", return_value=1000) mocker.patch("freqtrade.wallets.Wallets.check_exit_amount", return_value=True)
freqtrade = get_patched_freqtradebot(mocker, default_conf)
7
2023-10-21 10:02:05+00:00
24k
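Illustrative note on the freqtrade test record above: it relies on MagicMock(side_effect=[...]) so that each successive call returns the next canned value (first a closed stoploss order, then open ones). A self-contained sketch of that mocking pattern with hypothetical order dictionaries:

from unittest.mock import MagicMock

# With side_effect set to a list, each call to the mock returns the next element.
fetch_stoploss_order = MagicMock(side_effect=[
    {"id": "1", "status": "closed"},
    {"id": "2", "status": "open"},
    {"id": "3", "status": "open"},
])

assert fetch_stoploss_order()["status"] == "closed"
assert fetch_stoploss_order()["status"] == "open"
assert fetch_stoploss_order()["status"] == "open"
assert fetch_stoploss_order.call_count == 3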
yanzhh/HGERE
transformers/src/transformers/modeling_bert.py
[ { "identifier": "gelu", "path": "transformers/src/transformers/activations.py", "snippet": "def swish(x):\ndef _gelu_python(x):\ndef gelu_new(x):\ndef get_activation(activation_string):\nACT2FN = {\n \"relu\": F.relu,\n \"swish\": swish,\n \"gelu\": gelu,\n \"tanh\": F.tanh,\n \"gelu_new\...
import logging import math import os import torch import torch.nn.functional as F import pdb import re import numpy as np import tensorflow as tf import pdb from symbol import factor from tkinter import E from torch import nn from torch.nn import CrossEntropyLoss, MSELoss, BCEWithLogitsLoss from torch.nn.utils.rnn import pad_sequence from .modules import * from .activations import gelu, gelu_new, swish from .configuration_bert import BertConfig from .file_utils import add_start_docstrings, add_start_docstrings_to_callable from .modeling_utils import PreTrainedModel, prune_linear_layer
16,669
self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states class BertOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = BertLMPredictionHead(config) def forward(self, sequence_output): prediction_scores = self.predictions(sequence_output) return prediction_scores class BertOnlyNSPHead(nn.Module): def __init__(self, config): super().__init__() self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, pooled_output): seq_relationship_score = self.seq_relationship(pooled_output) return seq_relationship_score class BertPreTrainingHeads(nn.Module): def __init__(self, config): super().__init__() self.predictions = BertLMPredictionHead(config) self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, sequence_output, pooled_output): prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) return prediction_scores, seq_relationship_score class BertPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = BertConfig pretrained_model_archive_map = BERT_PRETRAINED_MODEL_ARCHIVE_MAP load_tf_weights = load_tf_weights_in_bert base_model_prefix = "bert" def _init_weights(self, module): """ Initialize the weights """ if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, BertLayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() BERT_START_DOCSTRING = r""" This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ BERT_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using :class:`transformers.BertTokenizer`. See :func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.encode_plus` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. `What are attention masks? <../glossary.html#attention-mask>`__ token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1`` corresponds to a `sentence B` token `What are token type IDs? <../glossary.html#token-type-ids>`_ position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, config.max_position_embeddings - 1]``. `What are position IDs? <../glossary.html#position-ids>`_ head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`): Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: :obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**. inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. """
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch BERT model. """ # from .modules import BiaffineSpanRepr, BiaffineRelationCls, BiafEncoder, \ # CatEncoder, max_pool, Tetrafine, BiaffineMessagePasser, \ # LinearMessegePasser, CPDTrilinear, CatEncoderCross, \ # bilinear_classifier, BiafCrossEncoder logger = logging.getLogger(__name__) BERT_PRETRAINED_MODEL_ARCHIVE_MAP = { "bert-base-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-pytorch_model.bin", "bert-large-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-pytorch_model.bin", "bert-base-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-pytorch_model.bin", "bert-large-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-pytorch_model.bin", "bert-base-multilingual-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-pytorch_model.bin", "bert-base-multilingual-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-pytorch_model.bin", "bert-base-chinese": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-pytorch_model.bin", "bert-base-german-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-pytorch_model.bin", "bert-large-uncased-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-pytorch_model.bin", "bert-large-cased-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-pytorch_model.bin", "bert-large-uncased-whole-word-masking-finetuned-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-pytorch_model.bin", "bert-large-cased-whole-word-masking-finetuned-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-pytorch_model.bin", "bert-base-cased-finetuned-mrpc": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-pytorch_model.bin", "bert-base-german-dbmdz-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-cased-pytorch_model.bin", "bert-base-german-dbmdz-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-uncased-pytorch_model.bin", "bert-base-japanese": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-pytorch_model.bin", "bert-base-japanese-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-whole-word-masking-pytorch_model.bin", "bert-base-japanese-char": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-char-pytorch_model.bin", "bert-base-japanese-char-whole-word-masking": 
"https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-char-whole-word-masking-pytorch_model.bin", "bert-base-finnish-cased-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/TurkuNLP/bert-base-finnish-cased-v1/pytorch_model.bin", "bert-base-finnish-uncased-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/TurkuNLP/bert-base-finnish-uncased-v1/pytorch_model.bin", "bert-base-dutch-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/wietsedv/bert-base-dutch-cased/pytorch_model.bin", } def load_tf_weights_in_bert(model, config, tf_checkpoint_path): """ Load tf checkpoints in a pytorch model. """ try: except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." ) raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info("Converting TensorFlow checkpoint from {}".format(tf_path)) # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: logger.info("Loading TF weight {} with shape {}".format(name, shape)) array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) for name, array in zip(names, arrays): name = name.split("/") # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # which are not required for using pretrained model if any( n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"] for n in name ): logger.info("Skipping {}".format("/".join(name))) continue pointer = model for m_name in name: if re.fullmatch(r"[A-Za-z]+_\d+", m_name): scope_names = re.split(r"_(\d+)", m_name) else: scope_names = [m_name] if scope_names[0] == "kernel" or scope_names[0] == "gamma": pointer = getattr(pointer, "weight") elif scope_names[0] == "output_bias" or scope_names[0] == "beta": pointer = getattr(pointer, "bias") elif scope_names[0] == "output_weights": pointer = getattr(pointer, "weight") elif scope_names[0] == "squad": pointer = getattr(pointer, "classifier") else: try: pointer = getattr(pointer, scope_names[0]) except AttributeError: logger.info("Skipping {}".format("/".join(name))) continue if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] if m_name[-11:] == "_embeddings": pointer = getattr(pointer, "weight") elif m_name == "kernel": array = np.transpose(array) try: assert pointer.shape == array.shape except AssertionError as e: e.args += (pointer.shape, array.shape) raise logger.info("Initialize PyTorch weight {}".format(name)) pointer.data = torch.from_numpy(array) return model def mish(x): return x * torch.tanh(nn.functional.softplus(x)) ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish, "gelu_new": gelu_new, "mish": mish} BertLayerNorm = torch.nn.LayerNorm class BertEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings. 
""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] device = input_ids.device if input_ids is not None else inputs_embeds.device if position_ids is None: position_ids = torch.arange(seq_length, dtype=torch.long, device=device) position_ids = position_ids.unsqueeze(0).expand(input_shape) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) position_embeddings = self.position_embeddings(position_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + position_embeddings + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class BertSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( "The hidden size (%d) is not a multiple of the number of attention " "heads (%d)" % (config.hidden_size, config.num_attention_heads) ) self.output_attentions = config.output_attentions self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, ): mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. if encoder_hidden_states is not None: mixed_key_layer = self.key(encoder_hidden_states) mixed_value_layer = self.value(encoder_hidden_states) attention_mask = encoder_attention_mask else: mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. 
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in BertModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.Softmax(dim=-1)(attention_scores) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,) return outputs class BertSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertAttention(nn.Module): def __init__(self, config): super().__init__() self.self = BertSelfAttention(config) self.output = BertSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return mask = torch.ones(self.self.num_attention_heads, self.self.attention_head_size) heads = set(heads) - self.pruned_heads # Convert to set and remove already pruned heads for head in heads: # Compute how many pruned heads are before the head and move the index accordingly head = head - sum(1 if h < head else 0 for h in self.pruned_heads) mask[head] = 0 mask = mask.view(-1).contiguous().eq(1) index = torch.arange(len(mask))[mask].long() # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, ): self_outputs = self.self( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs class BertIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states): hidden_states = 
self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class BertOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertLayer(nn.Module): def __init__(self, config): super().__init__() self.attention = BertAttention(config) self.is_decoder = config.is_decoder if self.is_decoder: self.crossattention = BertAttention(config) self.intermediate = BertIntermediate(config) self.output = BertOutput(config) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, ): self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights if self.is_decoder and encoder_hidden_states is not None: cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:] # add cross attentions if we output attention weights intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) outputs = (layer_output,) + outputs return outputs class BertEncoder(nn.Module): def __init__(self, config): super().__init__() self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)]) try: self.use_full_layer = config.use_full_layer except: self.use_full_layer = -1 def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, full_attention_mask=None, ): all_hidden_states = () all_attentions = () for i, layer_module in enumerate(self.layer): if i==self.use_full_layer: attention_mask = full_attention_mask if self.output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask ) hidden_states = layer_outputs[0] if self.output_attentions: all_attentions = all_attentions + (layer_outputs[1],) # Add last layer if self.output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) outputs = (hidden_states,) if self.output_hidden_states: outputs = outputs + (all_hidden_states,) if self.output_attentions: outputs = outputs + (all_attentions,) return outputs # last-layer hidden state, (all hidden states), (all attentions) class BertPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. 
first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class BertPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class BertLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = BertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states class BertOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = BertLMPredictionHead(config) def forward(self, sequence_output): prediction_scores = self.predictions(sequence_output) return prediction_scores class BertOnlyNSPHead(nn.Module): def __init__(self, config): super().__init__() self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, pooled_output): seq_relationship_score = self.seq_relationship(pooled_output) return seq_relationship_score class BertPreTrainingHeads(nn.Module): def __init__(self, config): super().__init__() self.predictions = BertLMPredictionHead(config) self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, sequence_output, pooled_output): prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) return prediction_scores, seq_relationship_score class BertPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = BertConfig pretrained_model_archive_map = BERT_PRETRAINED_MODEL_ARCHIVE_MAP load_tf_weights = load_tf_weights_in_bert base_model_prefix = "bert" def _init_weights(self, module): """ Initialize the weights """ if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, BertLayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() BERT_START_DOCSTRING = r""" This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ BERT_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using :class:`transformers.BertTokenizer`. See :func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.encode_plus` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. `What are attention masks? <../glossary.html#attention-mask>`__ token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1`` corresponds to a `sentence B` token `What are token type IDs? <../glossary.html#token-type-ids>`_ position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, config.max_position_embeddings - 1]``. `What are position IDs? <../glossary.html#position-ids>`_ head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`): Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: :obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**. inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. """
@add_start_docstrings(
2
2023-10-15 02:31:09+00:00
24k
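The record above ends inside BertPooler, which pools a sequence by passing the first token's hidden state through a dense layer and a tanh. A minimal standalone sketch of that pooling step, assuming a hypothetical hidden size of 768 and random inputs:

import torch
from torch import nn

class Pooler(nn.Module):
    # Pool by projecting the hidden state of the first ([CLS]) token,
    # mirroring the BertPooler definition shown in the record above.
    def __init__(self, hidden_size: int = 768):
        super().__init__()
        self.dense = nn.Linear(hidden_size, hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        first_token = hidden_states[:, 0]               # (batch, hidden)
        return self.activation(self.dense(first_token))

pooled = Pooler()(torch.randn(2, 16, 768))  # 2 sequences of 16 tokens
print(pooled.shape)                         # torch.Size([2, 768])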
akashgreninja/GreSec
backend/venv/lib/python3.10/site-packages/urllib3/connectionpool.py
[ { "identifier": "_TYPE_BODY", "path": "backend/venv/lib/python3.10/site-packages/urllib3/_base_connection.py", "snippet": "_TYPE_BODY = typing.Union[bytes, typing.IO[typing.Any], typing.Iterable[bytes], str]" }, { "identifier": "HTTPHeaderDict", "path": "backend/venv/lib/python3.10/site-pack...
import errno import logging import queue import sys import typing import warnings import weakref import ssl from socket import timeout as SocketTimeout from types import TracebackType from ._base_connection import _TYPE_BODY from ._collections import HTTPHeaderDict from ._request_methods import RequestMethods from .connection import ( BaseSSLError, BrokenPipeError, DummyConnection, HTTPConnection, HTTPException, HTTPSConnection, ProxyConfig, _wrap_proxy_error, ) from .connection import port_by_scheme as port_by_scheme from .exceptions import ( ClosedPoolError, EmptyPoolError, FullPoolError, HostChangedError, InsecureRequestWarning, LocationValueError, MaxRetryError, NewConnectionError, ProtocolError, ProxyError, ReadTimeoutError, SSLError, TimeoutError, ) from .response import BaseHTTPResponse from .util.connection import is_connection_dropped from .util.proxy import connection_requires_http_tunnel from .util.request import _TYPE_BODY_POSITION, set_file_position from .util.retry import Retry from .util.ssl_match_hostname import CertificateError from .util.timeout import _DEFAULT_TIMEOUT, _TYPE_DEFAULT, Timeout from .util.url import Url, _encode_target from .util.url import _normalize_host as normalize_host from .util.url import parse_url from .util.util import to_str from typing_extensions import Literal from ._base_connection import BaseHTTPConnection, BaseHTTPSConnection
21,573
# Pool objects class ConnectionPool: """ Base class for all connection pools, such as :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`. .. note:: ConnectionPool.urlopen() does not normalize or percent-encode target URIs which is useful if your target server doesn't support percent-encoded target URIs. """ scheme: str | None = None QueueCls = queue.LifoQueue def __init__(self, host: str, port: int | None = None) -> None: if not host: raise LocationValueError("No host specified.") self.host = _normalize_host(host, scheme=self.scheme) self.port = port # This property uses 'normalize_host()' (not '_normalize_host()') # to avoid removing square braces around IPv6 addresses. # This value is sent to `HTTPConnection.set_tunnel()` if called # because square braces are required for HTTP CONNECT tunneling. self._tunnel_host = normalize_host(host, scheme=self.scheme).lower() def __str__(self) -> str: return f"{type(self).__name__}(host={self.host!r}, port={self.port!r})" def __enter__(self: _SelfT) -> _SelfT: return self def __exit__( self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None, ) -> Literal[False]: self.close() # Return False to re-raise any potential exceptions return False def close(self) -> None: """ Close all pooled connections and disable the pool. """ # This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252 _blocking_errnos = {errno.EAGAIN, errno.EWOULDBLOCK} class HTTPConnectionPool(ConnectionPool, RequestMethods): """ Thread-safe connection pool for one host. :param host: Host used for this HTTP Connection (e.g. "localhost"), passed into :class:`http.client.HTTPConnection`. :param port: Port used for this HTTP Connection (None is equivalent to 80), passed into :class:`http.client.HTTPConnection`. :param timeout: Socket timeout in seconds for each individual connection. This can be a float or integer, which sets the timeout for the HTTP request, or an instance of :class:`urllib3.util.Timeout` which gives you more fine-grained control over request timeouts. After the constructor has been parsed, this is always a `urllib3.util.Timeout` object. :param maxsize: Number of connections to save that can be reused. More than 1 is useful in multithreaded situations. If ``block`` is set to False, more connections will be created but they will not be saved once they've been used. :param block: If set to True, no more than ``maxsize`` connections will be used at a time. When no free connections are available, the call will block until a connection has been released. This is a useful side effect for particular multithreaded situations where one does not want to use more than maxsize connections per host to prevent flooding. :param headers: Headers to include with all requests, unless other headers are given explicitly. :param retries: Retry configuration to use by default with requests in this pool. :param _proxy: Parsed proxy URL, should not be used directly, instead, see :class:`urllib3.ProxyManager` :param _proxy_headers: A dictionary with proxy headers, should not be used directly, instead, see :class:`urllib3.ProxyManager` :param \\**conn_kw: Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`, :class:`urllib3.connection.HTTPSConnection` instances. 
""" scheme = "http" ConnectionCls: ( type[BaseHTTPConnection] | type[BaseHTTPSConnection] ) = HTTPConnection def __init__( self, host: str, port: int | None = None, timeout: _TYPE_TIMEOUT | None = _DEFAULT_TIMEOUT, maxsize: int = 1, block: bool = False, headers: typing.Mapping[str, str] | None = None, retries: Retry | bool | int | None = None,
from __future__ import annotations if typing.TYPE_CHECKING: log = logging.getLogger(__name__) _TYPE_TIMEOUT = typing.Union[Timeout, float, _TYPE_DEFAULT, None] _SelfT = typing.TypeVar("_SelfT") # Pool objects class ConnectionPool: """ Base class for all connection pools, such as :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`. .. note:: ConnectionPool.urlopen() does not normalize or percent-encode target URIs which is useful if your target server doesn't support percent-encoded target URIs. """ scheme: str | None = None QueueCls = queue.LifoQueue def __init__(self, host: str, port: int | None = None) -> None: if not host: raise LocationValueError("No host specified.") self.host = _normalize_host(host, scheme=self.scheme) self.port = port # This property uses 'normalize_host()' (not '_normalize_host()') # to avoid removing square braces around IPv6 addresses. # This value is sent to `HTTPConnection.set_tunnel()` if called # because square braces are required for HTTP CONNECT tunneling. self._tunnel_host = normalize_host(host, scheme=self.scheme).lower() def __str__(self) -> str: return f"{type(self).__name__}(host={self.host!r}, port={self.port!r})" def __enter__(self: _SelfT) -> _SelfT: return self def __exit__( self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None, ) -> Literal[False]: self.close() # Return False to re-raise any potential exceptions return False def close(self) -> None: """ Close all pooled connections and disable the pool. """ # This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252 _blocking_errnos = {errno.EAGAIN, errno.EWOULDBLOCK} class HTTPConnectionPool(ConnectionPool, RequestMethods): """ Thread-safe connection pool for one host. :param host: Host used for this HTTP Connection (e.g. "localhost"), passed into :class:`http.client.HTTPConnection`. :param port: Port used for this HTTP Connection (None is equivalent to 80), passed into :class:`http.client.HTTPConnection`. :param timeout: Socket timeout in seconds for each individual connection. This can be a float or integer, which sets the timeout for the HTTP request, or an instance of :class:`urllib3.util.Timeout` which gives you more fine-grained control over request timeouts. After the constructor has been parsed, this is always a `urllib3.util.Timeout` object. :param maxsize: Number of connections to save that can be reused. More than 1 is useful in multithreaded situations. If ``block`` is set to False, more connections will be created but they will not be saved once they've been used. :param block: If set to True, no more than ``maxsize`` connections will be used at a time. When no free connections are available, the call will block until a connection has been released. This is a useful side effect for particular multithreaded situations where one does not want to use more than maxsize connections per host to prevent flooding. :param headers: Headers to include with all requests, unless other headers are given explicitly. :param retries: Retry configuration to use by default with requests in this pool. :param _proxy: Parsed proxy URL, should not be used directly, instead, see :class:`urllib3.ProxyManager` :param _proxy_headers: A dictionary with proxy headers, should not be used directly, instead, see :class:`urllib3.ProxyManager` :param \\**conn_kw: Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`, :class:`urllib3.connection.HTTPSConnection` instances. 
""" scheme = "http" ConnectionCls: ( type[BaseHTTPConnection] | type[BaseHTTPSConnection] ) = HTTPConnection def __init__( self, host: str, port: int | None = None, timeout: _TYPE_TIMEOUT | None = _DEFAULT_TIMEOUT, maxsize: int = 1, block: bool = False, headers: typing.Mapping[str, str] | None = None, retries: Retry | bool | int | None = None,
_proxy: Url | None = None,
28
2023-10-23 18:09:28+00:00
24k
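The HTTPConnectionPool docstring in this record describes the host, port, maxsize and block parameters. A minimal usage sketch, assuming a reachable plain-HTTP host (the hostname below is only a placeholder):

from urllib3 import HTTPConnectionPool

# One pool per host; block=True caps concurrent connections at maxsize.
pool = HTTPConnectionPool("example.com", port=80, maxsize=2, block=True)
resp = pool.request("GET", "/")   # request() is provided by RequestMethods
print(resp.status, len(resp.data))
pool.close()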
zju3dv/nr_in_a_room
test/test_optim_pano.py
[ { "identifier": "RoomOptimizer", "path": "optim/room_optimizer.py", "snippet": "class RoomOptimizer:\n def __init__(\n self,\n scale_factor: float,\n bg_scale_factor: float,\n bg_scene_center: list,\n img_wh: list,\n near: float,\n far: float,\n ...
import sys import os import torch import numpy as np from PIL import Image from omegaconf import OmegaConf from optim.room_optimizer import RoomOptimizer from optim.misc_utils import ( read_real_scene_localization, read_real_scene_localization_with_name, read_testing_config, )
14,879
os.environ["OMP_NUM_THREADS"] = "1" # noqa os.environ["MKL_NUM_THREADS"] = "1" # noqa sys.path.append(".") # noqa def main(config): active_instance_id = config.active_instance_id image_path = config.test_image_path dataset_config = config.dataset_config["dataset"] bg_scale_factor = 1 bg_scene_center = [0, 0, 0] if config.bg_dataset_config != "": bg_dataset_config = config.bg_dataset_config["dataset"] bg_scale_factor = bg_dataset_config["scale_factor"] bg_scene_center = bg_dataset_config["scene_center"] img_wh = config.img_wh # read image input_rgb = Image.open(image_path) input_rgb = input_rgb.resize(img_wh, Image.LANCZOS) input_rgb = np.array(input_rgb) input_rgb = torch.from_numpy(input_rgb).float() / 255 # (H, W, 3) # intialize room optimizer room_optimizer = RoomOptimizer( scene_info_json_path=config.scene_info_json, scale_factor=dataset_config["scale_factor"], scale_factor_dict=dataset_config.get("scale_factor_dict", {}), bg_scale_factor=bg_scale_factor, bg_scene_center=bg_scene_center, img_wh=config.img_wh, near=0.3, far=10.0, N_samples=64, N_importance=128, chunk=config.chunk, model_ckpt_path_dict=config.ckpt_path_dict, # relation_info=relation_info, relation_info={}, output_path="debug", prefix=config.prefix, active_instance_id=active_instance_id, lr=1e-2, # lr=5e-2, N_optim_step=500, adjust_lr_per_step=0, optim_batch_size=1024, # optim_batch_size=2048, # optim_batch_size=4096, # use_amp=False, use_amp=True, optimize_light_env=True, optimize_appearance_code=config.get("optimize_appearance_code", False), mask_per_object=False, bbox_ray_intersect=True, bbox_enlarge=0.1, optimize_option=[ "keypoint_mask", "photometric_loss", # "perceptual_loss", "z_axis_align_loss", "object_room_wall_attach", "object_room_floor_attach", "physical_violation", # "physical_violation_delayed_start", "object_object_attach", "viewing_constraint", "optimize_exposure", "regenerate_relation_during_test", # "visualize_pred", # "print_loss_dict", ], ) # room_optimizer.set_sampling_mask_from_seg( # seg_mask=None, # seg_mask_path=config.seg_mask_path, # # add_noise_to_seg=0, # add_noise_to_seg=5, # dilate mask # convert_seg_mask_to_box_mask=True, # # convert_seg_mask_to_box_mask=False, # ) room_optimizer.set_sampling_mask_from_seg( # seg_mask=torch.ones_like(input_rgb[:, :, 0]).numpy() * 31, seg_mask_path=config.seg_mask_path, # add_noise_to_seg=0, add_noise_to_seg=5, # dilate mask convert_seg_mask_to_box_mask=False, # convert_seg_mask_to_box_mask=False, ) if "obj_prediction_json" in config: room_optimizer.set_initial_pose_from_prediction(config["obj_prediction_json"]) else: room_optimizer.set_initial_object_poses_from_scene_meta() # dump config config["optimize_option"] = room_optimizer.optimize_option OmegaConf.save( config=config, f=os.path.join(room_optimizer.output_path, "optim_config_full.yaml"), ) room_optimizer.generate_relation()
os.environ["OMP_NUM_THREADS"] = "1" # noqa os.environ["MKL_NUM_THREADS"] = "1" # noqa sys.path.append(".") # noqa def main(config): active_instance_id = config.active_instance_id image_path = config.test_image_path dataset_config = config.dataset_config["dataset"] bg_scale_factor = 1 bg_scene_center = [0, 0, 0] if config.bg_dataset_config != "": bg_dataset_config = config.bg_dataset_config["dataset"] bg_scale_factor = bg_dataset_config["scale_factor"] bg_scene_center = bg_dataset_config["scene_center"] img_wh = config.img_wh # read image input_rgb = Image.open(image_path) input_rgb = input_rgb.resize(img_wh, Image.LANCZOS) input_rgb = np.array(input_rgb) input_rgb = torch.from_numpy(input_rgb).float() / 255 # (H, W, 3) # intialize room optimizer room_optimizer = RoomOptimizer( scene_info_json_path=config.scene_info_json, scale_factor=dataset_config["scale_factor"], scale_factor_dict=dataset_config.get("scale_factor_dict", {}), bg_scale_factor=bg_scale_factor, bg_scene_center=bg_scene_center, img_wh=config.img_wh, near=0.3, far=10.0, N_samples=64, N_importance=128, chunk=config.chunk, model_ckpt_path_dict=config.ckpt_path_dict, # relation_info=relation_info, relation_info={}, output_path="debug", prefix=config.prefix, active_instance_id=active_instance_id, lr=1e-2, # lr=5e-2, N_optim_step=500, adjust_lr_per_step=0, optim_batch_size=1024, # optim_batch_size=2048, # optim_batch_size=4096, # use_amp=False, use_amp=True, optimize_light_env=True, optimize_appearance_code=config.get("optimize_appearance_code", False), mask_per_object=False, bbox_ray_intersect=True, bbox_enlarge=0.1, optimize_option=[ "keypoint_mask", "photometric_loss", # "perceptual_loss", "z_axis_align_loss", "object_room_wall_attach", "object_room_floor_attach", "physical_violation", # "physical_violation_delayed_start", "object_object_attach", "viewing_constraint", "optimize_exposure", "regenerate_relation_during_test", # "visualize_pred", # "print_loss_dict", ], ) # room_optimizer.set_sampling_mask_from_seg( # seg_mask=None, # seg_mask_path=config.seg_mask_path, # # add_noise_to_seg=0, # add_noise_to_seg=5, # dilate mask # convert_seg_mask_to_box_mask=True, # # convert_seg_mask_to_box_mask=False, # ) room_optimizer.set_sampling_mask_from_seg( # seg_mask=torch.ones_like(input_rgb[:, :, 0]).numpy() * 31, seg_mask_path=config.seg_mask_path, # add_noise_to_seg=0, add_noise_to_seg=5, # dilate mask convert_seg_mask_to_box_mask=False, # convert_seg_mask_to_box_mask=False, ) if "obj_prediction_json" in config: room_optimizer.set_initial_pose_from_prediction(config["obj_prediction_json"]) else: room_optimizer.set_initial_object_poses_from_scene_meta() # dump config config["optimize_option"] = room_optimizer.optimize_option OmegaConf.save( config=config, f=os.path.join(room_optimizer.output_path, "optim_config_full.yaml"), ) room_optimizer.generate_relation()
real_room_loc = read_real_scene_localization_with_name("arrangement3")
2
2023-10-15 08:41:29+00:00
24k
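The test script in this record loads an RGB image with PIL, resizes it to the configured width and height, and converts it to a float tensor in [0, 1]. A small sketch of that preprocessing, assuming a hypothetical image path and size:

import os
import numpy as np
import torch
from PIL import Image

def load_rgb(path: str, img_wh=(640, 320)) -> torch.Tensor:
    # Resize with LANCZOS and scale to [0, 1], as in the record's main().
    img = Image.open(path).convert("RGB").resize(img_wh, Image.LANCZOS)
    return torch.from_numpy(np.array(img)).float() / 255  # (H, W, 3)

if os.path.exists("input.png"):  # placeholder path
    print(load_rgb("input.png").shape)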
WenzhengZhang/Seq2seqCoref
main_trainer.py
[ { "identifier": "DataArguments", "path": "arguments.py", "snippet": "class DataArguments:\n data_dir: Optional[str] = field(\n default=None, metadata={\"help\": \"Path to data directory\"}\n )\n\n max_train_len: Optional[int] = field(\n default=1536,\n metadata={\n ...
import logging import os import sys from transformers import HfArgumentParser, set_seed from transformers import AutoModelForSeq2SeqLM, \ DataCollatorForSeq2Seq, AutoConfig, AutoTokenizer from transformers.integrations import TensorBoardCallback from arguments import DataArguments, ModelArguments, CorefTrainingArguments \ as TrainingArguments from data import CorefDataset, JointDataset from constants import SPEAKER_START, SPEAKER_END, MENTION_START, MENTION_END, \ COPY, CLUSTER_NEW, CLUSTERS, SENTENCE_START, SENTENCE_END, SPECIAL_IDS, \ NON_INT_SPECIAL_IDS, MARK_SPECIAL_IDS, MENTION_END_NON_INT_SPECIAL_IDS, \ MENTION_ENDS from trainer import CorefTrainer from data import ConstrainedDataCollator from model import ConstrainedT5
20,269
logger = logging.getLogger(__name__) logger.addHandler(logging.StreamHandler(sys.stdout)) def main(): parser = HfArgumentParser( (ModelArguments, DataArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): model_args, data_args, training_args = parser.parse_json_file( json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() model_args: ModelArguments data_args: DataArguments training_args: TrainingArguments if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, fp16 training: %s, bf16 training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, training_args.bf16, ) logger.info("Training/evaluation parameters %s", training_args) logger.info("MODEL parameters %s", model_args) logger.info("Data arguments %s", data_args) set_seed(training_args.seed) tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path) if training_args.action_type == "integer": num_new_tokens = tokenizer.add_tokens([SPEAKER_START, SPEAKER_END, MENTION_START, MENTION_END,
logger = logging.getLogger(__name__) logger.addHandler(logging.StreamHandler(sys.stdout)) def main(): parser = HfArgumentParser( (ModelArguments, DataArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): model_args, data_args, training_args = parser.parse_json_file( json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() model_args: ModelArguments data_args: DataArguments training_args: TrainingArguments if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, fp16 training: %s, bf16 training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, training_args.bf16, ) logger.info("Training/evaluation parameters %s", training_args) logger.info("MODEL parameters %s", model_args) logger.info("Data arguments %s", data_args) set_seed(training_args.seed) tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path) if training_args.action_type == "integer": num_new_tokens = tokenizer.add_tokens([SPEAKER_START, SPEAKER_END, MENTION_START, MENTION_END,
COPY])
9
2023-10-17 17:39:16+00:00
24k
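main_trainer.py in this record reads its arguments either from a single JSON file or from the command line via HfArgumentParser. A sketch of that pattern with a simplified, hypothetical dataclass standing in for the real DataArguments:

import sys
from dataclasses import dataclass, field
from transformers import HfArgumentParser

@dataclass
class DataArgs:
    # Simplified, hypothetical stand-in for arguments.DataArguments.
    data_dir: str = field(default="data", metadata={"help": "Path to data directory"})
    max_train_len: int = field(default=1536)

parser = HfArgumentParser(DataArgs)
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
    (data_args,) = parser.parse_json_file(json_file=sys.argv[1])
else:
    (data_args,) = parser.parse_args_into_dataclasses()
print(data_args)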
giulio98/functional-diffusion-processes
src/functional_diffusion_processes/trainers/trainer.py
[ { "identifier": "AudioDataset", "path": "src/functional_diffusion_processes/datasets/audio_dataset.py", "snippet": "class AudioDataset(BaseDataset, abc.ABC):\n \"\"\"Base class for defining audio datasets.\n\n This class serves as the foundation for defining datasets containing audio data.\n It...
import abc import gc import io import logging import os import flax import flax.jax_utils as flax_utils import hydra.utils import jax import numpy as np import tensorflow as tf import wandb from typing import Any, Callable, Tuple, Union from cleanfid import fid from flax import linen, traverse_util from flax.training import checkpoints from flax.training.checkpoints import restore_checkpoint from jax import numpy as jnp from omegaconf import DictConfig, OmegaConf from tqdm.auto import tqdm from wandb.sdk.lib import RunDisabled from wandb.sdk.wandb_run import Run from ..datasets import AudioDataset, ImageDataset from ..datasets.base_dataset import BaseDataset from ..losses.base_loss import Loss from ..metrics import FIDMetric from ..samplers import Sampler from ..sdetools.base_sde import SDE from ..utils.common import filter_mask, make_grid_image, process_images, save_samples, to_grayscale from ..utils.scaler import get_data_inverse_scaler, get_data_scaler from ..utils.training_state import TrainState from .helpers import colorizing_fn, construct_sampling_fn, construct_train_step, inpainting_fn, sampling_fn
14,800
# update the TrainState with replicated parameters and optimizer state state = state.replace( params=p_params, opt_state_params=p_opt_state_params, step=p_step, ema_params=p_ema_params, ) if jax.host_id() == 0: pylogger.info("Starting training loop at step %d." % (start_step,)) rng = jax.random.fold_in(rng, jax.host_id()) assert ( self.training_config.log_freq % self.training_config.n_jitted_steps == 0 and self.training_config.eval_freq % self.training_config.n_jitted_steps == 0 ), "Missing logs or checkpoints!" ds_train_iter = iter(ds_train) with tqdm( total=self.training_config.total_steps + 1, initial=start_step, position=0, leave=True, ) as pbar: for step in range( start_step, self.training_config.total_steps + 1, self.training_config.n_jitted_steps, ): # Get the next batch of data and scale it batch = jax.tree_map(f=lambda x: scaler(x._numpy()), tree=next(ds_train_iter)["data"]) if not self.training_config.sampling_only: # Split the random number generator for the current step rng, *next_rng = jax.random.split(key=rng, num=jax.local_device_count() + 1) next_rng = jnp.asarray(next_rng) ((_, state), batch_reconstructed, batch_corrupted, target) = self.train_step( train_step_fn=train_step_fn, carry_state=(next_rng, state), batch=batch, batch_input=p_batch_input, ) if not self.training_config.sampling_only and ( (jax.host_id() == 0 and step % self.training_config.checkpoint_freq == 0 and step != 0) ): self.save_checkpoint(step, run, state) # Evaluate the model if self.training_config.sampling and (step % self.training_config.eval_freq == 0): # if step != 0: if jax.host_id() == 0: pylogger.info("Generating samples at step %d." % (step,)) _, *sample_rng = jax.random.split(rng, jax.local_device_count() + 1) _, b, g, c = batch.shape sample_rng = jnp.asarray(sample_rng) if self.training_config.sampling_type == "full": batch_sampled, batch_sampled_last, batch_sampled_all = sampling_fn( sample_fn, (sample_rng, state), p_batch_input ) elif self.training_config.sampling_type == "colorization": batch_grayscale = to_grayscale(batch) batch_grayscale = batch_grayscale.reshape(-1, b, g, 1) batch_sampled, batch_sampled_last, batch_sampled_all = colorizing_fn( sample_fn, (sample_rng, state), p_batch_input, batch_grayscale ) elif self.training_config.sampling_type == "inpainting": config_object = OmegaConf.create( { "_target_": "functional_diffusion_processes.datasets.mnist_dataset.MNISTDataset", "data_config": { "seed": 42, "batch_size": ds_train.data_config.batch_size, "image_height_size": ds_train.data_config.image_height_size, "image_width_size": ds_train.data_config.image_width_size, "output_size": 1, "random_flip": False, "uniform_dequantization": False, "data_centered": False, "data_dir": "${oc.env:DATA_ROOT}/tensorflow_datasets", "download": True, "is_mask": True, }, "split": "train", "evaluation": False, } ) ds_mask = hydra.utils.instantiate(config_object, _recursive_=False) ds_mask_iter = iter(ds_mask) batch_masked = jax.tree_map(f=lambda x: x._numpy(), tree=next(ds_mask_iter)["data"]) batch_sampled, batch_sampled_last, batch_sampled_all = inpainting_fn( sample_fn, (sample_rng, state), p_batch_input, (batch * batch_masked), batch_masked ) elif self.training_config.sampling_type == "deblurring": n_rows, n_cols = ds_train.data_config.image_height_size, ds_train.data_config.image_width_size batch_masked = filter_mask(batch.reshape(-1, b, n_rows, n_cols, c).shape, radius=10) batch_freq = jnp.fft.fftshift( jnp.fft.fft2(batch.reshape(-1, b, n_rows, n_cols, c), axes=(2, 3)), axes=(2, 3), ) batch_freq = 
batch_freq * batch_masked batch_blurred = jnp.real(jnp.fft.ifft2(jnp.fft.ifftshift(batch_freq, axes=(2, 3)), axes=(2, 3))) batch_blurred = batch_blurred.reshape(-1, b, g, c) batch_masked = batch_masked.reshape(-1, b, g, c) batch_sampled, batch_sampled_last, batch_sampled_all = inpainting_fn( sample_fn, (sample_rng, state), p_batch_input, batch_blurred, batch_masked ) if jax.host_id() == 0 and self.logging.use_wandb:
# import imageio # import imageio pylogger = logging.getLogger(__name__) class Trainer(abc.ABC): """Class for training a model.""" def __init__( self, mode: str, model_name: str, training_config: DictConfig, optimizer, evaluation_config: DictConfig, trainer_logging: DictConfig, sampler: Sampler, loss_obj: Loss, ) -> None: """Initialize a Trainer instance with configurations and core components. Args: mode (str): Specifies the mode of the trainer which can be either "train" or "eval". model_name (str): The name identifier for the model. training_config (DictConfig): A configuration dictionary for training settings. optimizer: The optimizer instance used for training. evaluation_config (DictConfig): A configuration dictionary for evaluation settings. trainer_logging (DictConfig): A configuration dictionary for logging settings. sampler (Sampler): A sampler instance for sampling from the model. loss_obj (Loss): A loss object used for computing the loss during training. """ self.mode = mode self.model_name = model_name self.training_config = training_config self.optimizer = hydra.utils.instantiate(optimizer) self.evaluation_config = evaluation_config self.logging = trainer_logging self.sampler = sampler self.loss_obj = loss_obj self.checkpoint_dir = os.path.join(self.training_config.save_dir, self.training_config.checkpoint_dir) self.sample_dir = os.path.join(self.training_config.save_dir, self.training_config.sample_dir) self.eval_dir = os.path.join(self.training_config.save_dir, self.evaluation_config.eval_dir) # Create the directories for saving samples and checkpoints tf.io.gfile.makedirs(self.checkpoint_dir) tf.io.gfile.makedirs(self.sample_dir) tf.io.gfile.makedirs(self.eval_dir) tf.io.gfile.makedirs(os.path.join(self.eval_dir, "clean")) def initialize_wandb( self, dataset_config: DictConfig, sde_config: DictConfig, model_config: DictConfig ) -> Union[Run, RunDisabled, None]: """Initialize wandb if logging is enabled.""" if self.logging.use_wandb: run = wandb.init( name=os.path.basename(self.logging.wandb_init.name), project=self.logging.wandb_init.project, entity=self.logging.wandb_init.entity, save_code=self.logging.wandb_init.save_code, config={ **self.training_config, **dataset_config, **sde_config, **model_config, }, ) else: run = None return run def initialize_run(self, model, ds_train, sde): """Perform all initialization steps required for training.""" run = self.initialize_wandb(ds_train.data_config, sde.sde_config, model.model_config) scaler = get_data_scaler(is_centered=ds_train.data_config.data_centered) inverse_scaler = get_data_inverse_scaler(is_centered=ds_train.data_config.data_centered) rng = jax.random.PRNGKey(seed=self.training_config.seed) rng, step_rng = jax.random.split(rng) batch_input = model.initialize_input( (ds_train.data_config.batch_size, *sde.sde_config.shape, ds_train.data_config.output_size) ) params = jax.jit(model.initialize_model, backend="cpu")(step_rng, batch_input) flat_params = traverse_util.flatten_dict(params).values() tot_params = sum([jnp.size(p) for p in flat_params]) pylogger.info("Total number of parameters: {:.2f}M".format(tot_params / 1e6)) state = TrainState.create( apply_fn=model.apply, params=params, tx=self.optimizer, opt_state_params=self.optimizer.init(params), rng=rng, ema_params=params, ) train_step_fn = construct_train_step(self.optimizer, self.loss_obj.construct_loss_fn(model)) sample_fn = construct_sampling_fn(model, self.sampler) # Resume training when intermediate checkpoints are detected if self.training_config.resume_training: 
pylogger.warning("Resuming training from the latest checkpoint.") if self.logging.use_wandb and self.model_name != "local": model_file = wandb.use_artifact(self.model_name).download() state = restore_checkpoint(ckpt_dir=model_file, prefix="checkpoint_", target=state) else: state = checkpoints.restore_checkpoint(ckpt_dir=self.checkpoint_dir, target=state) return run, scaler, inverse_scaler, rng, state, train_step_fn, sample_fn, batch_input def train_step( self, train_step_fn: Callable, carry_state: Tuple, batch: jnp.ndarray, batch_input: jnp.ndarray, ) -> Tuple: """Perform a single training step, updating the model parameters. Args: train_step_fn (Callable): The train step function. carry_state (Tuple): The current state of the model and optimizer. batch (jnp.ndarray): The batch of data used for training. batch_input (jnp.ndarray): The input data to the model. Returns: Tuple: The updated state after performing the training step. """ (rng, state) = carry_state ( new_rng, loss, loss_inner, new_params, new_optim_state, batch_reconstructed, batch_corrupted, target, ) = train_step_fn( rng, state.params, state.opt_state_params, state.step, batch_input, batch, ) ema_rate = self.training_config.ema_rate new_params_ema = jax.tree_map( lambda p_ema, p: p_ema * ema_rate + p * (1.0 - ema_rate), state.ema_params, new_params, ) # update the state new_state = state.replace( rng=flax.jax_utils.unreplicate(new_rng), step=state.step + 1, opt_state_params=new_optim_state, params=new_params, ema_params=new_params_ema, ) new_carry_state = (new_rng, new_state) loss = flax.jax_utils.unreplicate(loss) step = int(flax_utils.unreplicate(state.step)) # Log the training progress if jax.host_id() == 0 and step % self.training_config.log_freq == 0: pylogger.info("step: %d, training_loss: %.5e" % (step, loss)) if self.logging.use_wandb: wandb.log({"step": step, "loss": loss}, step=step) if loss_inner is not None: loss_inner = flax.jax_utils.unreplicate(loss_inner) for inner_step, loss in enumerate(loss_inner): pylogger.info("step: %d, training_loss_inner: %.5e" % (step, loss)) if self.logging.use_wandb: wandb.log({"step": step, f"loss inner step {inner_step}": loss}, step=step) return new_carry_state, batch_reconstructed, batch_corrupted, target def save_checkpoint(self, step, run, state): pylogger.info("Saving the model at step %d." % (step,)) # Log the evaluation progress # Save the model parameters ( params, opt_state_params, step_, ema_params, ) = flax_utils.unreplicate( ( state.params, state.opt_state_params, state.step, state.ema_params, ) ) saved_state = state.replace( step=step_, opt_state_params=opt_state_params, params=params, ema_params=ema_params, ) checkpoint_file = checkpoints.save_checkpoint( self.checkpoint_dir, saved_state, step=step_ // self.training_config.eval_freq, keep=np.inf, ) if self.logging.use_wandb: wandb_model_artifact_name = str(step_) + "_" + run.id wandb_model = wandb.Artifact(wandb_model_artifact_name, type="model") wandb_model.add_file(checkpoint_file) run.log_artifact(wandb_model) # noinspection PyProtectedMember def train(self, model: linen.Module, ds_train: BaseDataset, sde: SDE) -> None: """Train the model with optional evaluation and logging. This method encapsulates the entire training process including initialization, training loop, checkpointing, evaluation, and logging. It supports different sampling types like colorization, inpainting, super resolution, and deblurring. Args: model (linen.Module): The model to be trained. ds_train (BaseDataset): The training dataset. 
sde (SDE): Stochastic differential equation object, governing the dynamics for sampling. Raises: ValueError: If an unsupported dataset type is provided. Note: The method leverages the Weights & Biases (wandb) platform for logging and checkpointing, make sure it's configured properly if logging is enabled. """ run, scaler, inverse_scaler, rng, state, train_step_fn, sample_fn, batch_input = self.initialize_run( model, ds_train, sde ) # `state.step` is JAX integer on the GPU/TPU devices start_step = int(state.step) rng = state.rng # Replicate the train state on all devices ( p_params, p_opt_state_params, p_step, p_ema_params, p_batch_input, ) = flax_utils.replicate( ( state.params, state.opt_state_params, state.step, state.ema_params, batch_input, ) ) # update the TrainState with replicated parameters and optimizer state state = state.replace( params=p_params, opt_state_params=p_opt_state_params, step=p_step, ema_params=p_ema_params, ) if jax.host_id() == 0: pylogger.info("Starting training loop at step %d." % (start_step,)) rng = jax.random.fold_in(rng, jax.host_id()) assert ( self.training_config.log_freq % self.training_config.n_jitted_steps == 0 and self.training_config.eval_freq % self.training_config.n_jitted_steps == 0 ), "Missing logs or checkpoints!" ds_train_iter = iter(ds_train) with tqdm( total=self.training_config.total_steps + 1, initial=start_step, position=0, leave=True, ) as pbar: for step in range( start_step, self.training_config.total_steps + 1, self.training_config.n_jitted_steps, ): # Get the next batch of data and scale it batch = jax.tree_map(f=lambda x: scaler(x._numpy()), tree=next(ds_train_iter)["data"]) if not self.training_config.sampling_only: # Split the random number generator for the current step rng, *next_rng = jax.random.split(key=rng, num=jax.local_device_count() + 1) next_rng = jnp.asarray(next_rng) ((_, state), batch_reconstructed, batch_corrupted, target) = self.train_step( train_step_fn=train_step_fn, carry_state=(next_rng, state), batch=batch, batch_input=p_batch_input, ) if not self.training_config.sampling_only and ( (jax.host_id() == 0 and step % self.training_config.checkpoint_freq == 0 and step != 0) ): self.save_checkpoint(step, run, state) # Evaluate the model if self.training_config.sampling and (step % self.training_config.eval_freq == 0): # if step != 0: if jax.host_id() == 0: pylogger.info("Generating samples at step %d." 
% (step,)) _, *sample_rng = jax.random.split(rng, jax.local_device_count() + 1) _, b, g, c = batch.shape sample_rng = jnp.asarray(sample_rng) if self.training_config.sampling_type == "full": batch_sampled, batch_sampled_last, batch_sampled_all = sampling_fn( sample_fn, (sample_rng, state), p_batch_input ) elif self.training_config.sampling_type == "colorization": batch_grayscale = to_grayscale(batch) batch_grayscale = batch_grayscale.reshape(-1, b, g, 1) batch_sampled, batch_sampled_last, batch_sampled_all = colorizing_fn( sample_fn, (sample_rng, state), p_batch_input, batch_grayscale ) elif self.training_config.sampling_type == "inpainting": config_object = OmegaConf.create( { "_target_": "functional_diffusion_processes.datasets.mnist_dataset.MNISTDataset", "data_config": { "seed": 42, "batch_size": ds_train.data_config.batch_size, "image_height_size": ds_train.data_config.image_height_size, "image_width_size": ds_train.data_config.image_width_size, "output_size": 1, "random_flip": False, "uniform_dequantization": False, "data_centered": False, "data_dir": "${oc.env:DATA_ROOT}/tensorflow_datasets", "download": True, "is_mask": True, }, "split": "train", "evaluation": False, } ) ds_mask = hydra.utils.instantiate(config_object, _recursive_=False) ds_mask_iter = iter(ds_mask) batch_masked = jax.tree_map(f=lambda x: x._numpy(), tree=next(ds_mask_iter)["data"]) batch_sampled, batch_sampled_last, batch_sampled_all = inpainting_fn( sample_fn, (sample_rng, state), p_batch_input, (batch * batch_masked), batch_masked ) elif self.training_config.sampling_type == "deblurring": n_rows, n_cols = ds_train.data_config.image_height_size, ds_train.data_config.image_width_size batch_masked = filter_mask(batch.reshape(-1, b, n_rows, n_cols, c).shape, radius=10) batch_freq = jnp.fft.fftshift( jnp.fft.fft2(batch.reshape(-1, b, n_rows, n_cols, c), axes=(2, 3)), axes=(2, 3), ) batch_freq = batch_freq * batch_masked batch_blurred = jnp.real(jnp.fft.ifft2(jnp.fft.ifftshift(batch_freq, axes=(2, 3)), axes=(2, 3))) batch_blurred = batch_blurred.reshape(-1, b, g, c) batch_masked = batch_masked.reshape(-1, b, g, c) batch_sampled, batch_sampled_last, batch_sampled_all = inpainting_fn( sample_fn, (sample_rng, state), p_batch_input, batch_blurred, batch_masked ) if jax.host_id() == 0 and self.logging.use_wandb:
if isinstance(ds_train, ImageDataset):
1
2023-10-24 22:01:35+00:00
24k
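The train_step in this record keeps an exponential moving average of the model parameters with a tree map over the parameter pytree. A standalone sketch of that EMA update on a toy pytree (shapes are hypothetical):

import jax
import jax.numpy as jnp

def ema_update(ema_params, new_params, ema_rate: float = 0.999):
    # p_ema <- p_ema * rate + p * (1 - rate), applied leaf-wise over the pytree,
    # matching the update in Trainer.train_step above.
    return jax.tree_util.tree_map(
        lambda p_ema, p: p_ema * ema_rate + p * (1.0 - ema_rate),
        ema_params, new_params,
    )

ema = {"w": jnp.zeros((2, 2)), "b": jnp.zeros(2)}
new = {"w": jnp.ones((2, 2)), "b": jnp.ones(2)}
print(ema_update(ema, new)["w"])  # every entry is 0.001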
violet-sto/HN-GFN
main_mobo.py
[ { "identifier": "Dataset", "path": "dataset.py", "snippet": "class Dataset:\n\n def __init__(self, args, bpath, oracle, device):\n self.test_split_rng = np.random.RandomState(142857)\n self.train_rng = np.random.RandomState(int(time.time()))\n self.train_mols = []\n self.t...
from collections import defaultdict from dataset import Dataset from mol_mdp_ext import MolMDPExtended, BlockMoleculeDataExtended from oracle.oracle import Oracle from proxy import get_proxy from generator import TBGFlowNet, FMGFlowNet, MOReinforce from utils.utils import set_random_seed from utils.metrics import compute_success, compute_diversity, compute_novelty, compute_correlation, circle_points from utils.logging import get_logger from datetime import datetime from botorch.utils.multi_objective.hypervolume import Hypervolume from botorch.utils.sampling import sample_simplex from botorch.utils.transforms import normalize, unnormalize from torch.distributions.dirichlet import Dirichlet from main import RolloutWorker, get_test_mols from pymoo.util.ref_dirs import get_reference_directions from copy import deepcopy import random import os import re import argparse import json import time import threading import pdb import pickle import gzip import torch.multiprocessing as mp import torch.nn.functional as F import torch import pandas as pd import numpy as np import warnings
15,576
picked_mols = [] smis = [] for i, weights in enumerate(rollout_worker.test_weights): sampled_mols = [] sampled_raw_rewards = [] sampled_means = [] sampled_smis = [] while len(sampled_mols) < args.num_samples: rollout_worker.rollout(generator, use_rand_policy=False, weights=torch.tensor(weights).unsqueeze(0).to(args.device)) (raw_r, _, m, trajectory_stats, inflow) = rollout_worker.sampled_mols[-1] sampled_mols.append(m) sampled_raw_rewards.append(raw_r[0].item()) sampled_means.append(raw_r[1]) sampled_smis.append(m.smiles) idx_pick = np.argsort(sampled_raw_rewards)[::-1][:int(args.num_samples/len(rollout_worker.test_weights))] picked_mols.extend(np.array(sampled_mols)[idx_pick].tolist()) means.extend(np.array(sampled_means)[idx_pick].tolist()) smis.extend(np.array(sampled_smis)[idx_pick].tolist()) raw_rewards.extend(np.array(sampled_raw_rewards)[idx_pick].tolist()) raw_rewards_weight[str(weights.cpu())] = np.array(sampled_raw_rewards)[idx_pick].mean() raw_rewards_mean = np.mean(list(raw_rewards_weight.values())) assert len(picked_mols) == args.num_samples top_means = torch.tensor(means) scores_dict = oracle.batch_get_scores(picked_mols) scores = torch.tensor(pd.DataFrame.from_dict(scores_dict).values) test_loss = F.mse_loss(top_means, scores) hypervolume = Hypervolume(ref_point=torch.zeros(len(args.objectives))) volume = hypervolume.compute(top_means) volume_oracle = hypervolume.compute(scores) diversity = compute_diversity(picked_mols) batch_metrics = {'Hypervolume_reward': volume, 'Hypervolume_oracle': volume_oracle, 'Reward_mean': raw_rewards_mean, 'scores_max': pd.DataFrame.from_dict(scores_dict).max().to_dict(), 'scores_mean': pd.DataFrame.from_dict(scores_dict).mean().to_dict(), 'Test_loss': test_loss, 'Diversity': diversity} print(batch_metrics) print('Time: {}'.format(time.time()-time_start)) if not compute_multi_objective_metric: return volume, volume_oracle, raw_rewards_weight, raw_rewards_mean, test_loss, diversity else: for i in range(len(picked_mols)): picked_mols[i].score = scores_dict[i] # success/diversity/novelty is computed among the top mols. success, positive_mols = compute_success( picked_mols, scores_dict, args.objectives, score_succ) succ_diversity = compute_diversity(positive_mols) if ref_mols: novelty = compute_novelty(positive_mols, ref_mols) else: novelty = 1. 
mo_metrics = {'success': success, 'novelty': novelty, 'succ_diversity': succ_diversity, } picked_smis = [(raw_rewards[i], picked_mols[i].score, smis[i]) for i in range(len(raw_rewards))] print(mo_metrics) return (picked_mols, scores_dict, picked_smis), batch_metrics, mo_metrics def log_overall_metrics(args, dataset, batch_infos=None, MultiObjective_metrics=None): volume = dataset.compute_hypervolume() print("Hypervolume for {}: {}".format(args.logger.context, volume)) args.logger.add_scalar('Metric/hypervolume', volume, use_context=False) args.logger.add_object('scores', dataset.scores) args.logger.add_object('smis', dataset.smis) if batch_infos: args.logger.add_scalar( 'Metric/test_loss', batch_infos['Test_loss'], use_context=False) args.logger.add_object('collected_info', batch_infos) if MultiObjective_metrics: args.logger.add_scalars('Metric/MultiObjective', MultiObjective_metrics, use_context=False) def get_test_rays(): if args.n_objectives == 3: n_partitions = 6 elif args.n_objectives == 4: n_partitions = 7 test_rays = get_reference_directions("das-dennis", args.n_objectives, n_partitions=n_partitions).astype(np.float32) test_rays = test_rays[[(r > 0).all() for r in test_rays]] print(f"initialize {len(test_rays)} test rays") return test_rays def main(args): set_random_seed(args.seed) args.logger.set_context('iter_0') bpath = "./data/blocks_105.json" dpath = "./data/docked_mols.h5" # Initialize oracle and dataset (for training surrogate function) oracle = Oracle(args) dataset = Dataset(args, bpath, oracle, args.device) dataset.load_h5(dpath, num_init_examples=args.num_init_examples) log_overall_metrics(args, dataset) args.n_objectives = len(args.objectives) # Initialize surrogate function proxy = get_proxy(args, bpath, oracle) proxy.update(dataset, 0, reset=False) for i in range(1, args.num_outer_loop_iters+1): print(f"====== Starting round {i} ======") args.logger.set_context('iter_{}'.format(i)) test_weights = np.random.dirichlet(args.alpha_vector, 5*(2**(args.n_objectives-2))).astype(np.float32) if args.criterion == 'TB':
warnings.filterwarnings('ignore') def arg_parse(): parser = argparse.ArgumentParser() parser.add_argument("--device", type=str, default='cuda') parser.add_argument('--seed', type=int, default=42, help='seed') parser.add_argument("--run", default=0, help="run", type=int) parser.add_argument('--save', action='store_true', default=False, help='Save model.') parser.add_argument('--debug',action='store_true', default=False, help='debug mode, no multi thread') parser.add_argument("--enable_tensorboard", action='store_true', default=False) parser.add_argument("--log_dir", default='runs/mobo') parser.add_argument("--include_nblocks", default=False) parser.add_argument("--num_init_examples", default=200, type=int) parser.add_argument("--num_outer_loop_iters", default=8, type=int) parser.add_argument("--num_samples", default=100, type=int) parser.add_argument("--floatX", default='float32') parser.add_argument('--sample_iterations', type=int, default=1000, help='sample mols and compute metrics') parser.add_argument("--log_weight_score", action='store_true', default=False) # objectives parser.add_argument("--objectives", type=str, default='gsk3b,jnk3,qed,sa') parser.add_argument("--acq_fn", default='UCB', type=str) parser.add_argument("--beta", default=0.1, type=float) parser.add_argument("--scalar", default='WeightedSum', type=str) parser.add_argument("--alpha", default=1., type=float, help='dirichlet distribution') parser.add_argument("--alpha_vector", default='1,1,1,1', type=str) # Proxy parser.add_argument("--proxy_normalize", action='store_true', default=False, help='normalize Y') parser.add_argument("--proxy_num_iterations", default=10000, type=int) parser.add_argument("--proxy_learning_rate", default=2.5e-4, help="Learning rate", type=float) parser.add_argument("--proxy_mbsize", default=64, help="Minibatch size", type=int) parser.add_argument("--proxy_early_stop_tol", default=10, type=int) parser.add_argument("--proxy_repr_type", default='atom_graph') parser.add_argument("--proxy_model_version", default='v2') parser.add_argument("--proxy_num_conv_steps", default=12, type=int) parser.add_argument("--proxy_nemb", default=64, help="#hidden", type=int) parser.add_argument("--proxy_weight_decay", default=1e-6, help="Weight Decay in Proxy", type=float) parser.add_argument("--proxy_uncertainty", default="evidential", type=str) # deep ensemble and GP parser.add_argument("--proxy_dropout", default=0.1, help="MC Dropout in Proxy", type=float) parser.add_argument("--proxy_num_dropout_samples", default=5, type=int) parser.add_argument("--evidential_lam", default=0.1, type=float) parser.add_argument( "--fp_radius", type=int, default=2, help="Morgan fingerprint radius." ) parser.add_argument( "--fp_nbits", type=int, default=1024, help="Morgan fingerprint nBits." 
) # GFlowNet parser.add_argument("--min_blocks", default=2, type=int) parser.add_argument("--max_blocks", default=8, type=int) parser.add_argument("--num_iterations", default=5000, type=int) parser.add_argument("--criterion", default="FM", type=str) parser.add_argument("--learning_rate", default=5e-4, help="Learning rate", type=float) parser.add_argument("--Z_learning_rate", default=5e-3, help="Learning rate", type=float) parser.add_argument("--clip_grad", default=0, type=float) parser.add_argument("--trajectories_mbsize", default=8, type=int) parser.add_argument("--offline_mbsize", default=8, type=int) parser.add_argument("--hindsight_prob", default=0.2, type=float) parser.add_argument("--hindsight_buffer_mbsize", default=8, type=int) parser.add_argument("--hindsight_trajectories_mbsize", default=8, type=int) parser.add_argument("--reward_min", default=1e-2, type=float) parser.add_argument("--reward_norm", default=1, type=float) parser.add_argument("--reward_exp", default=8, type=float) parser.add_argument("--reward_exp_ramping", default=0, type=float) parser.add_argument("--logit_clipping", default=0., type=float) # Hyperparameters for TB parser.add_argument("--partition_init", default=1, type=float) # Hyperparameters for FM parser.add_argument("--log_reg_c", default=(0.1/8)**4, type=float) parser.add_argument("--balanced_loss", default=True) parser.add_argument("--leaf_coef", default=10, type=float) # Architecture parser.add_argument("--repr_type", default='block_graph') parser.add_argument("--model_version", default='v4') parser.add_argument("--num_conv_steps", default=10, type=int) parser.add_argument("--nemb", default=256, help="#hidden", type=int) parser.add_argument("--weight_decay", default=0, type=float) parser.add_argument("--random_action_prob", default=0.05, type=float) parser.add_argument("--bootstrap_tau", default=0, type=float) parser.add_argument("--condition_type", type=str, default='HN') parser.add_argument("--ray_hidden_dim", default=100, type=int) return parser.parse_args() class BoRolloutWorker(RolloutWorker): def __init__(self, args, bpath, proxy, device): super(BoRolloutWorker, self).__init__(args, bpath, proxy, device) self.hindsight_prob = args.hindsight_prob self.hindsight_mols = defaultdict(list) self.hindsight_smiles = defaultdict(list) self.replay_threshold = 0.9 def _get(self, i, dset, weights=None): # Sample trajectories by walking backwards from the molecules in our dataset # Handle possible multithreading issues when independent threads # add/substract from dset: m = dset[i] if not isinstance(m, BlockMoleculeDataExtended): m = m[-1] r, raw_r = self._get_reward(m, weights) done = 1 samples = [] # a sample is a tuple (parents(s), parent actions, reward(s), s, done) # an action is (blockidx, stemidx) or (-1, x) for 'stop' # so we start with the stop action, unless the molecule is already # a "terminal" node (if it has no stems, no actions). 
if len(m.stems) and len(m.blocks) < self.max_blocks: samples.append(((m,), ((-1, 0),), weights, weights, r, m, done)) r = done = 0 while len(m.blocks): # and go backwards if self.ignore_parents: parents = self.mdp.parents(m) parent, action = parents[self.train_rng.randint(len(parents))] samples.append(((parent,), (action,), weights, weights, r, m, done)) r = done = 0 m = parent else: parents, actions = zip(*self.mdp.parents(m)) samples.append((parents, actions, weights.repeat(len(parents), 1), weights, r, m, done)) r = done = 0 m = parents[self.train_rng.randint(len(parents))] return samples[::-1] def _add_mol_to_replay(self, m): for i, weights in enumerate(self.test_weights): r, raw_r = self._get_reward(m, weights) if len(self.hindsight_mols[i]) < self.max_hindsight_mols or raw_r[0] > self.hindsight_mols[i][0][0]: if m.smiles not in self.hindsight_smiles[i]: self.hindsight_mols[i].append((raw_r[0].item(), m.smiles, m)) self.hindsight_smiles[i].append(m.smiles) if len(self.hindsight_mols[i]) > self.max_hindsight_mols: self.hindsight_mols[i] = sorted(self.hindsight_mols[i], key=lambda x:(x[0]))[ max(int(0.05 * self.max_hindsight_mols), 1):] self.hindsight_smiles[i] = [x[1] for x in self.hindsight_mols[i]] def _add_mol_to_online(self, r, m, inflow): if self.replay_mode == 'online': r = r + self.train_rng.normal() * 0.01 if len(self.online_mols) < self.max_online_mols or r > self.online_mols[0][0]: self.online_mols.append((r, m)) if len(self.online_mols) > self.max_online_mols: self.online_mols = sorted(self.online_mols)[ max(int(0.05 * self.max_online_mols), 1):] elif self.replay_mode == 'prioritized': self.online_mols.append((abs(inflow - np.log(r)), m)) if len(self.online_mols) > self.max_online_mols * 1.1: self.online_mols = self.online_mols[-self.max_online_mols:] def _get_reward(self, m, weights=None): rdmol = m.mol if rdmol is None: return self.reward_min # get reward from proxy raw_reward, score = self.proxy(m, weights) raw_reward = raw_reward.clip(self.reward_min) reward = self.l2r(raw_reward) return reward, (raw_reward, score) def execute_train_episode_batch(self, generator, dataset=None, Y_bounds=None, use_rand_policy=True): if self.train_rng.uniform() < self.hindsight_prob: idx = self.train_rng.randint(self.test_weights.shape[0]) weights = self.test_weights[idx].unsqueeze(0) samples = sum((self.rollout(generator, use_rand_policy, weights) for i in range(self.args.hindsight_trajectories_mbsize)), []) if self.args.hindsight_buffer_mbsize > 0: buffer = deepcopy(self.hindsight_mols[idx]) reward = np.array([x[0] for x in buffer]) prob = reward / sum(reward) eidx = np.random.choice(list(range(len(buffer))), self.args.hindsight_buffer_mbsize, replace=False, p=prob) offline_samples = sum((self._get(i, buffer, weights) for i in eidx), []) samples += offline_samples else: weights = Dirichlet(torch.tensor(self.args.alpha_vector)*self.args.alpha).sample_n(1).to(self.args.device) #* sample weights per batch, seem better samples = sum((self.rollout(generator, use_rand_policy, weights, replay=True) for i in range(self.args.trajectories_mbsize)), []) # offline sampling from dataset if self.args.offline_mbsize > 0 and dataset is not None: # use the oracle reward scores = torch.tensor(pd.DataFrame.from_dict(dataset.scores).values, dtype=torch.float32).to(args.device) if Y_bounds is not None: scores = normalize(scores, Y_bounds) reward = torch.matmul(scores, weights.reshape(-1, 1)) prob = (reward / sum(reward)).squeeze(1).cpu().numpy() eidx = np.random.choice(list(range(len(dataset.all_mols))), 
self.args.offline_mbsize, replace=False, p=prob) offline_samples = sum((self._get(i, dataset.all_mols, weights) for i in eidx), []) samples += offline_samples return zip(*samples) def initialize_hindsight_mols(self, dataset): for m in dataset.all_mols: for i, weights in enumerate(self.test_weights): r, raw_r = self._get_reward(m, weights) self.hindsight_mols[i].append((raw_r[0].item(), m.smiles, m)) for i, weights in enumerate(self.test_weights): self.hindsight_mols[i] = sorted(self.hindsight_mols[i], key=lambda x:(x[0])) self.hindsight_smiles[i] = [x[1] for x in self.hindsight_mols[i]] def train_generative_model(args, generator, bpath, proxy, oracle, dataset, test_weights, round_idx, do_save=False): print("Training generator...") os.makedirs(os.path.join(args.log_dir, f'round_{round_idx}'), exist_ok=True) device = args.device rollout_worker = BoRolloutWorker(args, bpath, proxy, device) rollout_worker.test_weights = torch.tensor(test_weights).to(device) rollout_worker.initialize_hindsight_mols(dataset) Y_bounds = torch.stack([proxy.partitioning.Y.min(dim=-2).values, proxy.partitioning.Y.max(dim=-2).values]) def save_stuff(round_idx, iter): torch.save(generator.state_dict(), os.path.join( args.log_dir, 'round_{}/{}_generator_checkpoint.pth'.format(round_idx, iter))) pickle.dump(rollout_worker.sampled_mols, gzip.open(f'{args.log_dir}/sampled_mols.pkl.gz', 'wb')) multi_thread = not args.debug if multi_thread: sampler = rollout_worker.start_samplers(generator, 8, dataset) def stop_everything(): print('joining') rollout_worker.stop_samplers_and_join() last_losses = [] train_losses = [] test_losses = [] test_infos = [] train_infos = [] time_last_check = time.time() for i in range(args.num_iterations + 1): if multi_thread: r = sampler() for thread in rollout_worker.sampler_threads: if thread.failed: stop_everything() pdb.post_mortem(thread.exception.__traceback__) return p, pb, a, pw, w, r, s, d, mols = r else: p, pb, a, pw, w, r, s, d, mols = rollout_worker.sample2batch( rollout_worker.execute_train_episode_batch(generator, dataset, Y_bounds, use_rand_policy=True)) loss = generator.train_step(p, pb, a, pw, w, r, s, d, mols, i) last_losses.append(loss) if not i % 100: train_loss = [np.round(np.mean(i), 3) for i in zip(*last_losses)] train_losses.append(train_loss) args.logger.add_scalar( 'Loss/round{}/train'.format(round_idx), train_loss[0], use_context=False) print('Iter {}: Loss {}, Time {}'.format( i, train_loss, round(time.time() - time_last_check, 3))) time_last_check = time.time() last_losses = [] if not i % args.sample_iterations and i != 0: volume, volume_oracle, reward_weight, reward_mean, test_loss, diversity = sample_batch( args, generator, rollout_worker, oracle, proxy, Y_bounds, compute_multi_objective_metric=False) args.logger.add_scalar( 'round{}/Top-100-sampled/volumes'.format(round_idx), volume, use_context=False) args.logger.add_scalar( 'round{}/Top-100-sampled/volumes_oracle'.format(round_idx), volume_oracle, use_context=False) args.logger.add_scalars( 'round{}/Top-100-sampled/reward_weight'.format(round_idx), reward_weight, use_context=False) args.logger.add_scalar( 'round{}/Top-100-sampled/reward_mean'.format(round_idx), reward_mean, use_context=False) # reward_mean is a dict, the keys are test_weights args.logger.add_scalar( 'round{}/Top-100-sampled/test_loss'.format(round_idx), test_loss, use_context=False) args.logger.add_scalar( 'round{}/Top-100-sampled/dists'.format(round_idx), diversity, use_context=False) if do_save: save_stuff(round_idx, i) stop_everything() if 
do_save: save_stuff(round_idx, i) checkpoint_path = os.path.join(args.log_dir, f'round_{round_idx}/{i}_generator_checkpoint.pth') generator.load_state_dict(torch.load(checkpoint_path)) return rollout_worker, {'train_losses': train_losses, 'test_losses': test_losses, 'test_infos': test_infos, 'train_infos': train_infos} def sample_batch(args, generator, rollout_worker, oracle=None, proxy=None, ref_mols=None, Y_bounds=None, compute_multi_objective_metric=False): score_succ = {'gsk3b': 0.5, 'jnk3': 0.5, 'drd2': 0.5, 'chemprop_sars': 0.5, 'chemprop_hiv': 0.5, "seh": 0.5, 'qed': 0.6, 'sa': 0.67} if Y_bounds is None: Y_bounds = torch.stack([proxy.partitioning.Y.min( dim=-2).values, proxy.partitioning.Y.max(dim=-2).values]) time_start = time.time() print(f"Sampling molecules...") raw_rewards = [] raw_rewards_weight = {} means = [] picked_mols = [] smis = [] for i, weights in enumerate(rollout_worker.test_weights): sampled_mols = [] sampled_raw_rewards = [] sampled_means = [] sampled_smis = [] while len(sampled_mols) < args.num_samples: rollout_worker.rollout(generator, use_rand_policy=False, weights=torch.tensor(weights).unsqueeze(0).to(args.device)) (raw_r, _, m, trajectory_stats, inflow) = rollout_worker.sampled_mols[-1] sampled_mols.append(m) sampled_raw_rewards.append(raw_r[0].item()) sampled_means.append(raw_r[1]) sampled_smis.append(m.smiles) idx_pick = np.argsort(sampled_raw_rewards)[::-1][:int(args.num_samples/len(rollout_worker.test_weights))] picked_mols.extend(np.array(sampled_mols)[idx_pick].tolist()) means.extend(np.array(sampled_means)[idx_pick].tolist()) smis.extend(np.array(sampled_smis)[idx_pick].tolist()) raw_rewards.extend(np.array(sampled_raw_rewards)[idx_pick].tolist()) raw_rewards_weight[str(weights.cpu())] = np.array(sampled_raw_rewards)[idx_pick].mean() raw_rewards_mean = np.mean(list(raw_rewards_weight.values())) assert len(picked_mols) == args.num_samples top_means = torch.tensor(means) scores_dict = oracle.batch_get_scores(picked_mols) scores = torch.tensor(pd.DataFrame.from_dict(scores_dict).values) test_loss = F.mse_loss(top_means, scores) hypervolume = Hypervolume(ref_point=torch.zeros(len(args.objectives))) volume = hypervolume.compute(top_means) volume_oracle = hypervolume.compute(scores) diversity = compute_diversity(picked_mols) batch_metrics = {'Hypervolume_reward': volume, 'Hypervolume_oracle': volume_oracle, 'Reward_mean': raw_rewards_mean, 'scores_max': pd.DataFrame.from_dict(scores_dict).max().to_dict(), 'scores_mean': pd.DataFrame.from_dict(scores_dict).mean().to_dict(), 'Test_loss': test_loss, 'Diversity': diversity} print(batch_metrics) print('Time: {}'.format(time.time()-time_start)) if not compute_multi_objective_metric: return volume, volume_oracle, raw_rewards_weight, raw_rewards_mean, test_loss, diversity else: for i in range(len(picked_mols)): picked_mols[i].score = scores_dict[i] # success/diversity/novelty is computed among the top mols. success, positive_mols = compute_success( picked_mols, scores_dict, args.objectives, score_succ) succ_diversity = compute_diversity(positive_mols) if ref_mols: novelty = compute_novelty(positive_mols, ref_mols) else: novelty = 1. 
mo_metrics = {'success': success, 'novelty': novelty, 'succ_diversity': succ_diversity, } picked_smis = [(raw_rewards[i], picked_mols[i].score, smis[i]) for i in range(len(raw_rewards))] print(mo_metrics) return (picked_mols, scores_dict, picked_smis), batch_metrics, mo_metrics def log_overall_metrics(args, dataset, batch_infos=None, MultiObjective_metrics=None): volume = dataset.compute_hypervolume() print("Hypervolume for {}: {}".format(args.logger.context, volume)) args.logger.add_scalar('Metric/hypervolume', volume, use_context=False) args.logger.add_object('scores', dataset.scores) args.logger.add_object('smis', dataset.smis) if batch_infos: args.logger.add_scalar( 'Metric/test_loss', batch_infos['Test_loss'], use_context=False) args.logger.add_object('collected_info', batch_infos) if MultiObjective_metrics: args.logger.add_scalars('Metric/MultiObjective', MultiObjective_metrics, use_context=False) def get_test_rays(): if args.n_objectives == 3: n_partitions = 6 elif args.n_objectives == 4: n_partitions = 7 test_rays = get_reference_directions("das-dennis", args.n_objectives, n_partitions=n_partitions).astype(np.float32) test_rays = test_rays[[(r > 0).all() for r in test_rays]] print(f"initialize {len(test_rays)} test rays") return test_rays def main(args): set_random_seed(args.seed) args.logger.set_context('iter_0') bpath = "./data/blocks_105.json" dpath = "./data/docked_mols.h5" # Initialize oracle and dataset (for training surrogate function) oracle = Oracle(args) dataset = Dataset(args, bpath, oracle, args.device) dataset.load_h5(dpath, num_init_examples=args.num_init_examples) log_overall_metrics(args, dataset) args.n_objectives = len(args.objectives) # Initialize surrogate function proxy = get_proxy(args, bpath, oracle) proxy.update(dataset, 0, reset=False) for i in range(1, args.num_outer_loop_iters+1): print(f"====== Starting round {i} ======") args.logger.set_context('iter_{}'.format(i)) test_weights = np.random.dirichlet(args.alpha_vector, 5*(2**(args.n_objectives-2))).astype(np.float32) if args.criterion == 'TB':
generator = TBGFlowNet(args, bpath)
6
2023-10-24 14:10:35+00:00
24k
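Note: the record above ends with a rollout worker that scalarizes multi-objective scores using preference weights sampled from a Dirichlet distribution (weights ~ Dirichlet(alpha * alpha_vector)), after min-max normalizing the scores with per-objective bounds (Y_bounds). The snippet below is a minimal, self-contained sketch of just that scalarization step; it is not taken from the repository, and the `normalize` helper and the concentration values are assumptions made for illustration.

import torch
from torch.distributions.dirichlet import Dirichlet

def normalize(scores: torch.Tensor, bounds: torch.Tensor) -> torch.Tensor:
    # bounds[0] holds per-objective minima, bounds[1] per-objective maxima (like Y_bounds above)
    return (scores - bounds[0]) / (bounds[1] - bounds[0]).clamp_min(1e-8)

alpha_vector = torch.tensor([1.0, 1.0, 1.0])          # one concentration per objective (assumed values)
weights = Dirichlet(alpha_vector * 1.0).sample((1,))  # shape (1, n_objectives); rows sum to 1

scores = torch.rand(8, 3)                             # fake batch of raw objective scores
bounds = torch.stack([scores.min(dim=0).values, scores.max(dim=0).values])
reward = normalize(scores, bounds) @ weights.T        # shape (8, 1): scalarized reward per candidate
prob = (reward / reward.sum()).squeeze(1)             # selection probabilities, as in the offline branch

The last line mirrors how the training loop above converts scalarized rewards into sampling probabilities when drawing offline examples from the dataset.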
caglarkucuk/earthformer-satellite-to-radar
ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer_unet_dec.py
[ { "identifier": "Upsample3DLayer", "path": "ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer.py", "snippet": "class Upsample3DLayer(nn.Module):\n \"\"\"Upsampling based on nn.UpSampling and Conv3x3.\n\n If the temporal dimension remains the same:\n x --> interpolation-2d (neares...
from typing import Sequence, Union from functools import lru_cache from collections import OrderedDict from torch import nn from einops import rearrange from .cuboid_transformer import ( Upsample3DLayer, PatchMerging3D, PosEmbed, InitialEncoder, FinalDecoder, InitialStackPatchMergingEncoder, FinalStackUpsamplingDecoder, StackCuboidSelfAttentionBlock, StackCuboidCrossAttentionBlock, CuboidTransformerEncoder) from .cuboid_transformer_patterns import CuboidSelfAttentionPatterns, CuboidCrossAttentionPatterns from .utils import ( get_activation, get_norm_layer, _generalize_padding, _generalize_unpadding, apply_initialization, round_to) import warnings import torch import torch.nn.functional as F import torch.utils.checkpoint as checkpoint
17,519
downsample_type=downsample_type, block_attn_patterns=enc_attn_patterns, block_cuboid_size=enc_cuboid_size, block_strategy=enc_cuboid_strategy, block_shift_size=enc_shift_size, num_heads=num_heads, attn_drop=attn_drop, proj_drop=proj_drop, ffn_drop=ffn_drop, gated_ffn=gated_ffn, ffn_activation=ffn_activation, norm_layer=norm_layer, use_inter_ffn=enc_use_inter_ffn, padding_type=padding_type, use_global_vector=num_global_vectors > 0, use_global_vector_ffn=use_global_vector_ffn, use_global_self_attn=use_global_self_attn, separate_global_qkv=separate_global_qkv, global_dim_ratio=global_dim_ratio, checkpoint_level=checkpoint_level, use_relative_pos=use_relative_pos, self_attn_use_final_proj=self_attn_use_final_proj, # initialization attn_linear_init_mode=attn_linear_init_mode, ffn_linear_init_mode=ffn_linear_init_mode, conv_init_mode=conv_init_mode, down_linear_init_mode=down_up_linear_init_mode, norm_init_mode=norm_init_mode, ) self.enc_pos_embed = PosEmbed( embed_dim=base_units, typ=pos_embed_type, maxH=H_in, maxW=W_in, maxT=T_in) mem_shapes = self.encoder.get_mem_shapes() self.dec_pos_embed = PosEmbed( embed_dim=mem_shapes[-1][-1], typ=pos_embed_type, maxT=T_out, maxH=mem_shapes[-1][1], maxW=mem_shapes[-1][2]) self.unet_dec_cross_mode = unet_dec_cross_mode self.decoder = CuboidTransformerUNetDecoder( target_temporal_length=T_out, mem_shapes=mem_shapes, cross_start=dec_cross_start, depth=dec_depth, upsample_type=upsample_type, block_self_attn_patterns=dec_self_attn_patterns, block_self_cuboid_size=dec_self_cuboid_size, block_self_shift_size=dec_self_shift_size, block_self_cuboid_strategy=dec_self_cuboid_strategy, block_cross_attn_patterns=dec_cross_attn_patterns, block_cross_cuboid_hw=dec_cross_cuboid_hw, block_cross_shift_hw=dec_cross_shift_hw, block_cross_cuboid_strategy=dec_cross_cuboid_strategy, block_cross_n_temporal=dec_cross_n_temporal, cross_last_n_frames=dec_cross_last_n_frames, num_heads=num_heads, attn_drop=attn_drop, proj_drop=proj_drop, ffn_drop=ffn_drop, upsample_kernel_size=upsample_kernel_size, ffn_activation=ffn_activation, gated_ffn=gated_ffn, norm_layer=norm_layer, use_inter_ffn=dec_use_inter_ffn, max_temporal_relative=T_in + T_out, padding_type=padding_type, hierarchical_pos_embed=dec_hierarchical_pos_embed, pos_embed_type=pos_embed_type, use_self_global=(num_global_vectors > 0) and use_dec_self_global, self_update_global=dec_self_update_global, use_cross_global=(num_global_vectors > 0) and use_dec_cross_global, use_global_vector_ffn=use_global_vector_ffn, use_global_self_attn=use_global_self_attn, separate_global_qkv=separate_global_qkv, global_dim_ratio=global_dim_ratio, checkpoint_level=checkpoint_level, use_relative_pos=use_relative_pos, self_attn_use_final_proj=self_attn_use_final_proj, # initialization attn_linear_init_mode=attn_linear_init_mode, ffn_linear_init_mode=ffn_linear_init_mode, conv_init_mode=conv_init_mode, up_linear_init_mode=down_up_linear_init_mode, norm_init_mode=norm_init_mode, # different from CuboidTransformerDecoder downsample=downsample, downsample_type=downsample_type, cross_mode=unet_dec_cross_mode, down_linear_init_mode=down_up_linear_init_mode, ) self.reset_parameters() def get_initial_encoder_final_decoder( self, initial_downsample_type, activation, # initial_downsample_type=="conv" initial_downsample_scale, initial_downsample_conv_layers, final_upsample_conv_layers, padding_type, # initial_downsample_type == "stack_conv" initial_downsample_stack_conv_num_layers, initial_downsample_stack_conv_dim_list, 
initial_downsample_stack_conv_downscale_list, initial_downsample_stack_conv_num_conv_list, ): T_in, H_in, W_in, C_in = self.input_shape T_out, H_out, W_out, C_out = self.target_shape # Construct the initial upsampling / downsampling layers self.initial_downsample_type = initial_downsample_type if self.initial_downsample_type == "conv": if isinstance(initial_downsample_scale, int): initial_downsample_scale = (1, initial_downsample_scale, initial_downsample_scale) elif len(initial_downsample_scale) == 2: initial_downsample_scale = (1, *initial_downsample_scale) elif len(initial_downsample_scale) == 3: initial_downsample_scale = tuple(initial_downsample_scale) else: raise NotImplementedError(f"initial_downsample_scale {initial_downsample_scale} format not supported!") # if any(ele > 1 for ele in initial_downsample_scale):
"""CuboidTransformer adapted for auxiliary inputs in decoder""" class CuboidTransformerUNetDecoder(nn.Module): """U-Net style Decoder of the CuboidTransformer. For each block, we first apply the StackCuboidSelfAttention and then apply the StackCuboidCrossAttention We add cross attention following 3 modes: cross_mode == "down": x --> attn --> cross_attn --> downscale --> ... --> z --> attn --> upscale --> ... --> out ^ ^ | | | | mem mem cross_mode == "up": x --> attn --> downscale --> ... --> z --> attn --> cross_attn --> upscale --> ... --> out ^ ^ | | | | mem mem cross_mode == "both": x --> attn --> cross_attn --> downscale --> ... --> z --> attn --> cross_attn --> upscale --> ... --> out ^ ^ ^ ^ | | | | | | | | mem mem mem mem """ def __init__(self, target_temporal_length, mem_shapes, cross_start=0, depth=[2, 2], upsample_type="upsample", upsample_kernel_size=3, block_self_attn_patterns=None, block_self_cuboid_size=[(4, 4, 4), (4, 4, 4)], block_self_cuboid_strategy=[('l', 'l', 'l'), ('d', 'd', 'd')], block_self_shift_size=[(1, 1, 1), (0, 0, 0)], block_cross_attn_patterns=None, block_cross_cuboid_hw=[(4, 4), (4, 4)], block_cross_cuboid_strategy=[('l', 'l', 'l'), ('d', 'l', 'l')], block_cross_shift_hw=[(0, 0), (0, 0)], block_cross_n_temporal=[1, 2], cross_last_n_frames=None, num_heads=4, attn_drop=0.0, proj_drop=0.0, ffn_drop=0.0, ffn_activation='leaky', gated_ffn=False, norm_layer='layer_norm', use_inter_ffn=False, hierarchical_pos_embed=False, pos_embed_type='t+hw', max_temporal_relative=50, padding_type='ignore', checkpoint_level=True, use_relative_pos=True, self_attn_use_final_proj=True, # global vectors use_self_global=False, self_update_global=True, use_cross_global=False, use_global_vector_ffn=True, use_global_self_attn=False, separate_global_qkv=False, global_dim_ratio=1, # initialization attn_linear_init_mode="0", ffn_linear_init_mode="0", conv_init_mode="0", up_linear_init_mode="0", norm_init_mode="0", # different from `CuboidTransformerDecoder`, no arg `use_first_self_attn=False` downsample=2, downsample_type='patch_merge', cross_mode="up", down_linear_init_mode="0", ): """ Parameters ---------- target_temporal_length mem_shapes cross_start The block to start cross attention depth Depth of each block downsample The downsample ratio downsample_type Type of the downsampling layer upsample_type The type of the upsampling layers upsample_kernel_size block_self_attn_patterns Pattern of the block self attentions block_self_cuboid_size block_self_cuboid_strategy block_self_shift_size block_cross_attn_patterns block_cross_cuboid_hw block_cross_cuboid_strategy block_cross_shift_hw block_cross_n_temporal cross_last_n_frames cross_mode Must be one of ("up", "down", "both") Control whether the upsampling/downsampling/both phases cross attend to the encoded latent features num_heads attn_drop proj_drop ffn_drop ffn_activation gated_ffn Whether to enable gated ffn or not norm_layer The normalization layer use_inter_ffn Whether to use intermediate FFN hierarchical_pos_embed Whether to add pos embedding for each hierarchy. 
max_temporal_relative padding_type checkpoint_level """ super(CuboidTransformerUNetDecoder, self).__init__() # initialization mode self.attn_linear_init_mode = attn_linear_init_mode self.ffn_linear_init_mode = ffn_linear_init_mode self.conv_init_mode = conv_init_mode self.up_linear_init_mode = up_linear_init_mode self.norm_init_mode = norm_init_mode assert len(depth) == len(mem_shapes) self.target_temporal_length = target_temporal_length self.num_blocks = len(mem_shapes) self.cross_start = cross_start self.mem_shapes = mem_shapes self.block_units = tuple(mem_shape[-1] for mem_shape in self.mem_shapes) self.depth = depth if not isinstance(downsample, (tuple, list)): downsample = (1, downsample, downsample) self.downsample = downsample self.downsample_type = downsample_type self.upsample_type = upsample_type self.hierarchical_pos_embed = hierarchical_pos_embed self.checkpoint_level = checkpoint_level self.use_self_global = use_self_global self.self_update_global = self_update_global self.use_cross_global = use_cross_global self.use_global_vector_ffn = use_global_vector_ffn assert cross_mode in ["up", "down", "both"], f"Invalid cross_mode {cross_mode}!" self.cross_mode = cross_mode self.up_use_cross = self.cross_mode in ["up", "both"] self.down_use_cross = self.cross_mode in ["down", "both"] if self.num_blocks > 1: # Construct downsampling layers if downsample_type == 'patch_merge': self.downsample_layers = nn.ModuleList( [PatchMerging3D(dim=self.block_units[i], downsample=downsample, # downsample=(1, 1, 1), padding_type=padding_type, out_dim=self.block_units[i + 1], linear_init_mode=down_linear_init_mode, norm_init_mode=norm_init_mode) for i in range(self.num_blocks - 1)]) else: raise NotImplementedError # Construct upsampling layers if self.upsample_type == "upsample": self.upsample_layers = nn.ModuleList([ Upsample3DLayer( dim=self.mem_shapes[i + 1][-1], out_dim=self.mem_shapes[i][-1], target_size=(target_temporal_length,) + self.mem_shapes[i][1:3], kernel_size=upsample_kernel_size, temporal_upsample=False, conv_init_mode=conv_init_mode, ) for i in range(self.num_blocks - 1)]) else: raise NotImplementedError if self.hierarchical_pos_embed: self.down_hierarchical_pos_embed_l = nn.ModuleList([ PosEmbed(embed_dim=self.block_units[i], typ=pos_embed_type, maxT=self.mem_shapes[i][0], maxH=self.mem_shapes[i][1], maxW=self.mem_shapes[i][2]) for i in range(self.num_blocks - 1)]) self.up_hierarchical_pos_embed_l = nn.ModuleList([ PosEmbed(embed_dim=self.block_units[i], typ=pos_embed_type, maxT=self.mem_shapes[i][0], maxH=self.mem_shapes[i][1], maxW=self.mem_shapes[i][2]) for i in range(self.num_blocks - 1)]) if block_self_attn_patterns is not None: if isinstance(block_self_attn_patterns, (tuple, list)): assert len(block_self_attn_patterns) == self.num_blocks else: block_self_attn_patterns = [block_self_attn_patterns for _ in range(self.num_blocks)] block_self_cuboid_size = [] block_self_cuboid_strategy = [] block_self_shift_size = [] for idx, key in enumerate(block_self_attn_patterns): func = CuboidSelfAttentionPatterns.get(key) cuboid_size, strategy, shift_size = func(mem_shapes[idx]) block_self_cuboid_size.append(cuboid_size) block_self_cuboid_strategy.append(strategy) block_self_shift_size.append(shift_size) else: if not isinstance(block_self_cuboid_size[0][0], (list, tuple)): block_self_cuboid_size = [block_self_cuboid_size for _ in range(self.num_blocks)] else: assert len(block_self_cuboid_size) == self.num_blocks,\ f'Incorrect input format! 
Received block_self_cuboid_size={block_self_cuboid_size}' if not isinstance(block_self_cuboid_strategy[0][0], (list, tuple)): block_self_cuboid_strategy = [block_self_cuboid_strategy for _ in range(self.num_blocks)] else: assert len(block_self_cuboid_strategy) == self.num_blocks,\ f'Incorrect input format! Received block_self_cuboid_strategy={block_self_cuboid_strategy}' if not isinstance(block_self_shift_size[0][0], (list, tuple)): block_self_shift_size = [block_self_shift_size for _ in range(self.num_blocks)] else: assert len(block_self_shift_size) == self.num_blocks,\ f'Incorrect input format! Received block_self_shift_size={block_self_shift_size}' down_self_blocks = [] up_self_blocks = [] for i in range(self.num_blocks): ele_depth = depth[i] stack_cuboid_blocks =\ [StackCuboidSelfAttentionBlock( dim=self.mem_shapes[i][-1], num_heads=num_heads, block_cuboid_size=block_self_cuboid_size[i], block_strategy=block_self_cuboid_strategy[i], block_shift_size=block_self_shift_size[i], attn_drop=attn_drop, proj_drop=proj_drop, ffn_drop=ffn_drop, activation=ffn_activation, gated_ffn=gated_ffn, norm_layer=norm_layer, use_inter_ffn=use_inter_ffn, padding_type=padding_type, use_global_vector=use_self_global, use_global_vector_ffn=use_global_vector_ffn, use_global_self_attn=use_global_self_attn, separate_global_qkv=separate_global_qkv, global_dim_ratio=global_dim_ratio, checkpoint_level=checkpoint_level, use_relative_pos=use_relative_pos, use_final_proj=self_attn_use_final_proj, # initialization attn_linear_init_mode=attn_linear_init_mode, ffn_linear_init_mode=ffn_linear_init_mode, norm_init_mode=norm_init_mode, ) for _ in range(ele_depth)] down_self_blocks.append(nn.ModuleList(stack_cuboid_blocks)) stack_cuboid_blocks = \ [StackCuboidSelfAttentionBlock( dim=self.mem_shapes[i][-1], num_heads=num_heads, block_cuboid_size=block_self_cuboid_size[i], block_strategy=block_self_cuboid_strategy[i], block_shift_size=block_self_shift_size[i], attn_drop=attn_drop, proj_drop=proj_drop, ffn_drop=ffn_drop, activation=ffn_activation, gated_ffn=gated_ffn, norm_layer=norm_layer, use_inter_ffn=use_inter_ffn, padding_type=padding_type, use_global_vector=use_self_global, use_global_vector_ffn=use_global_vector_ffn, use_global_self_attn=use_global_self_attn, separate_global_qkv=separate_global_qkv, global_dim_ratio=global_dim_ratio, checkpoint_level=checkpoint_level, use_relative_pos=use_relative_pos, use_final_proj=self_attn_use_final_proj, # initialization attn_linear_init_mode=attn_linear_init_mode, ffn_linear_init_mode=ffn_linear_init_mode, norm_init_mode=norm_init_mode, ) for _ in range(ele_depth)] up_self_blocks.append(nn.ModuleList(stack_cuboid_blocks)) self.down_self_blocks = nn.ModuleList(down_self_blocks) self.up_self_blocks = nn.ModuleList(up_self_blocks) if block_cross_attn_patterns is not None: if isinstance(block_cross_attn_patterns, (tuple, list)): assert len(block_cross_attn_patterns) == self.num_blocks else: block_cross_attn_patterns = [block_cross_attn_patterns for _ in range(self.num_blocks)] block_cross_cuboid_hw = [] block_cross_cuboid_strategy = [] block_cross_shift_hw = [] block_cross_n_temporal = [] for idx, key in enumerate(block_cross_attn_patterns): if key == "last_frame_dst": cuboid_hw = None shift_hw = None strategy = None n_temporal = None else: func = CuboidCrossAttentionPatterns.get(key) cuboid_hw, shift_hw, strategy, n_temporal = func(mem_shapes[idx]) block_cross_cuboid_hw.append(cuboid_hw) block_cross_cuboid_strategy.append(strategy) block_cross_shift_hw.append(shift_hw) 
block_cross_n_temporal.append(n_temporal) else: if not isinstance(block_cross_cuboid_hw[0][0], (list, tuple)): block_cross_cuboid_hw = [block_cross_cuboid_hw for _ in range(self.num_blocks)] else: assert len(block_cross_cuboid_hw) == self.num_blocks, \ f'Incorrect input format! Received block_cross_cuboid_hw={block_cross_cuboid_hw}' if not isinstance(block_cross_cuboid_strategy[0][0], (list, tuple)): block_cross_cuboid_strategy = [block_cross_cuboid_strategy for _ in range(self.num_blocks)] else: assert len(block_cross_cuboid_strategy) == self.num_blocks, \ f'Incorrect input format! Received block_cross_cuboid_strategy={block_cross_cuboid_strategy}' if not isinstance(block_cross_shift_hw[0][0], (list, tuple)): block_cross_shift_hw = [block_cross_shift_hw for _ in range(self.num_blocks)] else: assert len(block_cross_shift_hw) == self.num_blocks, \ f'Incorrect input format! Received block_cross_shift_hw={block_cross_shift_hw}' if not isinstance(block_cross_n_temporal[0], (list, tuple)): block_cross_n_temporal = [block_cross_n_temporal for _ in range(self.num_blocks)] else: assert len(block_cross_n_temporal) == self.num_blocks, \ f'Incorrect input format! Received block_cross_n_temporal={block_cross_n_temporal}' if self.up_use_cross: self.up_cross_blocks = nn.ModuleList() for i in range(self.cross_start, self.num_blocks): cross_block = nn.ModuleList( [StackCuboidCrossAttentionBlock( dim=self.mem_shapes[i][-1], num_heads=num_heads, block_cuboid_hw=block_cross_cuboid_hw[i], block_strategy=block_cross_cuboid_strategy[i], block_shift_hw=block_cross_shift_hw[i], block_n_temporal=block_cross_n_temporal[i], cross_last_n_frames=cross_last_n_frames, attn_drop=attn_drop, proj_drop=proj_drop, ffn_drop=ffn_drop, gated_ffn=gated_ffn, norm_layer=norm_layer, use_inter_ffn=use_inter_ffn, activation=ffn_activation, max_temporal_relative=max_temporal_relative, padding_type=padding_type, use_global_vector=use_cross_global, separate_global_qkv=separate_global_qkv, global_dim_ratio=global_dim_ratio, checkpoint_level=checkpoint_level, use_relative_pos=use_relative_pos, # initialization attn_linear_init_mode=attn_linear_init_mode, ffn_linear_init_mode=ffn_linear_init_mode, norm_init_mode=norm_init_mode, ) for _ in range(depth[i])]) self.up_cross_blocks.append(cross_block) if self.down_use_cross: self.down_cross_blocks = nn.ModuleList() for i in range(self.cross_start, self.num_blocks): cross_block = nn.ModuleList( [StackCuboidCrossAttentionBlock( dim=self.mem_shapes[i][-1], num_heads=num_heads, block_cuboid_hw=block_cross_cuboid_hw[i], block_strategy=block_cross_cuboid_strategy[i], block_shift_hw=block_cross_shift_hw[i], block_n_temporal=block_cross_n_temporal[i], cross_last_n_frames=cross_last_n_frames, attn_drop=attn_drop, proj_drop=proj_drop, ffn_drop=ffn_drop, gated_ffn=gated_ffn, norm_layer=norm_layer, use_inter_ffn=use_inter_ffn, activation=ffn_activation, max_temporal_relative=max_temporal_relative, padding_type=padding_type, use_global_vector=use_cross_global, separate_global_qkv=separate_global_qkv, global_dim_ratio=global_dim_ratio, checkpoint_level=checkpoint_level, use_relative_pos=use_relative_pos, # initialization attn_linear_init_mode=attn_linear_init_mode, ffn_linear_init_mode=ffn_linear_init_mode, norm_init_mode=norm_init_mode, ) for _ in range(depth[i])]) self.down_cross_blocks.append(cross_block) self.reset_parameters() def reset_parameters(self): for ms in self.down_self_blocks: for m in ms: m.reset_parameters() for ms in self.up_self_blocks: for m in ms: m.reset_parameters() if 
self.up_use_cross: for ms in self.up_cross_blocks: for m in ms: m.reset_parameters() if self.down_use_cross: for ms in self.down_cross_blocks: for m in ms: m.reset_parameters() if self.num_blocks > 1: for m in self.downsample_layers: m.reset_parameters() for m in self.upsample_layers: m.reset_parameters() if self.hierarchical_pos_embed: for m in self.down_hierarchical_pos_embed_l: m.reset_parameters() for m in self.up_hierarchical_pos_embed_l: m.reset_parameters() def forward(self, x, mem_l, mem_global_vector_l=None): """ Parameters ---------- x Shape (B, T, H, W, C) mem_l A list of memory tensors Returns ------- out """ B, T, H, W, C = x.shape assert T == self.target_temporal_length assert (H, W) == (self.mem_shapes[0][1], self.mem_shapes[0][2]) new_mem_global_vector_l = [] for i in range(self.num_blocks): # Downample if i > 0: x = self.downsample_layers[i - 1](x) if self.hierarchical_pos_embed: x = self.down_hierarchical_pos_embed_l[i - 1](x) mem_global_vector = None if mem_global_vector_l is None else mem_global_vector_l[i] for idx in range(self.depth[i]): if self.use_self_global: if self.self_update_global: x, mem_global_vector = self.down_self_blocks[i][idx](x, mem_global_vector) else: x, _ = self.down_self_blocks[i][idx](x, mem_global_vector) else: x = self.down_self_blocks[i][idx](x) if self.down_use_cross and i >= self.cross_start: x = self.down_cross_blocks[i - self.cross_start][idx](x, mem_l[i], mem_global_vector) new_mem_global_vector_l.append(mem_global_vector) for i in range(self.num_blocks - 1, -1, -1): mem_global_vector = new_mem_global_vector_l[i] for idx in range(self.depth[i]): if self.use_self_global: if self.self_update_global: x, mem_global_vector = self.up_self_blocks[i][idx](x, mem_global_vector) else: x, _ = self.up_self_blocks[i][idx](x, mem_global_vector) else: x = self.up_self_blocks[i][idx](x) if self.up_use_cross and i >= self.cross_start: x = self.up_cross_blocks[i - self.cross_start][idx](x, mem_l[i], mem_global_vector) # Upsample if i > 0: x = self.upsample_layers[i - 1](x) if self.hierarchical_pos_embed: x = self.up_hierarchical_pos_embed_l[i - 1](x) return x class CuboidTransformerAuxModel(nn.Module): """Cuboid Transformer with auxiliary input in decoder for spatiotemporal forecasting We adopt the Non-autoregressive encoder-decoder architecture. The decoder takes the multi-scale memory output from the encoder, as well as auxiliary input. 
The initial downsampling / upsampling layers will be Downsampling: [K x Conv2D --> PatchMerge] Upsampling: [Nearest Interpolation-based Upsample --> K x Conv2D] x -----------> downsample (optional) ---> (+pos_embed) ---> enc ---------> mem_l | | |------------------| | | aux_input ---> downsample (optional) ---> (+pos_embed) ---> enc -> cross_attn -> dec -> upsample (optional) -> y """ def __init__(self, input_shape, target_shape, base_units=128, block_units=None, scale_alpha=1.0, num_heads=4, attn_drop=0.0, proj_drop=0.0, ffn_drop=0.0, # inter-attn downsample/upsample downsample=2, downsample_type='patch_merge', upsample_type="upsample", upsample_kernel_size=3, # encoder enc_depth=[4, 4, 4], enc_attn_patterns=None, enc_cuboid_size=[(4, 4, 4), (4, 4, 4)], enc_cuboid_strategy=[('l', 'l', 'l'), ('d', 'd', 'd')], enc_shift_size=[(0, 0, 0), (0, 0, 0)], enc_use_inter_ffn=True, # decoder dec_depth=[2, 2], dec_cross_start=0, dec_self_attn_patterns=None, dec_self_cuboid_size=[(4, 4, 4), (4, 4, 4)], dec_self_cuboid_strategy=[('l', 'l', 'l'), ('d', 'd', 'd')], dec_self_shift_size=[(1, 1, 1), (0, 0, 0)], dec_cross_attn_patterns=None, dec_cross_cuboid_hw=[(4, 4), (4, 4)], dec_cross_cuboid_strategy=[('l', 'l', 'l'), ('d', 'l', 'l')], dec_cross_shift_hw=[(0, 0), (0, 0)], dec_cross_n_temporal=[1, 2], dec_cross_last_n_frames=None, dec_use_inter_ffn=True, dec_hierarchical_pos_embed=False, # global vectors num_global_vectors=4, use_dec_self_global=True, dec_self_update_global=True, use_dec_cross_global=True, use_global_vector_ffn=True, use_global_self_attn=False, separate_global_qkv=False, global_dim_ratio=1, # # initial downsample and final upsample initial_downsample_type="conv", initial_downsample_activation="leaky", # initial_downsample_type=="conv" initial_downsample_scale=1, initial_downsample_conv_layers=2, final_upsample_conv_layers=2, # initial_downsample_type == "stack_conv" initial_downsample_stack_conv_num_layers=1, initial_downsample_stack_conv_dim_list=None, initial_downsample_stack_conv_downscale_list=[1, ], initial_downsample_stack_conv_num_conv_list=[2, ], # # end of initial downsample and final upsample ffn_activation='leaky', gated_ffn=False, norm_layer='layer_norm', padding_type='ignore', pos_embed_type='t+hw', checkpoint_level=True, use_relative_pos=True, self_attn_use_final_proj=True, # initialization attn_linear_init_mode="0", ffn_linear_init_mode="0", conv_init_mode="0", down_up_linear_init_mode="0", norm_init_mode="0", # different from CuboidTransformerModel, no arg `dec_use_first_self_attn=False` auxiliary_channels: int = 1, unet_dec_cross_mode="up", ): """ Parameters ---------- input_shape Shape of the input tensor. It will be (T, H, W, C_in) target_shape Shape of the input tensor. It will be (T_out, H, W, C_out) base_units The base units """ super(CuboidTransformerAuxModel, self).__init__() # initialization mode self.attn_linear_init_mode = attn_linear_init_mode self.ffn_linear_init_mode = ffn_linear_init_mode self.conv_init_mode = conv_init_mode self.down_up_linear_init_mode = down_up_linear_init_mode self.norm_init_mode = norm_init_mode assert len(enc_depth) == len(dec_depth) self.base_units = base_units self.num_global_vectors = num_global_vectors if global_dim_ratio != 1: assert separate_global_qkv == True, \ f"Setting global_dim_ratio != 1 requires separate_global_qkv == True." 
self.global_dim_ratio = global_dim_ratio self.input_shape = input_shape self.target_shape = target_shape T_in, H_in, W_in, C_in = input_shape T_out, H_out, W_out, C_out = target_shape assert H_in == H_out and W_in == W_out self.auxiliary_channels = auxiliary_channels if self.num_global_vectors > 0: self.init_global_vectors = nn.Parameter( torch.zeros((self.num_global_vectors, global_dim_ratio*base_units))) new_input_shape = self.get_initial_encoder_final_decoder( initial_downsample_scale=initial_downsample_scale, initial_downsample_type=initial_downsample_type, activation=initial_downsample_activation, # initial_downsample_type=="conv" initial_downsample_conv_layers=initial_downsample_conv_layers, final_upsample_conv_layers=final_upsample_conv_layers, padding_type=padding_type, # initial_downsample_type == "stack_conv" initial_downsample_stack_conv_num_layers=initial_downsample_stack_conv_num_layers, initial_downsample_stack_conv_dim_list=initial_downsample_stack_conv_dim_list, initial_downsample_stack_conv_downscale_list=initial_downsample_stack_conv_downscale_list, initial_downsample_stack_conv_num_conv_list=initial_downsample_stack_conv_num_conv_list, ) T_in, H_in, W_in, _ = new_input_shape self.encoder = CuboidTransformerEncoder( input_shape=(T_in, H_in, W_in, base_units), base_units=base_units, block_units=block_units, scale_alpha=scale_alpha, depth=enc_depth, downsample=downsample, downsample_type=downsample_type, block_attn_patterns=enc_attn_patterns, block_cuboid_size=enc_cuboid_size, block_strategy=enc_cuboid_strategy, block_shift_size=enc_shift_size, num_heads=num_heads, attn_drop=attn_drop, proj_drop=proj_drop, ffn_drop=ffn_drop, gated_ffn=gated_ffn, ffn_activation=ffn_activation, norm_layer=norm_layer, use_inter_ffn=enc_use_inter_ffn, padding_type=padding_type, use_global_vector=num_global_vectors > 0, use_global_vector_ffn=use_global_vector_ffn, use_global_self_attn=use_global_self_attn, separate_global_qkv=separate_global_qkv, global_dim_ratio=global_dim_ratio, checkpoint_level=checkpoint_level, use_relative_pos=use_relative_pos, self_attn_use_final_proj=self_attn_use_final_proj, # initialization attn_linear_init_mode=attn_linear_init_mode, ffn_linear_init_mode=ffn_linear_init_mode, conv_init_mode=conv_init_mode, down_linear_init_mode=down_up_linear_init_mode, norm_init_mode=norm_init_mode, ) self.enc_pos_embed = PosEmbed( embed_dim=base_units, typ=pos_embed_type, maxH=H_in, maxW=W_in, maxT=T_in) mem_shapes = self.encoder.get_mem_shapes() self.dec_pos_embed = PosEmbed( embed_dim=mem_shapes[-1][-1], typ=pos_embed_type, maxT=T_out, maxH=mem_shapes[-1][1], maxW=mem_shapes[-1][2]) self.unet_dec_cross_mode = unet_dec_cross_mode self.decoder = CuboidTransformerUNetDecoder( target_temporal_length=T_out, mem_shapes=mem_shapes, cross_start=dec_cross_start, depth=dec_depth, upsample_type=upsample_type, block_self_attn_patterns=dec_self_attn_patterns, block_self_cuboid_size=dec_self_cuboid_size, block_self_shift_size=dec_self_shift_size, block_self_cuboid_strategy=dec_self_cuboid_strategy, block_cross_attn_patterns=dec_cross_attn_patterns, block_cross_cuboid_hw=dec_cross_cuboid_hw, block_cross_shift_hw=dec_cross_shift_hw, block_cross_cuboid_strategy=dec_cross_cuboid_strategy, block_cross_n_temporal=dec_cross_n_temporal, cross_last_n_frames=dec_cross_last_n_frames, num_heads=num_heads, attn_drop=attn_drop, proj_drop=proj_drop, ffn_drop=ffn_drop, upsample_kernel_size=upsample_kernel_size, ffn_activation=ffn_activation, gated_ffn=gated_ffn, norm_layer=norm_layer, 
use_inter_ffn=dec_use_inter_ffn, max_temporal_relative=T_in + T_out, padding_type=padding_type, hierarchical_pos_embed=dec_hierarchical_pos_embed, pos_embed_type=pos_embed_type, use_self_global=(num_global_vectors > 0) and use_dec_self_global, self_update_global=dec_self_update_global, use_cross_global=(num_global_vectors > 0) and use_dec_cross_global, use_global_vector_ffn=use_global_vector_ffn, use_global_self_attn=use_global_self_attn, separate_global_qkv=separate_global_qkv, global_dim_ratio=global_dim_ratio, checkpoint_level=checkpoint_level, use_relative_pos=use_relative_pos, self_attn_use_final_proj=self_attn_use_final_proj, # initialization attn_linear_init_mode=attn_linear_init_mode, ffn_linear_init_mode=ffn_linear_init_mode, conv_init_mode=conv_init_mode, up_linear_init_mode=down_up_linear_init_mode, norm_init_mode=norm_init_mode, # different from CuboidTransformerDecoder downsample=downsample, downsample_type=downsample_type, cross_mode=unet_dec_cross_mode, down_linear_init_mode=down_up_linear_init_mode, ) self.reset_parameters() def get_initial_encoder_final_decoder( self, initial_downsample_type, activation, # initial_downsample_type=="conv" initial_downsample_scale, initial_downsample_conv_layers, final_upsample_conv_layers, padding_type, # initial_downsample_type == "stack_conv" initial_downsample_stack_conv_num_layers, initial_downsample_stack_conv_dim_list, initial_downsample_stack_conv_downscale_list, initial_downsample_stack_conv_num_conv_list, ): T_in, H_in, W_in, C_in = self.input_shape T_out, H_out, W_out, C_out = self.target_shape # Construct the initial upsampling / downsampling layers self.initial_downsample_type = initial_downsample_type if self.initial_downsample_type == "conv": if isinstance(initial_downsample_scale, int): initial_downsample_scale = (1, initial_downsample_scale, initial_downsample_scale) elif len(initial_downsample_scale) == 2: initial_downsample_scale = (1, *initial_downsample_scale) elif len(initial_downsample_scale) == 3: initial_downsample_scale = tuple(initial_downsample_scale) else: raise NotImplementedError(f"initial_downsample_scale {initial_downsample_scale} format not supported!") # if any(ele > 1 for ele in initial_downsample_scale):
self.initial_encoder = InitialEncoder(dim=C_in,
3
2023-10-23 11:45:50+00:00
24k
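Note: the CuboidTransformerUNetDecoder in the record above applies, for each block, stacked self-attention followed by optional cross-attention to the encoder memory, with cross_mode selecting whether cross-attention happens on the downsampling path, the upsampling path, or both. The toy module below sketches only that routing logic; every attention and resampling layer is a placeholder, and it is not the Earthformer implementation.

import torch
from torch import nn

class ToyUNetDecoder(nn.Module):
    """Placeholder decoder that only reproduces the cross_mode routing of the record above."""
    def __init__(self, num_blocks: int = 2, cross_mode: str = "up"):
        super().__init__()
        assert cross_mode in ("up", "down", "both")
        self.num_blocks = num_blocks
        self.down_use_cross = cross_mode in ("down", "both")
        self.up_use_cross = cross_mode in ("up", "both")
        self.self_attn = nn.ModuleList([nn.Identity() for _ in range(num_blocks)])
        self.cross_attn = nn.ModuleList([nn.Identity() for _ in range(num_blocks)])
        self.downsample = nn.ModuleList([nn.Identity() for _ in range(num_blocks - 1)])
        self.upsample = nn.ModuleList([nn.Identity() for _ in range(num_blocks - 1)])

    def forward(self, x, mem_l):
        # Down path: move into each coarser block, self-attend, and cross-attend to
        # the encoder memory when cross_mode is "down" or "both".
        for i in range(self.num_blocks):
            if i > 0:
                x = self.downsample[i - 1](x)
            x = self.self_attn[i](x)
            if self.down_use_cross:
                x = self.cross_attn[i](x)  # a real block attends to mem_l[i] here
        # Up path: mirror image; cross-attention happens here when cross_mode is "up" or "both",
        # followed by upsampling back toward the finest block.
        for i in range(self.num_blocks - 1, -1, -1):
            x = self.self_attn[i](x)
            if self.up_use_cross:
                x = self.cross_attn[i](x)  # attends to mem_l[i] in the real decoder
            if i > 0:
                x = self.upsample[i - 1](x)
        return x

mem_l = [torch.zeros(1, 4, 8, 8, 16), torch.zeros(1, 4, 4, 4, 32)]  # dummy encoder memories
out = ToyUNetDecoder(num_blocks=2, cross_mode="both")(torch.zeros(1, 4, 8, 8, 16), mem_l)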
IBM/VillanDiffusion
loss.py
[ { "identifier": "Backdoor", "path": "dataset.py", "snippet": "class Backdoor():\n CHANNEL_LAST = -1\n CHANNEL_FIRST = -3\n \n GREY_BG_RATIO = 0.3\n \n STOP_SIGN_IMG = \"static/stop_sign_wo_bg.png\"\n # STOP_SIGN_IMG = \"static/stop_sign_bg_blk.jpg\"\n CAT_IMG = \"static/cat_wo_bg...
import copy import torch import torch.nn.functional as F import os from functools import partial from os import terminal_size from sched import scheduler from typing import Callable, Dict, List, Tuple, Union from torch import nn from matplotlib import pyplot as plt from dataset import Backdoor, DEFAULT_VMIN, DEFAULT_VMAX from model import DiffuserModelSched from diffusers import DDPMScheduler from dataset import DatasetLoader from model import DiffuserModelSched
17,665
ws.append((sigma_i ** 2 - residuals[i]) ** 0.5) return torch.Tensor(ws) def get_hs_ve(rhos_hat: torch.Tensor) -> torch.Tensor: hs = [rhos_hat[0]] residuals = [0] for i, rho_hat_i in enumerate(rhos_hat): if i < 1: continue residuals.append(hs[i - 1] + residuals[i - 1]) hs.append(rho_hat_i - residuals[i]) return torch.Tensor(hs) def get_R_coef_gen_ve(sigmas: torch.Tensor, rhos_hat: torch.Tensor, ws: torch.Tensor, hs: torch.Tensor, psi: float=1, solver_type: str='sde', vp_scale: float=1.0, ve_scale: float=1.0) -> Tuple[torch.Tensor, torch.Tensor]: # BadDiffusion style correction term, None if psi != 0: raise NotImplementedError(f"Variance Explode model doesn't support BadDiffusion style correction term") # TrojDiff style correction term if hs == None: raise ValueError(f"Arguement hs shouldn't be {hs} when psi is {psi}") prev_rhos_hat = torch.roll(rhos_hat, 1, 0) prev_rhos_hat[0] = 0 prev_sigmas = torch.roll(sigmas, 1, 0) prev_sigmas[0] = 0 trojdiff_step = rhos_hat trojdiff_coef = ve_scale * (ws ** 2 * (rhos_hat - prev_rhos_hat) + hs * prev_sigmas) / (ws ** 2 * sigmas) # print(f"trojdiff_coef isnan: {torch.isnan(trojdiff_coef)}") # Coefficients & Steps step = trojdiff_step coef = trojdiff_coef if str(solver_type).lower() == 'ode': return step, 2 * coef elif str(solver_type).lower() == 'sde': return step, coef else: raise NotImplementedError(f"Coefficient solver_type: {solver_type} isn't implemented") def get_R_coef_gen_ve_reduce(sigmas: torch.Tensor, hs: torch.Tensor, rhos_hat_w: float=1.0, psi: float=1, solver_type: str='sde', vp_scale: float=1.0, ve_scale: float=1.0) -> Tuple[torch.Tensor, torch.Tensor]: # BadDiffusion style correction term, None if psi != 0: raise NotImplementedError(f"Variance Explode model doesn't support BadDiffusion style correction term") # TrojDiff style correction term if hs == None: raise ValueError(f"Arguement hs shouldn't be {hs} when psi is {psi}") # prev_rhos_hat = torch.roll(rhos_hat, 1, 0) # prev_rhos_hat[0] = 0 prev_sigmas = torch.roll(sigmas, 1, 0) prev_sigmas[0] = 0 trojdiff_step = rhos_hat_w * sigmas trojdiff_coef = ve_scale * (sigmas * rhos_hat_w / (sigmas + prev_sigmas)) # print(f"trojdiff_coef isnan: {torch.isnan(trojdiff_coef)}") # Coefficients & Steps step = trojdiff_step coef = trojdiff_coef if str(solver_type).lower() == 'ode': return step, 2 * coef elif str(solver_type).lower() == 'sde': return step, coef else: raise NotImplementedError(f"Coefficient solver_type: {solver_type} isn't implemented") def get_hs_vp(alphas: torch.Tensor, alphas_cumprod: torch.Tensor) -> torch.Tensor: hs = [(1 - alphas_cumprod[0]) ** 0.5] residuals = [0] for i, (alphas_cumprod_i, alphas_i) in enumerate(zip(alphas_cumprod, alphas)): if i < 1: continue residuals.append((alphas_i ** 0.5) * (hs[i - 1] + residuals[i - 1])) hs.append((1 - alphas_cumprod_i) ** 0.5 - residuals[i]) return torch.Tensor(hs) def get_R_coef_gen_vp(alphas_cumprod: torch.Tensor, alphas: torch.Tensor, hs: torch.Tensor=None, psi: float=1, solver_type: str='sde', vp_scale: float=1.0, ve_scale: float=1.0) -> Tuple[torch.Tensor, torch.Tensor]: # BadDiffusion style correction term baddiff_step = 1 - alphas_cumprod ** 0.5 baddiff_coef = vp_scale * (1 - alphas ** 0.5) * (1 - alphas_cumprod) ** 0.5 / (1 - alphas) # TrojDiff style correction term if psi != 1: if hs == None: raise ValueError(f"Arhuement hs shouldn't be {hs} when psi is {psi}") trojdiff_step = (1 - alphas_cumprod) ** 0.5 trojdiff_coef = - ve_scale * ((alphas ** 0.5 - 1) * (1 - alphas_cumprod) ** 0.5 * (1 - alphas) - hs * (alphas - alphas_cumprod)) 
/ (1 - alphas) # Coefficients & Steps step = psi * baddiff_step + (1 - psi) * trojdiff_step coef = psi * baddiff_coef + (1 - psi) * trojdiff_coef else: # Coefficients & Steps step = baddiff_step coef = baddiff_coef if str(solver_type).lower() == 'ode': return step, 2 * coef elif str(solver_type).lower() == 'sde': return step, coef else: raise NotImplementedError(f"Coefficient solver_type: {solver_type} isn't implemented") def get_R_coef_elbo_gen(noise_sched, sde_type: str="vp", psi: float=1, solver_type: str='sde', vp_scale: float=1.0, ve_scale: float=1.0, device=None, dtype=None, rhos_hat_w: float=1.0, rhos_hat_b: float=0.0) -> Tuple[torch.Tensor, torch.Tensor]:
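Note: the get_hs_vp recursion in the cropped code above splits the target coefficient sqrt(1 - alphas_cumprod[t]) into a fresh per-step term hs[t] plus residuals[t], where residuals carry every earlier term forward scaled by sqrt(alphas[t]). Unrolled, this means sqrt(1 - alphas_cumprod[T]) equals the sum over s of hs[s] decayed by the product of sqrt(alphas[r]) for r = s+1..T. The short check below verifies that unrolled identity numerically; it re-implements the recursion in float64 for the check, uses an assumed linear beta schedule, and is illustrative only, not part of the repository.

import torch

betas = torch.linspace(1e-4, 0.02, 1000, dtype=torch.float64)   # assumed linear schedule
alphas = 1.0 - betas
alphas_cumprod = torch.cumprod(alphas, dim=0)

# Same recursion as get_hs_vp above, kept in float64 for the check.
hs = [(1 - alphas_cumprod[0]) ** 0.5]
residuals = [torch.tensor(0.0, dtype=torch.float64)]
for t in range(1, len(alphas)):
    residuals.append((alphas[t] ** 0.5) * (hs[t - 1] + residuals[t - 1]))
    hs.append((1 - alphas_cumprod[t]) ** 0.5 - residuals[t])

# Unroll the decomposition at the final step T: each hs[s] is decayed by prod_{r=s+1..T} sqrt(alphas[r]).
T = len(alphas) - 1
decay = torch.cumprod((alphas[1:T + 1] ** 0.5).flip(0), dim=0).flip(0)  # decay[s] for s = 0..T-1
unrolled = (torch.stack(hs[:T]) * decay).sum() + hs[T]
assert torch.allclose(unrolled, (1 - alphas_cumprod[T]) ** 0.5)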
# %% # from tmp_loss_sde import q_sample_diffuser_alt_half """## Defining the forward diffusion process The forward diffusion process gradually adds noise to an image from the real distribution, in a number of time steps $T$. This happens according to a **variance schedule**. The original DDPM authors employed a linear schedule: > We set the forward process variances to constants increasing linearly from $\beta_1 = 10^{−4}$ to $\beta_T = 0.02$. However, it was shown in ([Nichol et al., 2021](https://arxiv.org/abs/2102.09672)) that better results can be achieved when employing a cosine schedule. Below, we define various schedules for the $T$ timesteps, as well as corresponding variables which we'll need, such as cumulative variances. """ def cosine_beta_schedule(timesteps, s=0.008): """ cosine schedule as proposed in https://arxiv.org/abs/2102.09672 """ steps = timesteps + 1 x = torch.linspace(0, timesteps, steps) alphas_cumprod = torch.cos(((x / timesteps) + s) / (1 + s) * torch.pi * 0.5) ** 2 alphas_cumprod = alphas_cumprod / alphas_cumprod[0] betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1]) return torch.clip(betas, 0.0001, 0.9999) def linear_beta_schedule(timesteps): beta_start = 0.0001 beta_end = 0.02 return torch.linspace(beta_start, beta_end, timesteps) def quadratic_beta_schedule(timesteps): beta_start = 0.0001 beta_end = 0.02 return torch.linspace(beta_start**0.5, beta_end**0.5, timesteps) ** 2 def sigmoid_beta_schedule(timesteps): beta_start = 0.0001 beta_end = 0.02 betas = torch.linspace(-6, 6, timesteps) return torch.sigmoid(betas) * (beta_end - beta_start) + beta_start def extract(a, t, x_shape): batch_size = t.shape[0] out = a.gather(-1, t.cpu()) return out.reshape(batch_size, *((1,) * (len(x_shape) - 1))).to(t.device) class NoiseScheduler(): SCHED_COSINE = "SC_COS" SCHED_LINEAR = "SC_LIN" SCHED_QUADRATIC = "SC_QUAD" SCHED_SIGMOID = "SC_SIGM" def __init__(self, timesteps: int, scheduler: str, s: float=0.008): self.__timesteps = int(timesteps) self.__s = float(s) self.__scheduler = scheduler # define beta schedule if self.__scheduler == self.SCHED_COSINE: self.__betas = NoiseScheduler.cosine_beta_schedule(timesteps=self.__timesteps, s=self.__s) elif self.__scheduler == self.SCHED_LINEAR: self.__betas = NoiseScheduler.linear_beta_schedule(timesteps=self.__timesteps) self.__derivative_beta = 1 / self.__timesteps self.__derivative_alpha = - 1 / self.__timesteps elif self.__scheduler == self.SCHED_QUADRATIC: self.__betas = NoiseScheduler.quadratic_beta_schedule(timesteps=self.__timesteps) elif self.__scheduler == self.SCHED_SIGMOID: self.__betas = NoiseScheduler.sigmoid_beta_schedule(timesteps=self.__timesteps) else: raise ImportError(f"Undefined scheduler: {self.__scheduler}") # define alphas self.__alphas = 1. - self.betas self.__alphas_cumprod = torch.cumprod(self.alphas, axis=0) self.__alphas_cumprod_prev = F.pad(self.alphas_cumprod[:-1], (1, 0), value=1.0) self.__sqrt_recip_alphas = torch.sqrt(1.0 / self.alphas) # Calculations for backdoor self.__sqrt_alphas = torch.sqrt(self.alphas) self.__one_minus_sqrt_alphas = 1 - self.sqrt_alphas self.__one_minus_alphas = 1 - self.alphas # calculations for diffusion q(x_t | x_{t-1}) and others self.__sqrt_alphas_cumprod = torch.sqrt(self.alphas_cumprod) self.__sqrt_one_minus_alphas_cumprod = torch.sqrt(1. - self.alphas_cumprod) self.__R_coef = self.one_minus_sqrt_alphas * self.sqrt_one_minus_alphas_cumprod / self.one_minus_alphas # calculations for posterior q(x_{t-1} | x_t, x_0) self.__posterior_variance = self.betas * (1. 
- self.alphas_cumprod_prev) / (1. - self.alphas_cumprod) @staticmethod def cosine_beta_schedule(timesteps, s=0.008): """ cosine schedule as proposed in https://arxiv.org/abs/2102.09672 """ steps = timesteps + 1 x = torch.linspace(0, timesteps, steps) alphas_cumprod = torch.cos(((x / timesteps) + s) / (1 + s) * torch.pi * 0.5) ** 2 alphas_cumprod = alphas_cumprod / alphas_cumprod[0] betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1]) return torch.clip(betas, 0.0001, 0.9999) @staticmethod def linear_beta_schedule(timesteps): beta_start = 0.0001 beta_end = 0.02 return torch.linspace(beta_start, beta_end, timesteps) @staticmethod def quadratic_beta_schedule(timesteps): beta_start = 0.0001 beta_end = 0.02 return torch.linspace(beta_start**0.5, beta_end**0.5, timesteps) ** 2 @staticmethod def sigmoid_beta_schedule(timesteps): beta_start = 0.0001 beta_end = 0.02 betas = torch.linspace(-6, 6, timesteps) return torch.sigmoid(betas) * (beta_end - beta_start) + beta_start @property def betas(self): return self.__betas @property def alphas(self): return self.__alphas @property def alphas_cumprod(self): return self.__alphas_cumprod @property def alphas_cumprod_prev(self): return self.__alphas_cumprod_prev @property def sqrt_recip_alphas(self): return self.__sqrt_recip_alphas @property def sqrt_alphas(self): return self.__sqrt_alphas @property def one_minus_sqrt_alphas(self): return self.__one_minus_sqrt_alphas @property def one_minus_alphas(self): return self.__one_minus_alphas @property def sqrt_alphas_cumprod(self): return self.__sqrt_alphas_cumprod @property def sqrt_one_minus_alphas_cumprod(self): return self.__sqrt_one_minus_alphas_cumprod @property def R_coef(self): return self.__R_coef @property def posterior_variance(self): return self.__posterior_variance """<img src="https://drive.google.com/uc?id=1QifsBnYiijwTqru6gur9C0qKkFYrm-lN" width="800" /> This means that we can now define the loss function given the model as follows: """ # forward diffusion def q_sample_clean(noise_sched, x_start, t, noise=None): if noise is None: noise = torch.randn_like(x_start) sqrt_alphas_cumprod_t = extract(noise_sched.sqrt_alphas_cumprod, t, x_start.shape) sqrt_one_minus_alphas_cumprod_t = extract( noise_sched.sqrt_one_minus_alphas_cumprod, t, x_start.shape ) return sqrt_alphas_cumprod_t * x_start + sqrt_one_minus_alphas_cumprod_t * noise, noise def q_sample_backdoor(noise_sched, x_start, R, t, noise=None): if noise is None: noise = torch.randn_like(x_start) sqrt_alphas_cumprod_t = extract(noise_sched.sqrt_alphas_cumprod, t, x_start.shape) sqrt_one_minus_alphas_cumprod_t = extract( noise_sched.sqrt_one_minus_alphas_cumprod, t, x_start.shape ) R_coef_t = extract(noise_sched.R_coef, t, x_start.shape) return sqrt_alphas_cumprod_t * x_start + (1 - sqrt_alphas_cumprod_t) * R + sqrt_one_minus_alphas_cumprod_t * noise, R_coef_t * R + noise """ <img src="https://drive.google.com/uc?id=1QifsBnYiijwTqru6gur9C0qKkFYrm-lN" width="800" /> This means that we can now define the loss function given the model as follows: """ def p_losses_clean(noise_sched, denoise_model, x_start, t, noise=None, loss_type="l2"): if len(x_start) == 0: return 0 if noise is None: noise = torch.randn_like(x_start) x_noisy, target = q_sample_clean(noise_sched=noise_sched, x_start=x_start, t=t, noise=noise) predicted_noise = denoise_model(x_noisy, t) if loss_type == 'l1': loss = F.l1_loss(target, predicted_noise) elif loss_type == 'l2': loss = F.mse_loss(target, predicted_noise) elif loss_type == "huber": loss = F.smooth_l1_loss(target, 
predicted_noise) else: raise NotImplementedError() return loss def p_losses_backdoor(noise_sched, denoise_model, x_start, R, t, noise=None, loss_type="l2"): if len(x_start) == 0: return 0 if noise is None: noise = torch.randn_like(x_start) x_noisy, target = q_sample_backdoor(noise_sched=noise_sched, x_start=x_start, R=R, t=t, noise=noise) predicted_noise = denoise_model(x_noisy, t) if loss_type == 'l1': loss = F.l1_loss(target, predicted_noise) elif loss_type == 'l2': loss = F.mse_loss(target, predicted_noise) elif loss_type == "huber": loss = F.smooth_l1_loss(target, predicted_noise) else: raise NotImplementedError() return loss def p_losses(noise_sched, denoise_model, x_start, R, is_clean, t, noise=None, loss_type="l2"): is_not_clean = torch.where(is_clean, False, True) if noise != None: noise_clean = noise[is_clean] noise_backdoor = noise[is_not_clean] else: noise_clean = noise_backdoor = noise loss_clean = p_losses_clean(noise_sched=noise_sched, denoise_model=denoise_model, x_start=x_start[is_clean], t=t[is_clean], noise=noise_clean, loss_type=loss_type) loss_backdoor = p_losses_backdoor(noise_sched=noise_sched, denoise_model=denoise_model, x_start=x_start[is_not_clean], R=R[is_not_clean], t=t[is_not_clean], noise=noise_backdoor, loss_type=loss_type) return (loss_clean + loss_backdoor) / 2 # ================================================== class LossSampler(): def __init__(self, noise_sched: NoiseScheduler): self.__noise_sched = noise_sched def get_fn(self): return partial(p_losses_backdoor, self.__noise_sched), partial(q_sample_backdoor, self.__noise_sched) def plot(x, title: str, log_scale: bool=False): plt.plot(x) plt.title(title) if log_scale: plt.yscale("log") plt.show() def get_derivative(x: torch.Tensor, t: int): if t + 1 < len(x): return x[t + 1] - x[t] return x[t] - x[t - 1] def get_derivatives(x: torch.Tensor): x_delta_t = torch.roll(x, -1, 0) x_delta_t[-1] = x_delta_t[-2] x[-1] = x[-2] return x_delta_t - x def central_derivative(fn, x, stop_thres: float=1e-5, stop_iter_n: int=50, delta: float=1e-2, divisor: float=10.0): der = lambda d: (fn(x + d) - fn(x - d)) / (2 * d) iter_n = 0 res = der(delta) last_res = 0 while (abs(res - last_res) > stop_thres or iter_n < 1) and iter_n < stop_iter_n: last_res = res delta = delta / divisor res = der(delta) iter_n = iter_n + 1 return res def get_alpha_beta_fn_linear(beta_start: float, beta_end: float, timesteps: int): def beta_fn(t): return float(beta_start) + (float(beta_end) - float(beta_start)) * t / (float(timesteps) - 1.0) def alpha_fn(t): return 1.0 - beta_fn(t) return alpha_fn, beta_fn def integral(fn: Callable[[Union[int, float]], Union[int, float]], interval_low: float, interval_up: float, div: int=100): lin_space = torch.linspace(interval_low, interval_up, div, dtype=torch.float32) res = fn(lin_space[:-1]) return torch.sum(res, dim=0) * (interval_up - interval_low) / div def prod_integral(xs: torch.Tensor, x_fn: Callable[[Union[int, float]], Union[int, float]], div: int=200): def log_x_fn(x): return torch.log(x_fn(x).double()).double() def integral_fn(x): return (torch.trapezoid(log_x_fn(torch.linspace(0, x, div * int(x)).to('cpu').double())) / div).double() def exp_integral_fn(x): return torch.exp(integral_fn(x)).double() return torch.linspace(start=0, end=len(xs)-1, steps=len(xs)).to('cpu').double().apply_(exp_integral_fn).float() def get_alphas_cumprod_derivative(alphas: torch.Tensor, alpha_fn: Callable[[Union[int, float]], Union[int, float]]): div = 200 def log_alpha_fn(x): return torch.log(alpha_fn(x).double()).double() 
def integral_fn(x): return (torch.trapezoid(log_alpha_fn(torch.linspace(0, x, div * int(x)).to('cpu').double())) / div).double() def exp_integral_fn(x): return torch.exp(integral_fn(x)).double() def der_fn(x): return central_derivative(exp_integral_fn, x, stop_thres=1e-3, stop_iter_n=2, delta=1e-2, divisor=10.0) def coef_fn(x): return (exp_integral_fn(x) * torch.log(alpha_fn(torch.Tensor([x]).double()))).double() # fn_int = torch.linspace(start=0, end=len(alphas)-1, steps=len(alphas)).double().apply_(integral_fn) # fn_prod_int = torch.linspace(start=0, end=len(alphas)-1, steps=len(alphas)).double().apply_(exp_integral_fn) # for i in range(len(fn_prod_int[:20])): # print(f"Time: {i} - Alpha Fn Product Integral Analytic: {fn_prod_int[i]}") # plot(fn_prod_int, title="Alpha Fn Product Integral", log_scale=True) # print(f"fn_int: {fn_int[:20]}") # plot(fn_int, title="Alpha Fn Integral") res = torch.linspace(start=0, end=len(alphas)-1, steps=len(alphas)).to('cpu').float().apply_(coef_fn).double() return res # return torch.exp(integral_res) * (torch.log(alphas[-1]) - torch.log(alphas[0])) def get_alphas_hat_derivative(alphas_cumprod: torch.Tensor, alphas: torch.Tensor, alpha_fn: Callable[[Union[int, float]], Union[int, float]]): return get_alphas_cumprod_derivative(alphas=alphas, alpha_fn=alpha_fn).to(alphas_cumprod.device) / 2 * (alphas_cumprod ** 0.5) def get_sigmas_hat_derivative(alphas_cumprod: torch.Tensor, alphas: torch.Tensor, alpha_fn: Callable[[Union[int, float]], Union[int, float]]): return - get_alphas_cumprod_derivative(alphas=alphas, alpha_fn=alpha_fn).to(alphas_cumprod.device) / 2 * ((1 - alphas_cumprod) ** 0.5) def sci(x: float): return "{:.2e}".format(x) def get_R_coef_alt(alphas_cumprod: torch.Tensor, alphas: torch.Tensor, alpha_fn: Callable[[Union[int, float]], Union[int, float]], psi: float=1, solver_type: str='sde'): one_minus_alphas_cumprod = 1 - alphas_cumprod # Fokker-Planck: g^2(t) = derivative of \hat{\beta}^2(t) # coef = psi * (torch.sqrt(one_minus_alphas_cumprod / alphas_cumprod)) + (1 - psi) # g^2(t) = \frac{d \hat{\beta}^2(t)}{dt} - 2 * \frac{d \log \hat{\alpha}(t)}{dt} * \hat{\beta}^2(t) coef = (psi * (torch.sqrt(one_minus_alphas_cumprod / alphas_cumprod)) + (1 - psi)) / (1 + (one_minus_alphas_cumprod / alphas_cumprod)) # Simplified # coef = torch.ones_like(alphas_cumprod) if str(solver_type).lower() == 'ode': return coef elif str(solver_type).lower() == 'sde': return 0.5 * coef else: raise NotImplementedError(f"Coefficient solver_type: {solver_type} isn't implemented") def get_R_coef_variational(alphas_cumprod: torch.Tensor, psi: float=1, solver_type: str='sde'): coef = psi * (1 - alphas_cumprod ** 0.5) / (1 - alphas_cumprod) ** 0.5 + (1 - psi) if str(solver_type).lower() == 'ode': return 2 * coef elif str(solver_type).lower() == 'sde': return coef else: raise NotImplementedError(f"Coefficient solver_type: {solver_type} isn't implemented") # def get_R_coef_baddiff(alphas_cumprod: torch.Tensor, psi: float=1, solver_type: str='sde'): # coef = psi * (1 - alphas_cumprod ** 0.5) / (1 - alphas_cumprod) ** 0.5 + (1 - psi) # if str(solver_type).lower() == 'ode': # return 2 * coef # elif str(solver_type).lower() == 'sde': # return coef # else: # raise NotImplementedError(f"Coefficient solver_type: {solver_type} isn't implemented") def get_R_coef(alphas_cumprod: torch.Tensor, alphas: torch.Tensor, alpha_fn: Callable[[Union[int, float]], Union[int, float]], psi: float=1): alphas_hat = (alphas_cumprod ** 0.5).double() sigmas_hat = ((1 - alphas_cumprod) ** 0.5).double() 
    alphas_hat_derivative = get_alphas_hat_derivative(alphas_cumprod=alphas_cumprod, alphas=alphas, alpha_fn=alpha_fn).double()
    sigmas_hat_derivative = get_sigmas_hat_derivative(alphas_cumprod=alphas_cumprod, alphas=alphas, alpha_fn=alpha_fn).double()
    alt_r = 0.5 * alphas_hat / (alphas_hat + sigmas_hat)
    # plot(alt_r, title="Alternate R", log_scale=True)
    a = (- psi * alphas_hat_derivative + (1 - psi) * sigmas_hat_derivative).double()
    b = (psi * (1 - alphas_hat) + (1 - psi) * sigmas_hat).double()
    c = (2 * sigmas_hat * sigmas_hat_derivative - 2 * (alphas_hat_derivative / alphas_hat) * (sigmas_hat ** 2)).double()
    # plot(alpha_fn(torch.linspace(0, 999, 1000).float()), title="Alpha Fn", log_scale=True)
    # fn_cumprod = torch.cumprod(alpha_fn(torch.linspace(0, 999, 1000).float()), dim=0)
    # for i in range(len(fn_cumprod[:20])):
    #     print(f"Time: {i} - Alpha Fn Cumprod: {fn_cumprod[i]}")
    # plot(fn_cumprod, title="Alpha Fn Cumprod", log_scale=True)
    # plot(alphas, title="Alpha")
    # for i in range(len(alphas_cumprod[:20])):
    #     print(f"Time: {i} - Alpha Cumprod: {alphas_cumprod[i]}")
    # plot(alphas_cumprod, title="Alpha Cumprod", log_scale=True)
    # plot(get_alphas_cumprod_derivative(alphas=alphas, alpha_fn=alpha_fn), title="Alpha Cumprod Derivative Analytic")
    # plot(get_derivatives(x=alphas_cumprod)[:-1], title="Alpha Cumprod Derivative Numeric")
    # plot(alphas_hat, title="Alpha Hat", log_scale=True)
    # plot(sigmas_hat, title="Beta Hat", log_scale=True)
    # plot(alphas_hat_derivative, title="Alpha Hat Derivative")
    # plot(sigmas_hat_derivative, title="Sigma Hat Derivative")
    # plot(a, title="Rho Derivative")
    # plot(b, title="Rho")
    # plot(c, title="G^2", log_scale=True)
    # plot(alphas_hat_derivative / alphas_hat, title="f(t)")
    coef = (sigmas_hat * a / (c)).double()
    # for i in range(len(sigmas_hat[:20])):
    #     print(f"Time: {i} - R: {sci(coef[i])} beta_hat: {sci(sigmas_hat[i])}, rho_deriv: {sci(a[i])}, G^2: {sci(c[i])}")
    if torch.isnan(sigmas_hat).any():
        print(f"sigmas_hat - Nan: {sigmas_hat[torch.isnan(sigmas_hat).nonzero()]}")
    if torch.isnan(a).any():
        print(f"Rho Derivative - Nan: {a[torch.isnan(a).nonzero()]}")
    if torch.isnan(b).any():
        print(f"Rho - Nan: {b[torch.isnan(b).nonzero()]}")
    if torch.isnan(c).any():
        print(f"G^2 - Nan: {c[torch.isnan(c).nonzero()]}")
    # return torch.clamp(coef, min=None, max=1)
    # return coef
    return alt_r

def get_ks(alphas_hat: torch.Tensor) -> torch.Tensor:
    prev_alphas_hat = torch.roll(alphas_hat, 1, 0)
    prev_alphas_hat[0] = 1
    return alphas_hat / prev_alphas_hat

def get_ws(betas_hat: torch.Tensor, ks: torch.Tensor) -> torch.Tensor:
    ws = [betas_hat[0]]
    residuals = [0]
    for i, beta_hat_i in enumerate(betas_hat):
        if i < 1:
            continue
        residuals.append((ks[i] ** 2) * (ws[i - 1] ** 2 + residuals[i - 1]))
        ws.append((beta_hat_i ** 2 - residuals[i]) ** 0.5)
    return torch.Tensor(ws)

def get_hs(rhos_hat: torch.Tensor, ks: torch.Tensor) -> torch.Tensor:
    hs = [rhos_hat[0]]
    residuals = [0]
    for i, rho_hat_i in enumerate(rhos_hat):
        if i < 1:
            continue
        residuals.append(ks[i] * (hs[i - 1] + residuals[i - 1]))
        hs.append(rho_hat_i - residuals[i])
    return torch.Tensor(hs)

def get_ws_ve(sigmas: torch.Tensor) -> torch.Tensor:
    ws = [sigmas[0]]
    residuals = [0]
    for i, sigma_i in enumerate(sigmas):
        if i < 1:
            continue
        residuals.append(ws[i - 1] ** 2 + residuals[i - 1])
        ws.append((sigma_i ** 2 - residuals[i]) ** 0.5)
    return torch.Tensor(ws)

def get_hs_ve(rhos_hat: torch.Tensor) -> torch.Tensor:
    hs = [rhos_hat[0]]
    residuals = [0]
    for i, rho_hat_i in enumerate(rhos_hat):
        if i < 1:
            continue
        residuals.append(hs[i - 1] + residuals[i - 1])
        hs.append(rho_hat_i - residuals[i])
    return torch.Tensor(hs)

def get_R_coef_gen_ve(sigmas: torch.Tensor, rhos_hat: torch.Tensor, ws: torch.Tensor, hs: torch.Tensor, psi: float=1, solver_type: str='sde', vp_scale: float=1.0, ve_scale: float=1.0) -> Tuple[torch.Tensor, torch.Tensor]:
    # BadDiffusion style correction term, None
    if psi != 0:
        raise NotImplementedError(f"Variance Exploding model doesn't support BadDiffusion style correction term")
    # TrojDiff style correction term
    if hs == None:
        raise ValueError(f"Argument hs shouldn't be {hs} when psi is {psi}")
    prev_rhos_hat = torch.roll(rhos_hat, 1, 0)
    prev_rhos_hat[0] = 0
    prev_sigmas = torch.roll(sigmas, 1, 0)
    prev_sigmas[0] = 0
    trojdiff_step = rhos_hat
    trojdiff_coef = ve_scale * (ws ** 2 * (rhos_hat - prev_rhos_hat) + hs * prev_sigmas) / (ws ** 2 * sigmas)
    # print(f"trojdiff_coef isnan: {torch.isnan(trojdiff_coef)}")

    # Coefficients & Steps
    step = trojdiff_step
    coef = trojdiff_coef

    if str(solver_type).lower() == 'ode':
        return step, 2 * coef
    elif str(solver_type).lower() == 'sde':
        return step, coef
    else:
        raise NotImplementedError(f"Coefficient solver_type: {solver_type} isn't implemented")

def get_R_coef_gen_ve_reduce(sigmas: torch.Tensor, hs: torch.Tensor, rhos_hat_w: float=1.0, psi: float=1, solver_type: str='sde', vp_scale: float=1.0, ve_scale: float=1.0) -> Tuple[torch.Tensor, torch.Tensor]:
    # BadDiffusion style correction term, None
    if psi != 0:
        raise NotImplementedError(f"Variance Exploding model doesn't support BadDiffusion style correction term")
    # TrojDiff style correction term
    if hs == None:
        raise ValueError(f"Argument hs shouldn't be {hs} when psi is {psi}")
    # prev_rhos_hat = torch.roll(rhos_hat, 1, 0)
    # prev_rhos_hat[0] = 0
    prev_sigmas = torch.roll(sigmas, 1, 0)
    prev_sigmas[0] = 0
    trojdiff_step = rhos_hat_w * sigmas
    trojdiff_coef = ve_scale * (sigmas * rhos_hat_w / (sigmas + prev_sigmas))
    # print(f"trojdiff_coef isnan: {torch.isnan(trojdiff_coef)}")

    # Coefficients & Steps
    step = trojdiff_step
    coef = trojdiff_coef

    if str(solver_type).lower() == 'ode':
        return step, 2 * coef
    elif str(solver_type).lower() == 'sde':
        return step, coef
    else:
        raise NotImplementedError(f"Coefficient solver_type: {solver_type} isn't implemented")

def get_hs_vp(alphas: torch.Tensor, alphas_cumprod: torch.Tensor) -> torch.Tensor:
    hs = [(1 - alphas_cumprod[0]) ** 0.5]
    residuals = [0]
    for i, (alphas_cumprod_i, alphas_i) in enumerate(zip(alphas_cumprod, alphas)):
        if i < 1:
            continue
        residuals.append((alphas_i ** 0.5) * (hs[i - 1] + residuals[i - 1]))
        hs.append((1 - alphas_cumprod_i) ** 0.5 - residuals[i])
    return torch.Tensor(hs)

def get_R_coef_gen_vp(alphas_cumprod: torch.Tensor, alphas: torch.Tensor, hs: torch.Tensor=None, psi: float=1, solver_type: str='sde', vp_scale: float=1.0, ve_scale: float=1.0) -> Tuple[torch.Tensor, torch.Tensor]:
    # BadDiffusion style correction term
    baddiff_step = 1 - alphas_cumprod ** 0.5
    baddiff_coef = vp_scale * (1 - alphas ** 0.5) * (1 - alphas_cumprod) ** 0.5 / (1 - alphas)

    # TrojDiff style correction term
    if psi != 1:
        if hs == None:
            raise ValueError(f"Argument hs shouldn't be {hs} when psi is {psi}")
        trojdiff_step = (1 - alphas_cumprod) ** 0.5
        trojdiff_coef = - ve_scale * ((alphas ** 0.5 - 1) * (1 - alphas_cumprod) ** 0.5 * (1 - alphas) - hs * (alphas - alphas_cumprod)) / (1 - alphas)

        # Coefficients & Steps
        step = psi * baddiff_step + (1 - psi) * trojdiff_step
        coef = psi * baddiff_coef + (1 - psi) * trojdiff_coef
    else:
        # Coefficients & Steps
        step = baddiff_step
        coef = baddiff_coef

    if str(solver_type).lower() == 'ode':
        return step, 2 * coef
    elif str(solver_type).lower() == 'sde':
        return step, coef
    else:
        raise NotImplementedError(f"Coefficient solver_type: {solver_type} isn't implemented")

def get_R_coef_elbo_gen(noise_sched, sde_type: str="vp", psi: float=1, solver_type: str='sde', vp_scale: float=1.0, ve_scale: float=1.0, device=None, dtype=None, rhos_hat_w: float=1.0, rhos_hat_b: float=0.0) -> Tuple[torch.Tensor, torch.Tensor]:
if sde_type == DiffuserModelSched.SDE_VP or sde_type == DiffuserModelSched.SDE_LDM:
3
2023-10-17 19:57:37+00:00
24k
nchen909/Pass-Tuning
evaluator/CodeBLEU/syntax_match.py
[ { "identifier": "DFG_python", "path": "evaluator/CodeBLEU/parser/DFG.py", "snippet": "def DFG_python(root_node,index_to_code,states):\n assignment=['assignment','augmented_assignment','for_in_clause']\n if_statement=['if_statement']\n for_statement=['for_statement']\n while_statement=['while...
from evaluator.CodeBLEU.parser import DFG_python, DFG_java, DFG_ruby, DFG_go, DFG_php, DFG_javascript, DFG_csharp,DFG_c from evaluator.CodeBLEU.parser import (remove_comments_and_docstrings, tree_to_token_index, index_to_code_token, tree_to_variable_index) from tree_sitter import Language, Parser
17,382
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

parser_path = '/data/pretrain-attention/CodePrompt/evaluator/CodeBLEU/parser'
dfg_function = {
    'python': DFG_python,
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

parser_path = '/data/pretrain-attention/CodePrompt/evaluator/CodeBLEU/parser'
dfg_function = {
    'python': DFG_python,
'java': DFG_java,
1
2023-10-20 09:24:44+00:00
24k
JoaoPedro9674/django-ledger
django_ledger/io/io_mixin.py
[ { "identifier": "settings", "path": "django_ledger/settings.py", "snippet": " DJANGO_LEDGER_GRAPHQL_SUPPORT_ENABLED = True\n DJANGO_LEDGER_GRAPHQL_SUPPORT_ENABLED = False\n DJANGO_LEDGER_PDF_SUPPORT_ENABLED = True\n DJANGO_LEDGER_PDF_SUPPORT_ENABLED = False\nDJANGO_LEDGER_USE_CLOSING_ENTRIES...
from collections import defaultdict, namedtuple from datetime import datetime, date from itertools import groupby from pathlib import Path from random import choice from typing import List, Set, Union, Tuple, Optional, Dict from django.contrib.auth import get_user_model from django.core.exceptions import ValidationError, ObjectDoesNotExist from django.db.models import Sum, QuerySet from django.db.models.functions import TruncMonth from django.http import Http404 from django.utils.dateparse import parse_date, parse_datetime from django.utils.timezone import make_aware, is_naive, localtime from django.utils.translation import gettext_lazy as _ from django_ledger import settings from django_ledger.exceptions import InvalidDateInputError, TransactionNotInBalanceError from django_ledger.io import roles as roles_module from django_ledger.io.io_context import (RoleContextManager, GroupContextManager, ActivityContextManager, BalanceSheetStatementContextManager, IncomeStatementContextManager, CashFlowStatementContextManager) from django_ledger.io.io_digest import IODigestContextManager from django_ledger.io.ratios import FinancialRatioManager from django_ledger.models.utils import lazy_loader
15,722
for acc in gb_digest: if any([ all([acc['role_bs'] == roles_module.BS_ASSET_ROLE, acc['balance_type'] == TransactionModel.CREDIT]), all([acc['role_bs'] in ( roles_module.BS_LIABILITIES_ROLE, roles_module.BS_EQUITY_ROLE ), acc['balance_type'] == TransactionModel.DEBIT]) ]): acc['balance'] = -acc['balance'] return txs_queryset, gb_digest @staticmethod def aggregate_balances(k, g): gl = list(g) return { 'account_uuid': k[0], 'unit_uuid': k[1], 'unit_name': gl[0].get('journal_entry__entity_unit__name'), 'activity': gl[0].get('journal_entry__activity'), 'period_year': k[2], 'period_month': k[3], 'role_bs': roles_module.BS_ROLES.get(gl[0]['account__role']), 'role': gl[0]['account__role'], 'code': gl[0]['account__code'], 'name': gl[0]['account__name'], 'balance_type': gl[0]['account__balance_type'], 'tx_type': k[5], 'balance': sum(a['balance'] for a in gl), } def digest(self, entity_slug: str = None, unit_slug: str = None, user_model: UserModel = None, txs_queryset: QuerySet = None, as_io_digest: bool = False, accounts: Optional[Union[Set[str], List[str]]] = None, role: Optional[Union[Set[str], List[str]]] = None, activity: str = None, signs: bool = True, to_date: Union[str, datetime, date] = None, from_date: Union[str, datetime, date] = None, process_roles: bool = False, process_groups: bool = False, process_ratios: bool = False, process_activity: bool = False, equity_only: bool = False, by_period: bool = False, by_unit: bool = False, by_activity: bool = False, by_tx_type: bool = False, digest_name: str = None, balance_sheet_statement: bool = False, income_statement: bool = False, cash_flow_statement: bool = False, **kwargs) -> Union[Tuple, IODigestContextManager]: if balance_sheet_statement: from_date = None if cash_flow_statement: by_activity = True if activity: activity = validate_activity(activity) if role: role = roles_module.validate_roles(role) from_date, to_date = validate_dates(from_date, to_date) io_data = defaultdict(lambda: dict()) io_data['io_model'] = self io_data['from_date'] = from_date io_data['to_date'] = to_date io_data['by_unit'] = by_unit io_data['by_period'] = by_period io_data['by_activity'] = by_activity io_data['by_tx_type'] = by_tx_type txs_qs, accounts_digest = self.python_digest( txs_queryset=txs_queryset, user_model=user_model, accounts=accounts, role=role, activity=activity, entity_slug=entity_slug, unit_slug=unit_slug, to_date=to_date, from_date=from_date, signs=signs, equity_only=equity_only, by_period=by_period, by_unit=by_unit, by_activity=by_activity, by_tx_type=by_tx_type, **kwargs ) io_data['txs_qs'] = txs_qs io_data['accounts'] = accounts_digest if process_roles: roles_mgr = RoleContextManager( io_data=io_data, by_period=by_period, by_unit=by_unit ) # idea: change digest() name to something else? maybe aggregate, calculate?... io_data = roles_mgr.digest() if any([ process_groups, balance_sheet_statement, income_statement, cash_flow_statement ]):
""" Django Ledger created by Miguel Sanda <msanda@arrobalytics.com>. Copyright© EDMA Group Inc licensed under the GPLv3 Agreement. Contributions to this module: * Miguel Sanda <msanda@arrobalytics.com> """ UserModel = get_user_model() def diff_tx_data(tx_data: list, raise_exception: bool = True): IS_TX_MODEL = False TransactionModel = lazy_loader.get_txs_model() if isinstance(tx_data[0], TransactionModel): CREDITS = sum(tx.amount for tx in tx_data if tx.tx_type == 'credit') DEBITS = sum(tx.amount for tx in tx_data if tx.tx_type == 'debit') IS_TX_MODEL = True elif isinstance(tx_data[0], dict): CREDITS = sum(tx['amount'] for tx in tx_data if tx['tx_type'] == 'credit') DEBITS = sum(tx['amount'] for tx in tx_data if tx['tx_type'] == 'debit') else: raise ValidationError('Only Dictionary or TransactionModel allowed.') is_valid = (CREDITS == DEBITS) diff = CREDITS - DEBITS if not is_valid and abs(diff) > settings.DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE: if raise_exception: raise TransactionNotInBalanceError( f'Invalid tx data. Credits and debits must match. Currently cr: {CREDITS}, db {DEBITS}.' f'Max Tolerance {settings.DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE}' ) return IS_TX_MODEL, is_valid, diff def check_tx_balance(tx_data: list, perform_correction: bool = False) -> bool: if tx_data: IS_TX_MODEL, is_valid, diff = diff_tx_data(tx_data, raise_exception=perform_correction) if not perform_correction and abs(diff): return False if not perform_correction and abs(diff) > settings.DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE: return False while not is_valid: tx_type_choice = choice(['debit', 'credit']) txs_candidates = list(tx for tx in tx_data if tx['tx_type'] == tx_type_choice) if len(txs_candidates) > 0: tx = choice(list(tx for tx in tx_data if tx['tx_type'] == tx_type_choice)) if any([diff > 0 and tx_type_choice == 'debit', diff < 0 and tx_type_choice == 'credit']): if IS_TX_MODEL: tx.amount += settings.DJANGO_LEDGER_TRANSACTION_CORRECTION else: tx['amount'] += settings.DJANGO_LEDGER_TRANSACTION_CORRECTION elif any([diff < 0 and tx_type_choice == 'debit', diff > 0 and tx_type_choice == 'credit']): if IS_TX_MODEL: tx.amount -= settings.DJANGO_LEDGER_TRANSACTION_CORRECTION else: tx['amount'] += settings.DJANGO_LEDGER_TRANSACTION_CORRECTION IS_TX_MODEL, is_valid, diff = diff_tx_data(tx_data) return True def validate_io_date(dt: Union[str, date, datetime], no_parse_localdate: bool = True) -> Optional[datetime]: if not dt: return if isinstance(dt, date): dt = make_aware( value=datetime.combine( dt, datetime.min.time() )) return dt elif isinstance(dt, datetime): if is_naive(dt): return make_aware(dt) return dt elif isinstance(dt, str): # try to parse a date object from string... fdt = parse_date(dt) if not fdt: # try to parse a datetime object from string... fdt = parse_datetime(dt) if not fdt: raise InvalidDateInputError( message=f'Could not parse date from {dt}' ) elif is_naive(fdt): fdt = make_aware(fdt) return fdt if no_parse_localdate: return localtime() def validate_dates( from_date: Union[str, datetime, date] = None, to_date: Union[str, datetime, date] = None) -> Tuple[date, date]: from_date = validate_io_date(from_date, no_parse_localdate=False) to_date = validate_io_date(to_date) return from_date, to_date def validate_activity(activity: str, raise_404: bool = False): # idea: move to model???... JournalEntryModel = lazy_loader.get_journal_entry_model() valid = activity in JournalEntryModel.VALID_ACTIVITIES if activity and not valid: exception = ValidationError(f'{activity} is invalid. 
Choices are {JournalEntryModel.VALID_ACTIVITIES}.') if raise_404: raise Http404(exception) raise exception return activity class IOValidationError(ValidationError): pass class IODatabaseMixIn: """ Controls how transactions are recorded into the ledger. """ def is_entity_model(self): return isinstance(self, lazy_loader.get_entity_model()) def is_ledger_model(self): return isinstance(self, lazy_loader.get_ledger_model()) def is_entity_unit_model(self): return isinstance(self, lazy_loader.get_unit_model()) def get_entity_model_from_io(self): if self.is_entity_model(): return self elif self.is_ledger_model(): return self.entity elif self.is_entity_unit_model(): return self.entity # def is_time_bounded(self, from_date, to_date): def database_digest(self, txs_queryset: QuerySet, entity_slug: str = None, unit_slug: str = None, user_model: UserModel = None, from_date: date = None, to_date: date = None, activity: str = None, role: str = None, accounts: str or List[str] or Set[str] = None, posted: bool = True, exclude_zero_bal: bool = True, by_activity: bool = False, by_tx_type: bool = False, by_period: bool = False, by_unit: bool = False, **kwargs): if settings.DJANGO_LEDGER_USE_CLOSING_ENTRIES: if not from_date: entity_model = self.get_entity_model_from_io() closing_entry_date = entity_model.select_closing_entry_for_io_date(to_date=to_date) # print(closing_entry_date) # # if closing_entry_date: # closing_entry_list = entity_model.get_closing_entry_cache_for_date( # closing_date=closing_entry_date, # force_cache_update=True # ) # from_date_d = closing_entry_date + timedelta(days=1) # print('Orig From:', from_date) # print('New from:', from_date_d) # print('To Date:', to_date) # print(closing_entry_list) if not txs_queryset: TransactionModel = lazy_loader.get_txs_model() if self.is_entity_model(): if entity_slug: if entity_slug != self.slug: raise IOValidationError('Inconsistent entity_slug. 
' f'Provided {entity_slug} does not match actual {self.slug}') if unit_slug: txs_queryset = TransactionModel.objects.for_unit( user_model=user_model, entity_slug=entity_slug or self.slug, unit_slug=unit_slug ) else: txs_queryset = TransactionModel.objects.for_entity( user_model=user_model, entity_slug=self ) elif self.is_ledger_model(): if not entity_slug: raise IOValidationError( 'Calling digest from Ledger Model requires entity_slug explicitly for safety') txs_queryset = TransactionModel.objects.for_ledger( user_model=user_model, entity_slug=entity_slug, ledger_model=self ) elif self.is_entity_unit_model(): if not entity_slug: raise IOValidationError( 'Calling digest from Entity Unit requires entity_slug explicitly for safety') txs_queryset = TransactionModel.objects.for_unit( user_model=user_model, entity_slug=entity_slug, unit_slug=unit_slug or self ) else: txs_queryset = TransactionModel.objects.none() txs_queryset = txs_queryset.not_closing_entry() if exclude_zero_bal: txs_queryset = txs_queryset.filter(amount__gt=0) if posted: txs_queryset = txs_queryset.posted() if from_date: txs_queryset = txs_queryset.from_date(from_date=from_date) if to_date: txs_queryset = txs_queryset.to_date(to_date=to_date) if accounts: if not isinstance(accounts, str): accounts = [accounts] txs_queryset = txs_queryset.for_accounts(account_list=accounts) if activity: if isinstance(activity, str): activity = [activity] txs_queryset = txs_queryset.for_activity(activity_list=activity) if role: txs_queryset = txs_queryset.for_roles(role_list=role) VALUES = [ 'account__uuid', 'account__balance_type', 'tx_type', 'account__code', 'account__name', 'account__role', ] ANNOTATE = {'balance': Sum('amount')} ORDER_BY = ['account__uuid'] if by_unit: ORDER_BY.append('journal_entry__entity_unit__uuid') VALUES += ['journal_entry__entity_unit__uuid', 'journal_entry__entity_unit__name'] if by_period: ORDER_BY.append('journal_entry__timestamp') ANNOTATE['dt_idx'] = TruncMonth('journal_entry__timestamp') if by_activity: ORDER_BY.append('journal_entry__activity') VALUES.append('journal_entry__activity') if by_tx_type: ORDER_BY.append('tx_type') VALUES.append('tx_type') return txs_queryset.values(*VALUES).annotate(**ANNOTATE).order_by(*ORDER_BY) def python_digest(self, txs_queryset: Optional[QuerySet] = None, user_model: Optional[UserModel] = None, to_date: date = None, from_date: date = None, equity_only: bool = False, activity: str = None, entity_slug: str = None, unit_slug: str = None, role: Optional[Union[Set[str], List[str]]] = None, accounts: Optional[Union[Set[str], List[str]]] = None, signs: bool = False, by_unit: bool = False, by_activity: bool = False, by_tx_type: bool = False, by_period: bool = False, **kwargs) -> list or tuple: if equity_only: role = roles_module.GROUP_EARNINGS txs_queryset = self.database_digest( user_model=user_model, txs_queryset=txs_queryset, to_date=to_date, from_date=from_date, entity_slug=entity_slug, unit_slug=unit_slug, activity=activity, role=role, accounts=accounts, by_unit=by_unit, by_activity=by_activity, by_tx_type=by_tx_type, by_period=by_period, **kwargs) for tx_model in txs_queryset: if tx_model['account__balance_type'] != tx_model['tx_type']: tx_model['balance'] = -tx_model['balance'] # txs_list = list(txs_queryset) # txs_list.sort(key=lambda a: ( # a['account__uuid'], # str(a.get('journal_entry__entity_unit__uuid', '')) if by_unit else '', # a['dt_idx'].year if by_period else 0, # a['dt_idx'].month if by_period else 0, # str(a['journal_entry__activity']) if by_activity else None, # 
a['tx_type'] if by_tx_type else '', # )) accounts_gb_code = groupby(txs_queryset, key=lambda a: ( a['account__uuid'], a.get('journal_entry__entity_unit__uuid') if by_unit else None, a.get('dt_idx').year if by_period else None, a.get('dt_idx').month if by_period else None, a.get('journal_entry__activity') if by_activity else None, a.get('tx_type') if by_tx_type else None, )) gb_digest = [self.aggregate_balances(k, g) for k, g in accounts_gb_code] for acc in gb_digest: acc['balance_abs'] = abs(acc['balance']) if signs: TransactionModel = lazy_loader.get_txs_model() for acc in gb_digest: if any([ all([acc['role_bs'] == roles_module.BS_ASSET_ROLE, acc['balance_type'] == TransactionModel.CREDIT]), all([acc['role_bs'] in ( roles_module.BS_LIABILITIES_ROLE, roles_module.BS_EQUITY_ROLE ), acc['balance_type'] == TransactionModel.DEBIT]) ]): acc['balance'] = -acc['balance'] return txs_queryset, gb_digest @staticmethod def aggregate_balances(k, g): gl = list(g) return { 'account_uuid': k[0], 'unit_uuid': k[1], 'unit_name': gl[0].get('journal_entry__entity_unit__name'), 'activity': gl[0].get('journal_entry__activity'), 'period_year': k[2], 'period_month': k[3], 'role_bs': roles_module.BS_ROLES.get(gl[0]['account__role']), 'role': gl[0]['account__role'], 'code': gl[0]['account__code'], 'name': gl[0]['account__name'], 'balance_type': gl[0]['account__balance_type'], 'tx_type': k[5], 'balance': sum(a['balance'] for a in gl), } def digest(self, entity_slug: str = None, unit_slug: str = None, user_model: UserModel = None, txs_queryset: QuerySet = None, as_io_digest: bool = False, accounts: Optional[Union[Set[str], List[str]]] = None, role: Optional[Union[Set[str], List[str]]] = None, activity: str = None, signs: bool = True, to_date: Union[str, datetime, date] = None, from_date: Union[str, datetime, date] = None, process_roles: bool = False, process_groups: bool = False, process_ratios: bool = False, process_activity: bool = False, equity_only: bool = False, by_period: bool = False, by_unit: bool = False, by_activity: bool = False, by_tx_type: bool = False, digest_name: str = None, balance_sheet_statement: bool = False, income_statement: bool = False, cash_flow_statement: bool = False, **kwargs) -> Union[Tuple, IODigestContextManager]: if balance_sheet_statement: from_date = None if cash_flow_statement: by_activity = True if activity: activity = validate_activity(activity) if role: role = roles_module.validate_roles(role) from_date, to_date = validate_dates(from_date, to_date) io_data = defaultdict(lambda: dict()) io_data['io_model'] = self io_data['from_date'] = from_date io_data['to_date'] = to_date io_data['by_unit'] = by_unit io_data['by_period'] = by_period io_data['by_activity'] = by_activity io_data['by_tx_type'] = by_tx_type txs_qs, accounts_digest = self.python_digest( txs_queryset=txs_queryset, user_model=user_model, accounts=accounts, role=role, activity=activity, entity_slug=entity_slug, unit_slug=unit_slug, to_date=to_date, from_date=from_date, signs=signs, equity_only=equity_only, by_period=by_period, by_unit=by_unit, by_activity=by_activity, by_tx_type=by_tx_type, **kwargs ) io_data['txs_qs'] = txs_qs io_data['accounts'] = accounts_digest if process_roles: roles_mgr = RoleContextManager( io_data=io_data, by_period=by_period, by_unit=by_unit ) # idea: change digest() name to something else? maybe aggregate, calculate?... io_data = roles_mgr.digest() if any([ process_groups, balance_sheet_statement, income_statement, cash_flow_statement ]):
group_mgr = GroupContextManager(
5
2023-10-20 01:07:20+00:00
24k
acolas1/KGSimple
simplify.py
[ { "identifier": "FluencyScorer", "path": "scoring/fluency_scorer.py", "snippet": "class FluencyScorer:\n def __init__(self, batch_size=1, reduce=\"mean\", log=True, laplace_smooth=False, prob_dict_path=None):\n self.device = \"cuda:1\" if torch.cuda.is_available() else \"cpu\"\n self.ba...
import os import json import numpy as np import pandas as pd import torch import random from collections import defaultdict from transformers import BartTokenizer, T5Tokenizer from transformers import AdamW, get_linear_schedule_with_warmup from utils import * from scoring.fluency_scorer import FluencyScorer from scoring.saliency_scorer import SaliencyBERTScore from scoring.simplicity_scorer import SimplicityTextScore from scoring.guardrails import * from scoring.aggregate_scorer import ScorerWrapper from GAP.data_relations_as_nodes import GAPDataloader, EventDataset, WebNLGDataset from GAP.data_relations_as_nodes import evaluate_bleu, get_t_emb_dim from tqdm import tqdm, trange from rake_nltk import Rake from evaluate import load from sentence_similarity import sentence_similarity from GAP.modeling_gap_type import GAPBartForConditionalGeneration as GAP_Type_model from GAP.modeling_gap import GAPBartForConditionalGeneration as GAP_model
21,336
# import yake

bertscore = load("bertscore")

## sentence model for merge
phrase_model = sentence_similarity(model_name='distilbert-base-uncased',embedding_type='cls_token_embedding')

## for sentence checking
ner_check = NERInaccuracyPenalty()

def run(args, logger):
    #load in model for graph-to-text and tokenizer
    checkpoint = args.model_path
    tokenizer_path = args.tokenizer_path
    tokenizer = BartTokenizer.from_pretrained(tokenizer_path)
    n_gpu = torch.cuda.device_count()
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
    if args.type_encoding:
        t_emb_dim = get_t_emb_dim(args)
# import yake

bertscore = load("bertscore")

## sentence model for merge
phrase_model = sentence_similarity(model_name='distilbert-base-uncased',embedding_type='cls_token_embedding')

## for sentence checking
ner_check = NERInaccuracyPenalty()

def run(args, logger):
    #load in model for graph-to-text and tokenizer
    checkpoint = args.model_path
    tokenizer_path = args.tokenizer_path
    tokenizer = BartTokenizer.from_pretrained(tokenizer_path)
    n_gpu = torch.cuda.device_count()
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
    if args.type_encoding:
        t_emb_dim = get_t_emb_dim(args)
model = GAP_Type_model.from_pretrained(checkpoint,t_emb_dim=t_emb_dim)
8
2023-10-24 13:24:23+00:00
24k
ForceFledgling/proxyhub
proxyhub/api.py
[ { "identifier": "Checker", "path": "proxyhub/checker.py", "snippet": "class Checker:\n \"\"\"Proxy checker.\"\"\"\n\n def __init__(\n self,\n judges,\n max_tries=3,\n timeout=8,\n verify_ssl=False,\n strict=False,\n dnsbl=None,\n real_ext_ip=...
import asyncio import io import signal import warnings from collections import Counter, defaultdict from functools import partial from pprint import pprint from .checker import Checker from .errors import ResolveError from .providers import PROVIDERS, Provider from .proxy import Proxy from .resolver import Resolver from .server import Server from .utils import IPPortPatternLine, log
14,879
strict=strict, dnsbl=dnsbl, loop=self._loop, ) self._countries = countries self._limit = limit tasks = [asyncio.ensure_future(self._checker.check_judges())] if data: task = asyncio.ensure_future(self._load(data, check=True)) else: task = asyncio.ensure_future(self._grab(types, check=True)) tasks.append(task) self._all_tasks.extend(tasks) def serve(self, host='127.0.0.1', port=8888, limit=100, **kwargs): """Start a local proxy server. The server distributes incoming requests to a pool of found proxies. When the server receives an incoming request, it chooses the optimal proxy (based on the percentage of errors and average response time) and passes to it the incoming request. In addition to the parameters listed below are also accept all the parameters of the :meth:`.find` method and passed it to gather proxies to a pool. :ref:`Example of usage <proxyhub-examples-server>`. :param str host: (optional) Host of local proxy server :param int port: (optional) Port of local proxy server :param int limit: (optional) When will be found a requested number of working proxies, checking of new proxies will be lazily paused. Checking will be resumed if all the found proxies will be discarded in the process of working with them (see :attr:`max_error_rate`, :attr:`max_resp_time`). And will continue until it finds one working proxy and paused again. The default value is 100 :param int max_tries: (optional) The maximum number of attempts to handle an incoming request. If not specified, it will use the value specified during the creation of the :class:`Broker` object. Attempts can be made with different proxies. The default value is 3 :param int strategy: (optional) The strategy used for picking proxy from pool. The default value is 'best' :param int min_queue: (optional) The minimum number of proxies to choose from before deciding which is the most suitable to use. The default value is 5 :param int min_req_proxy: (optional) The minimum number of processed requests to estimate the quality of proxy (in accordance with :attr:`max_error_rate` and :attr:`max_resp_time`). The default value is 5 :param int max_error_rate: (optional) The maximum percentage of requests that ended with an error. For example: 0.5 = 50%. If proxy.error_rate exceeds this value, proxy will be removed from the pool. The default value is 0.5 :param int max_resp_time: (optional) The maximum response time in seconds. If proxy.avg_resp_time exceeds this value, proxy will be removed from the pool. The default value is 8 :param bool prefer_connect: (optional) Flag that indicates whether to use the CONNECT method if possible. For example: If is set to True and a proxy supports HTTP proto (GET or POST requests) and CONNECT method, the server will try to use CONNECT method and only after that send the original request. The default value is False :param list http_allowed_codes: (optional) Acceptable HTTP codes returned by proxy on requests. If a proxy return code, not included in this list, it will be considered as a proxy error, not a wrong/unavailable address. For example, if a proxy will return a ``404 Not Found`` response - this will be considered as an error of a proxy. Checks only for HTTP protocol, HTTPS not supported at the moment. By default the list is empty and the response code is not verified :param int backlog: (optional) The maximum number of queued connections passed to listen. The default value is 100 :raises ValueError: If :attr:`limit` is less than or equal to zero. Because a parsing of providers will be endless .. 
versionadded:: 0.2.0 """ if limit <= 0: raise ValueError( 'In serve mode value of the limit cannot be less than or ' 'equal to zero. Otherwise, a parsing of providers will be ' 'endless' ) self._server = Server( host=host, port=port, proxies=self._proxies, timeout=self._timeout, max_tries=kwargs.pop('max_tries', self._max_tries), loop=self._loop, **kwargs, ) self._server.start() task = asyncio.ensure_future(self.find(limit=limit, **kwargs)) self._all_tasks.append(task) async def _load(self, data, check=True): """Looking for proxies in the passed data. Transform the passed data from [raw string | file-like object | list] to set {(host, port), ...}: {('192.168.0.1', '80'), } """ log.debug('Load proxies from the raw data') if isinstance(data, io.TextIOWrapper): data = data.read() if isinstance(data, str):
# Pause between grabbing cycles; in seconds. GRAB_PAUSE = 180 # The maximum number of providers that are parsed concurrently MAX_CONCURRENT_PROVIDERS = 3 class Broker: """The Broker. | One broker to rule them all, one broker to find them, | One broker to bring them all and in the darkness bind them. :param asyncio.Queue queue: (optional) Queue of found/checked proxies :param int timeout: (optional) Timeout of a request in seconds :param int max_conn: (optional) The maximum number of concurrent checks of proxies :param int max_tries: (optional) The maximum number of attempts to check a proxy :param list judges: (optional) Urls of pages that show HTTP headers and IP address. Or :class:`~proxyhub.judge.Judge` objects :param list providers: (optional) Urls of pages where to find proxies. Or :class:`~proxyhub.providers.Provider` objects :param bool verify_ssl: (optional) Flag indicating whether to check the SSL certificates. Set to True to check ssl certifications :param loop: (optional) asyncio compatible event loop :param stop_broker_on_sigint: (optional) whether set SIGINT signal on broker object. Useful for a thread other than main thread. .. deprecated:: 0.2.0 Use :attr:`max_conn` and :attr:`max_tries` instead of :attr:`max_concurrent_conn` and :attr:`attempts_conn`. """ def __init__( self, queue=None, timeout=8, max_conn=200, max_tries=3, judges=None, providers=None, verify_ssl=False, loop=None, stop_broker_on_sigint=True, **kwargs, ): self._loop = loop or asyncio.get_event_loop_policy().get_event_loop() self._proxies = queue or asyncio.Queue() self._resolver = Resolver(loop=self._loop) self._timeout = timeout self._verify_ssl = verify_ssl self.unique_proxies = {} self._all_tasks = [] self._checker = None self._server = None self._limit = 0 # not limited self._countries = None max_concurrent_conn = kwargs.get('max_concurrent_conn') if max_concurrent_conn: warnings.warn( '`max_concurrent_conn` is deprecated, use `max_conn` instead', DeprecationWarning, ) if isinstance(max_concurrent_conn, asyncio.Semaphore): max_conn = max_concurrent_conn._value else: max_conn = max_concurrent_conn attempts_conn = kwargs.get('attempts_conn') if attempts_conn: warnings.warn( '`attempts_conn` is deprecated, use `max_tries` instead', DeprecationWarning, ) max_tries = attempts_conn # The maximum number of concurrent checking proxies self._on_check = asyncio.Queue(maxsize=max_conn) self._max_tries = max_tries self._judges = judges self._providers = [ p if isinstance(p, Provider) else Provider(p) for p in (providers or PROVIDERS) ] if stop_broker_on_sigint: try: self._loop.add_signal_handler(signal.SIGINT, self.stop) # add_signal_handler() is not implemented on Win # https://docs.python.org/3.5/library/asyncio-eventloops.html#windows except NotImplementedError: pass async def grab(self, *, countries=None, limit=0): """Gather proxies from the providers without checking. :param list countries: (optional) List of ISO country codes where should be located proxies :param int limit: (optional) The maximum number of proxies :ref:`Example of usage <proxyhub-examples-grab>`. """ self._countries = countries self._limit = limit task = asyncio.ensure_future(self._grab(check=False)) self._all_tasks.append(task) async def find( self, *, types=None, data=None, countries=None, post=False, strict=False, dnsbl=None, limit=0, **kwargs, ): """Gather and check proxies from providers or from a passed data. :ref:`Example of usage <proxyhub-examples-find>`. :param list types: Types (protocols) that need to be check on support by proxy. 
Supported: HTTP, HTTPS, SOCKS4, SOCKS5, CONNECT:80, CONNECT:25 And levels of anonymity (HTTP only): Transparent, Anonymous, High :param data: (optional) String or list with proxies. Also can be a file-like object supports `read()` method. Used instead of providers :param list countries: (optional) List of ISO country codes where should be located proxies :param bool post: (optional) Flag indicating use POST instead of GET for requests when checking proxies :param bool strict: (optional) Flag indicating that anonymity levels of types (protocols) supported by a proxy must be equal to the requested types and levels of anonymity. By default, strict mode is off and for a successful check is enough to satisfy any one of the requested types :param list dnsbl: (optional) Spam databases for proxy checking. `Wiki <https://en.wikipedia.org/wiki/DNSBL>`_ :param int limit: (optional) The maximum number of proxies :raises ValueError: If :attr:`types` not given. .. versionchanged:: 0.2.0 Added: :attr:`post`, :attr:`strict`, :attr:`dnsbl`. Changed: :attr:`types` is required. """ ip = await self._resolver.get_real_ext_ip() types = _update_types(types) if not types: raise ValueError('`types` is required') self._checker = Checker( judges=self._judges, timeout=self._timeout, verify_ssl=self._verify_ssl, max_tries=self._max_tries, real_ext_ip=ip, types=types, post=post, strict=strict, dnsbl=dnsbl, loop=self._loop, ) self._countries = countries self._limit = limit tasks = [asyncio.ensure_future(self._checker.check_judges())] if data: task = asyncio.ensure_future(self._load(data, check=True)) else: task = asyncio.ensure_future(self._grab(types, check=True)) tasks.append(task) self._all_tasks.extend(tasks) def serve(self, host='127.0.0.1', port=8888, limit=100, **kwargs): """Start a local proxy server. The server distributes incoming requests to a pool of found proxies. When the server receives an incoming request, it chooses the optimal proxy (based on the percentage of errors and average response time) and passes to it the incoming request. In addition to the parameters listed below are also accept all the parameters of the :meth:`.find` method and passed it to gather proxies to a pool. :ref:`Example of usage <proxyhub-examples-server>`. :param str host: (optional) Host of local proxy server :param int port: (optional) Port of local proxy server :param int limit: (optional) When will be found a requested number of working proxies, checking of new proxies will be lazily paused. Checking will be resumed if all the found proxies will be discarded in the process of working with them (see :attr:`max_error_rate`, :attr:`max_resp_time`). And will continue until it finds one working proxy and paused again. The default value is 100 :param int max_tries: (optional) The maximum number of attempts to handle an incoming request. If not specified, it will use the value specified during the creation of the :class:`Broker` object. Attempts can be made with different proxies. The default value is 3 :param int strategy: (optional) The strategy used for picking proxy from pool. The default value is 'best' :param int min_queue: (optional) The minimum number of proxies to choose from before deciding which is the most suitable to use. The default value is 5 :param int min_req_proxy: (optional) The minimum number of processed requests to estimate the quality of proxy (in accordance with :attr:`max_error_rate` and :attr:`max_resp_time`). 
The default value is 5 :param int max_error_rate: (optional) The maximum percentage of requests that ended with an error. For example: 0.5 = 50%. If proxy.error_rate exceeds this value, proxy will be removed from the pool. The default value is 0.5 :param int max_resp_time: (optional) The maximum response time in seconds. If proxy.avg_resp_time exceeds this value, proxy will be removed from the pool. The default value is 8 :param bool prefer_connect: (optional) Flag that indicates whether to use the CONNECT method if possible. For example: If is set to True and a proxy supports HTTP proto (GET or POST requests) and CONNECT method, the server will try to use CONNECT method and only after that send the original request. The default value is False :param list http_allowed_codes: (optional) Acceptable HTTP codes returned by proxy on requests. If a proxy return code, not included in this list, it will be considered as a proxy error, not a wrong/unavailable address. For example, if a proxy will return a ``404 Not Found`` response - this will be considered as an error of a proxy. Checks only for HTTP protocol, HTTPS not supported at the moment. By default the list is empty and the response code is not verified :param int backlog: (optional) The maximum number of queued connections passed to listen. The default value is 100 :raises ValueError: If :attr:`limit` is less than or equal to zero. Because a parsing of providers will be endless .. versionadded:: 0.2.0 """ if limit <= 0: raise ValueError( 'In serve mode value of the limit cannot be less than or ' 'equal to zero. Otherwise, a parsing of providers will be ' 'endless' ) self._server = Server( host=host, port=port, proxies=self._proxies, timeout=self._timeout, max_tries=kwargs.pop('max_tries', self._max_tries), loop=self._loop, **kwargs, ) self._server.start() task = asyncio.ensure_future(self.find(limit=limit, **kwargs)) self._all_tasks.append(task) async def _load(self, data, check=True): """Looking for proxies in the passed data. Transform the passed data from [raw string | file-like object | list] to set {(host, port), ...}: {('192.168.0.1', '80'), } """ log.debug('Load proxies from the raw data') if isinstance(data, io.TextIOWrapper): data = data.read() if isinstance(data, str):
data = IPPortPatternLine.findall(data)
7
2023-11-05 13:28:57+00:00
24k
TheFunny/ArisuAutoSweeper
module/webui/app.py
[ { "identifier": "AzurLaneConfig", "path": "module/config/config.py", "snippet": "class AzurLaneConfig(ConfigUpdater, ManualConfig, GeneratedConfig, ConfigWatcher):\n stop_event: threading.Event = None\n bound = {}\n\n # Class property\n is_hoarding_task = True\n\n def __setattr__(self, ke...
import argparse import queue import threading import time import module.webui.lang as lang from datetime import datetime from functools import partial from typing import Dict, List, Optional from pywebio import config as webconfig from pywebio.output import ( Output, clear, close_popup, popup, put_button, put_buttons, put_collapse, put_column, put_error, put_html, put_link, put_loading, put_markdown, put_row, put_scope, put_table, put_text, put_warning, toast, use_scope, ) from pywebio.pin import pin, pin_on_change from pywebio.session import go_app, info, local, register_thread, run_js, set_env from module.config.config import AzurLaneConfig, Function from module.config.utils import ( alas_instance, alas_template, deep_get, deep_iter, deep_set, dict_to_kv, filepath_args, filepath_config, read_file, ) from module.logger import logger from module.webui.base import Frame from module.webui.fake import ( get_config_mod, load_config, ) from module.webui.fastapi import asgi_app from module.webui.lang import _t, t from module.webui.pin import put_input, put_select from module.webui.process_manager import ProcessManager from module.webui.remote_access import RemoteAccess from module.webui.setting import State from module.webui.updater import updater from module.webui.utils import ( Icon, Switch, TaskHandler, add_css, filepath_css, get_alas_config_listen_path, get_localstorage, get_window_visibility_state, login, parse_pin_value, raise_exception, re_fullmatch, ) from module.webui.widgets import ( BinarySwitchButton, RichLog, T_Output_Kwargs, put_icon_buttons, put_loading_text, put_none, put_output, )
15,264
log = RichLog("log") with use_scope("logs"): put_scope("log-bar", [ put_scope("log-title", [ put_text(t("Gui.Overview.Log")).style("font-size: 1.25rem; margin: auto .5rem auto;"), put_scope("log-title-btns", [ put_scope("log_scroll_btn"), ]), ]), put_html('<hr class="hr-group">'), put_scope("dashboard", [ # Empty dashboard, values will be updated in alas_update_overview_task() put_scope(f"dashboard-row-{arg}", []) for arg in self.ALAS_STORED.keys() if deep_get(self.ALAS_STORED, keys=[arg, "order"], default=0) # Empty content to left-align last row ] + [put_html("<i></i>")] * min(len(self.ALAS_STORED), 4)) ]) put_scope("log", [put_html("")]) log.console.width = log.get_width() switch_log_scroll = BinarySwitchButton( label_on=t("Gui.Button.ScrollON"), label_off=t("Gui.Button.ScrollOFF"), onclick_on=lambda: log.set_scroll(False), onclick_off=lambda: log.set_scroll(True), get_state=lambda: log.keep_bottom, color_on="on", color_off="off", scope="log_scroll_btn", ) self.task_handler.add(switch_scheduler.g(), 1, True) self.task_handler.add(switch_log_scroll.g(), 1, True) self.task_handler.add(self.alas_update_overview_task, 10, True) self.task_handler.add(log.put_log(self.alas), 0.25, True) def _init_alas_config_watcher(self) -> None: def put_queue(path, value): self.modified_config_queue.put({"name": path, "value": value}) for path in get_alas_config_listen_path(self.ALAS_ARGS): pin_on_change( name="_".join(path), onchange=partial(put_queue, ".".join(path)) ) logger.info("Init config watcher done.") def _alas_thread_update_config(self) -> None: modified = {} while self.alive: try: d = self.modified_config_queue.get(timeout=10) config_name = self.alas_name read = self.alas_config.read_file write = self.alas_config.write_file except queue.Empty: continue modified[d["name"]] = d["value"] while True: try: d = self.modified_config_queue.get(timeout=1) modified[d["name"]] = d["value"] except queue.Empty: self._save_config(modified, config_name, read, write) modified.clear() break def _save_config( self, modified: Dict[str, str], config_name: str, read=State.config_updater.read_file, write=State.config_updater.write_file, ) -> None: try: valid = [] invalid = [] config = read(config_name) for k, v in modified.copy().items(): valuetype = deep_get(self.ALAS_ARGS, k + ".valuetype") v = parse_pin_value(v, valuetype) validate = deep_get(self.ALAS_ARGS, k + ".validate") if not len(str(v)): default = deep_get(self.ALAS_ARGS, k + ".value") modified[k] = default deep_set(config, k, default) valid.append(k) pin["_".join(k.split("."))] = default elif not validate or re_fullmatch(validate, v): deep_set(config, k, v) modified[k] = v valid.append(k) # update Emotion Record if Emotion Value is changed if "Emotion" in k and "Value" in k: k = k.split(".") k[-1] = k[-1].replace("Value", "Record") k = ".".join(k) v = datetime.now().strftime("%Y-%m-%d %H:%M:%S") modified[k] = v deep_set(config, k, v) valid.append(k) pin["_".join(k.split("."))] = v else: modified.pop(k) invalid.append(k) logger.warning(f"Invalid value {v} for key {k}, skip saving.") self.pin_remove_invalid_mark(valid) self.pin_set_invalid_mark(invalid) if modified: toast( t("Gui.Toast.ConfigSaved"), duration=1, position="right", color="success", ) logger.info(
task_handler = TaskHandler() class AlasGUI(Frame): ALAS_MENU: Dict[str, Dict[str, List[str]]] ALAS_ARGS: Dict[str, Dict[str, Dict[str, Dict[str, str]]]] ALAS_STORED: Dict[str, Dict[str, Dict[str, str]]] theme = "default" def initial(self) -> None: self.ALAS_MENU = read_file(filepath_args("menu", self.alas_mod)) self.ALAS_ARGS = read_file(filepath_args("args", self.alas_mod)) self.ALAS_STORED = read_file(filepath_args("stored", self.alas_mod)) self._init_alas_config_watcher() def __init__(self) -> None: super().__init__() # modified keys, return values of pin_wait_change() self.modified_config_queue = queue.Queue() # alas config name self.alas_name = "" self.alas_mod = "alas" self.alas_config = AzurLaneConfig("template") self.initial() @use_scope("aside", clear=True) def set_aside(self) -> None: # TODO: update put_icon_buttons() put_icon_buttons( Icon.DEVELOP, buttons=[ {"label": t("Gui.Aside.Home"), "value": "Home", "color": "aside"} ], onclick=[self.ui_develop], ), for name in alas_instance(): put_icon_buttons( Icon.RUN, buttons=[{"label": name, "value": name, "color": "aside"}], onclick=self.ui_alas, ) put_icon_buttons( Icon.ADD, buttons=[ {"label": t("Gui.Aside.AddAlas"), "value": "AddAlas", "color": "aside"} ], onclick=[self.ui_add_alas], ), @use_scope("header_status") def set_status(self, state: int) -> None: """ Args: state (int): 1 (running) 2 (not running) 3 (warning, stop unexpectedly) 4 (stop for update) 0 (hide) -1 (*state not changed) """ if state == -1: return clear() if state == 1: put_loading_text(t("Gui.Status.Running"), color="success") elif state == 2: put_loading_text(t("Gui.Status.Inactive"), color="secondary", fill=True) elif state == 3: put_loading_text(t("Gui.Status.Warning"), shape="grow", color="warning") elif state == 4: put_loading_text(t("Gui.Status.Updating"), shape="grow", color="success") @classmethod def set_theme(cls, theme="default") -> None: cls.theme = theme State.deploy_config.Theme = theme State.theme = theme webconfig(theme=theme) @use_scope("menu", clear=True) def alas_set_menu(self) -> None: """ Set menu """ put_buttons( [{ "label": t("Gui.MenuAlas.Overview"), "value": "Overview", "color": "menu", }], onclick=[self.alas_overview], ).style(f"--menu-Overview--") for menu, task_data in self.ALAS_MENU.items(): if task_data.get("page") == "tool": _onclick = self.alas_daemon_overview else: _onclick = self.alas_set_group if task_data.get("menu") == "collapse": task_btn_list = [ put_buttons( [{ "label": t(f"Task.{task}.name"), "value": task, "color": "menu", }], onclick=_onclick, ).style(f"--menu-{task}--") for task in task_data.get("tasks", []) ] put_collapse(title=t(f"Menu.{menu}.name"), content=task_btn_list) else: title = t(f"Menu.{menu}.name") put_html('<div class="hr-task-group-box">' '<span class="hr-task-group-line"></span>' f'<span class="hr-task-group-text">{title}</span>' '<span class="hr-task-group-line"></span>' '</div>' ) for task in task_data.get("tasks", []): put_buttons( [{ "label": t(f"Task.{task}.name"), "value": task, "color": "menu", }], onclick=_onclick, ).style(f"--menu-{task}--").style(f"padding-left: 0.75rem") self.alas_overview() @use_scope("content", clear=True) def alas_set_group(self, task: str) -> None: """ Set arg groups from dict """ self.init_menu(name=task) self.set_title(t(f"Task.{task}.name")) put_scope("_groups", [put_none(), put_scope("groups"), put_scope("navigator")]) task_help: str = t(f"Task.{task}.help") if task_help: put_scope( "group__info", scope="groups", content=[put_text(task_help).style("font-size: 1rem")], 
) config = self.alas_config.read_file(self.alas_name) for group, arg_dict in deep_iter(self.ALAS_ARGS[task], depth=1): if self.set_group(group, arg_dict, config, task): self.set_navigator(group) @use_scope("groups") def set_group(self, group, arg_dict, config, task): group_name = group[0] output_list: List[Output] = [] for arg, arg_dict in deep_iter(arg_dict, depth=1): output_kwargs: T_Output_Kwargs = arg_dict.copy() # Skip hide display: Optional[str] = output_kwargs.pop("display", None) if display == "hide": continue # Disable elif display == "disabled": output_kwargs["disabled"] = True # Output type output_kwargs["widget_type"] = output_kwargs.pop("type") arg_name = arg[0] # [arg_name,] # Internal pin widget name output_kwargs["name"] = f"{task}_{group_name}_{arg_name}" # Display title output_kwargs["title"] = t(f"{group_name}.{arg_name}.name") # Get value from config value = deep_get( config, [task, group_name, arg_name], output_kwargs["value"] ) # idk value = str(value) if isinstance(value, datetime) else value # Default value output_kwargs["value"] = value # Options output_kwargs["options"] = options = output_kwargs.pop("option", []) # Options label options_label = [] for opt in options: options_label.append(t(f"{group_name}.{arg_name}.{opt}")) output_kwargs["options_label"] = options_label # Help arg_help = t(f"{group_name}.{arg_name}.help") if arg_help == "" or not arg_help: arg_help = None output_kwargs["help"] = arg_help # Invalid feedback output_kwargs["invalid_feedback"] = t("Gui.Text.InvalidFeedBack", value) o = put_output(output_kwargs) if o is not None: # output will inherit current scope when created, override here o.spec["scope"] = f"#pywebio-scope-group_{group_name}" output_list.append(o) if not output_list: return 0 with use_scope(f"group_{group_name}"): put_text(t(f"{group_name}._info.name")) group_help = t(f"{group_name}._info.help") if group_help != "": put_text(group_help) put_html('<hr class="hr-group">') for output in output_list: output.show() return len(output_list) @use_scope("navigator") def set_navigator(self, group): js = f""" $("#pywebio-scope-groups").scrollTop( $("#pywebio-scope-group_{group[0]}").position().top + $("#pywebio-scope-groups").scrollTop() - 59 ) """ put_button( label=t(f"{group[0]}._info.name"), onclick=lambda: run_js(js), color="navigator", ) def set_dashboard(self, arg, arg_dict, config): i18n = arg_dict.get('i18n') if i18n: name = t(i18n) else: name = arg color = arg_dict.get("color", "#777777") nodata = t("Gui.Dashboard.NoData") def set_value(dic): if "total" in dic.get("attrs", []) and config.get("total") is not None: return [ put_text(config.get("value", nodata)).style("--dashboard-value--"), put_text(f' / {config.get("total", "")}').style("--dashboard-time--"), ] else: return [ put_text(config.get("value", nodata)).style("--dashboard-value--"), ] with use_scope(f"dashboard-row-{arg}", clear=True): put_html(f'<div><div class="dashboard-icon" style="background-color:{color}"></div>'), put_scope(f"dashboard-content-{arg}", [ put_scope(f"dashboard-value-{arg}", set_value(arg_dict)), put_scope(f"dashboard-time-{arg}", [ put_text(f"{name} - {lang.readable_time(config.get('time', ''))}").style("--dashboard-time--"), ]) ]) @use_scope("content", clear=True) def alas_overview(self) -> None: self.init_menu(name="Overview") self.set_title(t(f"Gui.MenuAlas.Overview")) put_scope("overview", [put_scope("schedulers"), put_scope("logs")]) with use_scope("schedulers"): put_scope( "scheduler-bar", [ put_text(t("Gui.Overview.Scheduler")).style( "font-size: 
1.25rem; margin: auto .5rem auto;" ), put_scope("scheduler_btn"), ], ) put_scope( "running", [ put_text(t("Gui.Overview.Running")), put_html('<hr class="hr-group">'), put_scope("running_tasks"), ], ) put_scope( "pending", [ put_text(t("Gui.Overview.Pending")), put_html('<hr class="hr-group">'), put_scope("pending_tasks"), ], ) put_scope( "waiting", [ put_text(t("Gui.Overview.Waiting")), put_html('<hr class="hr-group">'), put_scope("waiting_tasks"), ], ) switch_scheduler = BinarySwitchButton( label_on=t("Gui.Button.Stop"), label_off=t("Gui.Button.Start"), onclick_on=lambda: self.alas.stop(), onclick_off=lambda: self.alas.start(None, updater.event), get_state=lambda: self.alas.alive, color_on="off", color_off="on", scope="scheduler_btn", ) log = RichLog("log") with use_scope("logs"): put_scope("log-bar", [ put_scope("log-title", [ put_text(t("Gui.Overview.Log")).style("font-size: 1.25rem; margin: auto .5rem auto;"), put_scope("log-title-btns", [ put_scope("log_scroll_btn"), ]), ]), put_html('<hr class="hr-group">'), put_scope("dashboard", [ # Empty dashboard, values will be updated in alas_update_overview_task() put_scope(f"dashboard-row-{arg}", []) for arg in self.ALAS_STORED.keys() if deep_get(self.ALAS_STORED, keys=[arg, "order"], default=0) # Empty content to left-align last row ] + [put_html("<i></i>")] * min(len(self.ALAS_STORED), 4)) ]) put_scope("log", [put_html("")]) log.console.width = log.get_width() switch_log_scroll = BinarySwitchButton( label_on=t("Gui.Button.ScrollON"), label_off=t("Gui.Button.ScrollOFF"), onclick_on=lambda: log.set_scroll(False), onclick_off=lambda: log.set_scroll(True), get_state=lambda: log.keep_bottom, color_on="on", color_off="off", scope="log_scroll_btn", ) self.task_handler.add(switch_scheduler.g(), 1, True) self.task_handler.add(switch_log_scroll.g(), 1, True) self.task_handler.add(self.alas_update_overview_task, 10, True) self.task_handler.add(log.put_log(self.alas), 0.25, True) def _init_alas_config_watcher(self) -> None: def put_queue(path, value): self.modified_config_queue.put({"name": path, "value": value}) for path in get_alas_config_listen_path(self.ALAS_ARGS): pin_on_change( name="_".join(path), onchange=partial(put_queue, ".".join(path)) ) logger.info("Init config watcher done.") def _alas_thread_update_config(self) -> None: modified = {} while self.alive: try: d = self.modified_config_queue.get(timeout=10) config_name = self.alas_name read = self.alas_config.read_file write = self.alas_config.write_file except queue.Empty: continue modified[d["name"]] = d["value"] while True: try: d = self.modified_config_queue.get(timeout=1) modified[d["name"]] = d["value"] except queue.Empty: self._save_config(modified, config_name, read, write) modified.clear() break def _save_config( self, modified: Dict[str, str], config_name: str, read=State.config_updater.read_file, write=State.config_updater.write_file, ) -> None: try: valid = [] invalid = [] config = read(config_name) for k, v in modified.copy().items(): valuetype = deep_get(self.ALAS_ARGS, k + ".valuetype") v = parse_pin_value(v, valuetype) validate = deep_get(self.ALAS_ARGS, k + ".validate") if not len(str(v)): default = deep_get(self.ALAS_ARGS, k + ".value") modified[k] = default deep_set(config, k, default) valid.append(k) pin["_".join(k.split("."))] = default elif not validate or re_fullmatch(validate, v): deep_set(config, k, v) modified[k] = v valid.append(k) # update Emotion Record if Emotion Value is changed if "Emotion" in k and "Value" in k: k = k.split(".") k[-1] = k[-1].replace("Value", 
"Record") k = ".".join(k) v = datetime.now().strftime("%Y-%m-%d %H:%M:%S") modified[k] = v deep_set(config, k, v) valid.append(k) pin["_".join(k.split("."))] = v else: modified.pop(k) invalid.append(k) logger.warning(f"Invalid value {v} for key {k}, skip saving.") self.pin_remove_invalid_mark(valid) self.pin_set_invalid_mark(invalid) if modified: toast( t("Gui.Toast.ConfigSaved"), duration=1, position="right", color="success", ) logger.info(
f"Save config {filepath_config(config_name)}, {dict_to_kv(modified)}"
9
2023-11-01 07:09:45+00:00
24k
radekd91/inferno
inferno/models/DECA.py
[ { "identifier": "EmoNetLoss", "path": "inferno/layers/losses/EmoNetLoss.py", "snippet": "class EmoNetLoss(EmoLossBase):\n# class EmoNetLoss(object):\n\n def __init__(self, device, emonet=None, trainable=False, normalize_features=False, emo_feat_loss=None, au_loss=None):\n if emonet is None:\n ...
import os, sys import torch import torchvision import torch.nn.functional as F import torchvision.transforms.functional as F_v import numpy as np import cv2 import inferno.layers.losses.DecaLosses as lossfunc import inferno.layers.losses.MediaPipeLandmarkLosses as lossfunc_mp import inferno.utils.DecaUtils as util import pytorch_lightning.plugins.environments.lightning_environment as le import psutil import adabound import copy from pytorch_lightning import LightningModule from pytorch_lightning.loggers import WandbLogger from inferno.layers.losses.EmoNetLoss import EmoNetLoss, create_emo_loss, create_au_loss from skimage.io import imread from skimage.transform import resize from pathlib import Path from inferno.models.Renderer import SRenderY from inferno.models.DecaEncoder import ResnetEncoder, SecondHeadResnet, SwinEncoder from inferno.models.DecaDecoder import Generator, GeneratorAdaIn from inferno.models.DecaFLAME import FLAME, FLAMETex, FLAME_mediapipe from inferno.models.EmotionMLP import EmotionMLP from inferno.datasets.AffWild2Dataset import Expression7 from inferno.datasets.AffectNetDataModule import AffectNetExpressions from inferno.utils.lightning_logging import _log_array_image, _log_wandb_image, _torch_image2np from enum import Enum from inferno.utils.other import class_from_str, get_path_to_assets from inferno.layers.losses.VGGLoss import VGG19Loss from omegaconf import OmegaConf, open_dict from inferno.models.temporal.external.LipReadingLoss import LipReadingLoss from .StarGAN import StarGANWrapper from inferno.models.EmoNetRegressor import EmoNetRegressor, EmonetRegressorStatic from .mica.config import get_cfg_defaults from .mica.mica import MICA from .mica.MicaInputProcessing import MicaInputProcessor from inferno.utils.other import get_path_to_assets from inferno.models.IO import locate_checkpoint
19,545
uv_z = uv_z * self.uv_face_eye_mask # detail vertices = coarse vertice + predicted displacement*normals + fixed displacement*normals uv_detail_vertices = uv_coarse_vertices + \ uv_z * uv_coarse_normals + \ self.fixed_uv_dis[None, None, :,:] * uv_coarse_normals #.detach() dense_vertices = uv_detail_vertices.permute(0, 2, 3, 1).reshape([batch_size, -1, 3]) uv_detail_normals = util.vertex_normals(dense_vertices, self.render.dense_faces.expand(batch_size, -1, -1)) uv_detail_normals = uv_detail_normals.reshape( [batch_size, uv_coarse_vertices.shape[2], uv_coarse_vertices.shape[3], 3]).permute(0, 3, 1, 2) # uv_detail_normals = uv_detail_normals*self.uv_face_eye_mask + uv_coarse_normals*(1-self.uv_face_eye_mask) # uv_detail_normals = util.gaussian_blur(uv_detail_normals) return uv_detail_normals, uv_coarse_vertices def visualize(self, visdict, savepath, catdim=1): grids = {} for key in visdict: # print(key) if visdict[key] is None: continue grids[key] = torchvision.utils.make_grid( F.interpolate(visdict[key], [self.config.image_size, self.config.image_size])).detach().cpu() grid = torch.cat(list(grids.values()), catdim) grid_image = (grid.numpy().transpose(1, 2, 0).copy() * 255)[:, :, [2, 1, 0]] grid_image = np.minimum(np.maximum(grid_image, 0), 255).astype(np.uint8) if savepath is not None: cv2.imwrite(savepath, grid_image) return grid_image def create_mesh(self, opdict, dense_template): ''' vertices: [nv, 3], tensor texture: [3, h, w], tensor ''' i = 0 vertices = opdict['verts'][i].cpu().numpy() faces = self.render.faces[0].cpu().numpy() if 'uv_texture_gt' in opdict.keys(): texture = util.tensor2image(opdict['uv_texture_gt'][i]) else: texture = None uvcoords = self.render.raw_uvcoords[0].cpu().numpy() uvfaces = self.render.uvfaces[0].cpu().numpy() # save coarse mesh, with texture and normal map if 'uv_detail_normals' in opdict.keys(): normal_map = util.tensor2image(opdict['uv_detail_normals'][i]*0.5 + 0.5) # upsample mesh, save detailed mesh texture = texture[:, :, [2, 1, 0]] normals = opdict['normals'][i].cpu().numpy() displacement_map = opdict['displacement_map'][i].detach().cpu().numpy().squeeze() dense_vertices, dense_colors, dense_faces = util.upsample_mesh(vertices, normals, faces, displacement_map, texture, dense_template) else: normal_map = None dense_vertices = None dense_colors = None dense_faces = None return vertices, faces, texture, uvcoords, uvfaces, normal_map, dense_vertices, dense_faces, dense_colors def save_obj(self, filename, opdict, dense_template, mode ='detail'): if mode not in ['coarse', 'detail', 'both']: raise ValueError(f"Invalid mode '{mode}. 
Expected modes are: 'coarse', 'detail', 'both'") vertices, faces, texture, uvcoords, uvfaces, normal_map, dense_vertices, dense_faces, dense_colors \ = self.create_mesh(opdict, dense_template) if mode == 'both': if isinstance(filename, list): filename_coarse = filename[0] filename_detail = filename[1] else: filename_coarse = filename filename_detail = filename.replace('.obj', '_detail.obj') elif mode == 'coarse': filename_coarse = filename else: filename_detail = filename if mode in ['coarse', 'both']: util.write_obj(str(filename_coarse), vertices, faces, texture=texture, uvcoords=uvcoords, uvfaces=uvfaces, normal_map=normal_map) if mode in ['detail', 'both']: util.write_obj(str(filename_detail), dense_vertices, dense_faces, colors = dense_colors, inverse_face_order=True) class ExpDECAInterface(object): """ This serves as an interface for EMOCA-like classes that need to use a different sub class but retain the EMOCA functionality. See EMICA_v2 for an example. """ def _create_model(self): # E_flame should be fixed for expression EMOCA self.E_flame.requires_grad_(False) # 2) add expression decoder if self.config.expression_backbone == 'deca_parallel': ## a) Attach a parallel flow of FCs onto the original DECA coarse backbone. (Only the second FC head is trainable) self.E_expression = SecondHeadResnet(self.E_flame, self.n_exp_param, 'same') elif self.config.expression_backbone == 'deca_clone': ## b) Clones the original DECA coarse decoder (and the entire decoder will be trainable) - This is in final EMOCA. #TODO this will only work for Resnet. Make this work for the other backbones (Swin) as well. self.E_expression = ResnetEncoder(self.n_exp_param) # clone parameters of the ResNet self.E_expression.encoder.load_state_dict(self.E_flame.encoder.state_dict()) elif self.config.expression_backbone == 'emonet_trainable': # Trainable EmoNet instead of Resnet (deprecated)
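In the cropped_code above, the 'deca_clone' expression backbone builds a second encoder and copies the coarse encoder's weights into it, so only the clone is trained while E_flame stays frozen. A minimal sketch of that clone-and-freeze pattern using a stock torchvision ResNet (the repo's ResnetEncoder/SwinEncoder classes are assumed, not reproduced here):

import torch
import torchvision

# Frozen "coarse" encoder and a trainable clone initialised from its weights.
coarse = torchvision.models.resnet18(weights=None)
coarse.requires_grad_(False)                     # analogous to E_flame.requires_grad_(False)

expression = torchvision.models.resnet18(weights=None)
expression.load_state_dict(coarse.state_dict())  # clone parameters, as in 'deca_clone'

x = torch.randn(2, 3, 224, 224)
with torch.no_grad():
    coarse_feat = coarse(x)                      # fixed branch, no gradients
expression_feat = expression(x)                  # only this branch receives gradients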
""" Author: Radek Danecek Copyright (c) 2022, Radek Danecek All rights reserved. # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is # holder of all proprietary rights on this computer program. # Using this computer program means that you agree to the terms # in the LICENSE file included with this software distribution. # Any use not explicitly granted by the LICENSE is prohibited. # # Copyright©2022 Max-Planck-Gesellschaft zur Förderung # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute # for Intelligent Systems. All rights reserved. # # For comments or questions, please email us at emoca@tue.mpg.de # For commercial licensing contact, please contact ps-license@tuebingen.mpg.de Parts of the code were adapted from the original DECA release: https://github.com/YadiraF/DECA/ """ # from time import time torch.backends.cudnn.benchmark = True class DecaMode(Enum): COARSE = 1 # when switched on, only coarse part of DECA-based networks is used DETAIL = 2 # when switched on, only coarse and detail part of DECA-based networks is used class DecaModule(LightningModule): """ DecaModule is a PL module that implements DECA-inspired face reconstruction networks. """ def __init__(self, model_params, learning_params, inout_params, stage_name = ""): """ :param model_params: a DictConfig of parameters about the model itself :param learning_params: a DictConfig of parameters corresponding to the learning process (such as optimizer, lr and others) :param inout_params: a DictConfig of parameters about input and output (where checkpoints and visualizations are saved) """ super().__init__() self.learning_params = learning_params self.inout_params = inout_params # detail conditioning - what is given as the conditioning input to the detail generator in detail stage training if 'detail_conditioning' not in model_params.keys(): # jaw, expression and detail code by default self.detail_conditioning = ['jawpose', 'expression', 'detail'] OmegaConf.set_struct(model_params, True) with open_dict(model_params): model_params.detail_conditioning = self.detail_conditioning else: self.detail_conditioning = model_params.detail_conditioning # deprecated and is not used if 'detailemo_conditioning' not in model_params.keys(): self.detailemo_conditioning = [] OmegaConf.set_struct(model_params, True) with open_dict(model_params): model_params.detailemo_conditioning = self.detailemo_conditioning else: self.detailemo_conditioning = model_params.detailemo_conditioning supported_conditioning_keys = ['identity', 'jawpose', 'expression', 'detail', 'detailemo'] for c in self.detail_conditioning: if c not in supported_conditioning_keys: raise ValueError(f"Conditioning on '{c}' is not supported. Supported conditionings: {supported_conditioning_keys}") for c in self.detailemo_conditioning: if c not in supported_conditioning_keys: raise ValueError(f"Conditioning on '{c}' is not supported. Supported conditionings: {supported_conditioning_keys}") # which type of DECA network is used if 'deca_class' not in model_params.keys() or model_params.deca_class is None: print(f"Deca class is not specified. 
Defaulting to {str(DECA.__class__.__name__)}") # vanilla DECA by default (not EMOCA) deca_class = DECA else: # other type of DECA-inspired networks possible (such as ExpDECA, which is what EMOCA) deca_class = class_from_str(model_params.deca_class, sys.modules[__name__]) # instantiate the network self.deca = deca_class(config=model_params) self.mode = DecaMode[str(model_params.mode).upper()] self.stage_name = stage_name if self.stage_name is None: self.stage_name = "" if len(self.stage_name) > 0: self.stage_name += "_" # initialize the emotion perceptual loss (used for EMOCA supervision) self.emonet_loss = None self._init_emotion_loss() # initialize the au perceptual loss (not currently used in EMOCA) self.au_loss = None self._init_au_loss() # initialize the lip reading perceptual loss (not currently used in original EMOCA) self.lipread_loss = None self._init_lipread_loss() # MPL regressor from the encoded space to emotion labels (not used in EMOCA but could be used for direct emotion supervision) if 'mlp_emotion_predictor' in self.deca.config.keys(): # self._build_emotion_mlp(self.deca.config.mlp_emotion_predictor) self.emotion_mlp = EmotionMLP(self.deca.config.mlp_emotion_predictor, model_params) else: self.emotion_mlp = None def get_input_image_size(self): return (self.deca.config.image_size, self.deca.config.image_size) def _instantiate_deca(self, model_params): """ Instantiate the DECA network. """ # which type of DECA network is used if 'deca_class' not in model_params.keys() or model_params.deca_class is None: print(f"Deca class is not specified. Defaulting to {str(DECA.__class__.__name__)}") # vanilla DECA by default (not EMOCA) deca_class = DECA else: # other type of DECA-inspired networks possible (such as ExpDECA, which is what EMOCA) deca_class = class_from_str(model_params.deca_class, sys.modules[__name__]) # instantiate the network self.deca = deca_class(config=model_params) def _init_emotion_loss(self): """ Initialize the emotion perceptual loss (used for EMOCA supervision) """ if 'emonet_weight' in self.deca.config.keys() and bool(self.deca.config.get('emonet_model_path', False)): if self.emonet_loss is not None: emoloss_force_override = True if 'emoloss_force_override' in self.deca.config.keys() and self.deca.config.emoloss_force_override else False if self.emonet_loss.is_trainable(): if not emoloss_force_override: print("The old emonet loss is trainable and will not be overrided or replaced.") return # raise NotImplementedError("The old emonet loss was trainable. Changing a trainable loss is probably now " # "what you want implicitly. If you need this, use the '`'emoloss_force_override' config.") else: print("The old emonet loss is trainable but override is set so it will be replaced.") else: print("The old emonet loss is not trainable. 
It will be replaced.") if 'emonet_model_path' in self.deca.config.keys(): emonet_model_path = self.deca.config.emonet_model_path else: emonet_model_path=None # self.emonet_loss = EmoNetLoss(self.device, emonet=emonet_model_path) emoloss_trainable = True if 'emoloss_trainable' in self.deca.config.keys() and self.deca.config.emoloss_trainable else False emoloss_dual = True if 'emoloss_dual' in self.deca.config.keys() and self.deca.config.emoloss_dual else False normalize_features = self.deca.config.normalize_features if 'normalize_features' in self.deca.config.keys() else None emo_feat_loss = self.deca.config.emo_feat_loss if 'emo_feat_loss' in self.deca.config.keys() else None old_emonet_loss = self.emonet_loss self.emonet_loss = create_emo_loss(self.device, emoloss=emonet_model_path, trainable=emoloss_trainable, dual=emoloss_dual, normalize_features=normalize_features, emo_feat_loss=emo_feat_loss) if old_emonet_loss is not None and type(old_emonet_loss) != self.emonet_loss: print(f"The old emonet loss {old_emonet_loss.__class__.__name__} is replaced during reconfiguration by " f"new emotion loss {self.emonet_loss.__class__.__name__}") else: self.emonet_loss = None def _init_au_loss(self): """ Initialize the au perceptual loss (not currently used in EMOCA) """ if 'au_loss' in self.deca.config.keys(): if self.au_loss is not None: force_override = True if 'force_override' in self.deca.config.au_loss.keys() \ and self.deca.config.au_loss.force_override else False if self.au_loss.is_trainable(): if not force_override: print("The old AU loss is trainable and will not be overrided or replaced.") return # raise NotImplementedError("The old emonet loss was trainable. Changing a trainable loss is probably now " # "what you want implicitly. If you need this, use the '`'emoloss_force_override' config.") else: print("The old AU loss is trainable but override is set so it will be replaced.") else: print("The old AU loss is not trainable. It will be replaced.") old_au_loss = self.emonet_loss self.au_loss = create_au_loss(self.device, self.deca.config.au_loss) else: self.au_loss = None def _init_lipread_loss(self): """ Initialize the au perceptual loss (not currently used in EMOCA) """ if 'lipread_loss' in self.deca.config.keys() and self.deca.config.lipread_loss.get('load', True): if self.lipread_loss is not None: force_override = True if 'force_override' in self.deca.config.lipread_loss.keys() \ and self.deca.config.lipread_loss.force_override else False assert self.lipread_loss.is_trainable(), "Trainable lip reading loss is not supported yet." if self.lipread_loss.is_trainable(): if not force_override: print("The old lip reading loss is trainable and will not be overrided or replaced.") return # raise NotImplementedError("The old emonet loss was trainable. Changing a trainable loss is probably now " # "what you want implicitly. If you need this, use the '`'emoloss_force_override' config.") else: print("The old lip reading loss is trainable but override is set so it will be replaced.") else: print("The old lip reading loss is not trainable. It will be replaced.") # old_lipread_loss = self.emonet_loss self.lipread_loss = LipReadingLoss(self.device, self.deca.config.lipread_loss.lipread_loss) self.lipread_loss.eval() self.lipread_loss.requires_grad_(False) else: self.lipread_loss = None def reconfigure(self, model_params, inout_params, learning_params, stage_name="", downgrade_ok=False, train=True): """ Reconfigure the model. 
Usually used to switch between detail and coarse stages (which have separate configs) """ if (self.mode == DecaMode.DETAIL and model_params.mode != DecaMode.DETAIL) and not downgrade_ok: raise RuntimeError("You're switching the EMOCA mode from DETAIL to COARSE. Is this really what you want?!") self.inout_params = inout_params self.learning_params = learning_params if self.deca.__class__.__name__ != model_params.deca_class: old_deca_class = self.deca.__class__.__name__ state_dict = self.deca.state_dict() if 'deca_class' in model_params.keys(): deca_class = class_from_str(model_params.deca_class, sys.modules[__name__]) else: deca_class = DECA self.deca = deca_class(config=model_params) diff = set(state_dict.keys()).difference(set(self.deca.state_dict().keys())) if len(diff) > 0: raise RuntimeError(f"Some values from old state dict will not be used. This is probably not what you " f"want because it most likely means that the pretrained model's weights won't be used. " f"Maybe you messed up backbone compatibility (i.e. SWIN vs ResNet?) {diff}") ret = self.deca.load_state_dict(state_dict, strict=False) if len(ret.unexpected_keys) > 0: raise print(f"Unexpected keys: {ret.unexpected_keys}") missing_modules = set([s.split(".")[0] for s in ret.missing_keys]) print(f"Missing modules when upgrading from {old_deca_class} to {model_params.deca_class}:") print(missing_modules) else: self.deca._reconfigure(model_params) self._init_emotion_loss() self._init_au_loss() self.stage_name = stage_name if self.stage_name is None: self.stage_name = "" if len(self.stage_name) > 0: self.stage_name += "_" self.mode = DecaMode[str(model_params.mode).upper()] self.train(mode=train) print(f"EMOCA MODE RECONFIGURED TO: {self.mode}") if 'shape_contrain_type' in self.deca.config.keys() and str(self.deca.config.shape_constrain_type).lower() != 'none': shape_constraint = self.deca.config.shape_constrain_type else: shape_constraint = None if 'expression_constrain_type' in self.deca.config.keys() and str(self.deca.config.expression_constrain_type).lower() != 'none': expression_constraint = self.deca.config.expression_constrain_type else: expression_constraint = None if shape_constraint is not None and expression_constraint is not None: raise ValueError("Both shape constraint and expression constraint are active. 
This is probably not what we want.") def uses_texture(self): """ Check if the model uses texture """ return self.deca.uses_texture() def visualize(self, visdict, savepath, catdim=1): return self.deca.visualize(visdict, savepath, catdim) def train(self, mode: bool = True): # super().train(mode) # not necessary self.deca.train(mode) if self.emotion_mlp is not None: self.emotion_mlp.train(mode) if self.emonet_loss is not None: self.emonet_loss.eval() if self.deca.perceptual_loss is not None: self.deca.perceptual_loss.eval() if self.deca.id_loss is not None: self.deca.id_loss.eval() return self def to(self, *args, **kwargs): super().to(*args, **kwargs) return self def cuda(self, device=None): super().cuda(device) return self def cpu(self): super().cpu() return self def forward(self, batch): values = self.encode(batch, training=False) values = self.decode(values, training=False) return values def _unwrap_list(self, codelist): shapecode, texcode, expcode, posecode, cam, lightcode = codelist return shapecode, texcode, expcode, posecode, cam, lightcode def _unwrap_list_to_dict(self, codelist): shapecode, texcode, expcode, posecode, cam, lightcode = codelist return {'shape': shapecode, 'tex': texcode, 'exp': expcode, 'pose': posecode, 'cam': cam, 'light': lightcode} # return shapecode, texcode, expcode, posecode, cam, lightcode def _encode_flame(self, images, **kwargs): if self.mode == DecaMode.COARSE or \ (self.mode == DecaMode.DETAIL and self.deca.config.train_coarse): # forward pass with gradients (for coarse stage (used), or detail stage with coarse training (not used)) parameters = self.deca._encode_flame(images, **kwargs) elif self.mode == DecaMode.DETAIL: # in detail stage, the coarse forward pass does not need gradients with torch.no_grad(): parameters = self.deca._encode_flame(images, **kwargs) else: raise ValueError(f"Invalid EMOCA Mode {self.mode}") code_list, original_code = self.deca.decompose_code(parameters) # shapecode, texcode, expcode, posecode, cam, lightcode = code_list # return shapecode, texcode, expcode, posecode, cam, lightcode, original_code return code_list, original_code def _expression_ring_exchange(self, original_batch_size, K, expcode, posecode, shapecode, lightcode, texcode, images, cam, lmk, masks, va, expr7, affectnetexp, detailcode=None, detailemocode=None, exprw=None, lmk_mp=None, mica_images=None): """ Deprecated. Expression ring exchange is not used in EMOCA (nor DECA). 
""" new_order = np.array([np.random.permutation(K) + i * K for i in range(original_batch_size)]) new_order = new_order.flatten() expcode_new = expcode[new_order] ## append new shape code data expcode = torch.cat([expcode, expcode_new], dim=0) texcode = torch.cat([texcode, texcode], dim=0) shapecode = torch.cat([shapecode, shapecode], dim=0) globpose = posecode[..., :3] jawpose = posecode[..., 3:] if self.deca.config.expression_constrain_use_jaw_pose: jawpose_new = jawpose[new_order] jawpose = torch.cat([jawpose, jawpose_new], dim=0) else: jawpose = torch.cat([jawpose, jawpose], dim=0) if self.deca.config.expression_constrain_use_global_pose: globpose_new = globpose[new_order] globpose = torch.cat([globpose, globpose_new], dim=0) else: globpose = torch.cat([globpose, globpose], dim=0) if self.deca.config.expression_constrain_use_jaw_pose or self.deca.config.expression_constrain_use_global_pose: posecode = torch.cat([globpose, jawpose], dim=-1) # posecode_new = torch.cat([globpose, jawpose], dim=-1) else: # posecode_new = posecode # posecode_new = posecode posecode = torch.cat([posecode, posecode], dim=0) cam = torch.cat([cam, cam], dim=0) lightcode = torch.cat([lightcode, lightcode], dim=0) ## append gt images = torch.cat([images, images], dim=0) # images = images.view(-1, images.shape[-3], images.shape[-2], images.shape[-1]) if mica_images is not None: mica_images = torch.cat([mica_images, mica_images], dim=0) lmk = torch.cat([lmk, lmk], dim=0) # lmk = lmk.view(-1, lmk.shape[-2], lmk.shape[-1]) if lmk_mp is not None: lmk_mp = torch.cat([lmk_mp, lmk_mp], dim=0) masks = torch.cat([masks, masks], dim=0) # NOTE: # Here we could think about what makes sense to exchange # 1) Do we exchange all emotion GT (VA and expression) within the ring? # 2) Do we exchange only the GT on which the ring is constructed (AffectNet ring based on binned VA or expression or Emonet feature?) # note: if we use EmoMLP that goes from (expression, jawpose, detailcode) -> (v,a,expr) and we exchange # ALL of these, the EmoMLP prediction will of course be the same. The output image still changes, # so EmoNet loss (if used) would be different. Same for the photometric/landmark losses. # TODO: # For now I decided to exchange everything but this should probably be experimented with # I would argue though, that exchanging the GT is the right thing to do if va is not None: va = torch.cat([va, va[new_order]], dim=0) if expr7 is not None: expr7 = torch.cat([expr7, expr7[new_order]], dim=0) if affectnetexp is not None: affectnetexp = torch.cat([affectnetexp, affectnetexp[new_order]], dim=0) if exprw is not None: exprw = torch.cat([exprw, exprw[new_order]], dim=0) if detailcode is not None: #TODO: to exchange or not to exchange, that is the question, the answer is probably NO detailcode = torch.cat([detailcode, detailcode], dim=0) # detailcode = torch.cat([detailcode, detailcode[new_order]], dim=0) if detailemocode is not None: # TODO: to exchange or not to exchange, that is the question, the answer is probably YES detailemocode = torch.cat([detailemocode, detailemocode[new_order]], dim=0) return expcode, posecode, shapecode, lightcode, texcode, images, cam, lmk, masks, va, expr7, affectnetexp, \ detailcode, detailemocode, exprw, lmk_mp, mica_images # return expcode, posecode, shapecode, lightcode, texcode, images, cam, lmk, masks, va, expr7 def encode(self, batch, training=True) -> dict: """ Forward encoding pass of the model. Takes a batch of images and returns the corresponding latent codes for each image. 
:param batch: Batch of images to encode. batch['image'] [batch_size, ring_size, 3, image_size, image_size]. For a training forward pass, additional corresponding data are necessery such as 'landmarks' and 'masks'. For a testing pass, the images suffice. :param training: Whether the forward pass is for training or testing. """ codedict = {} original_batch_size = batch['image'].shape[0] images = batch['image'] if 'mica_images' in batch.keys(): mica_images = batch['mica_images'] else: mica_images = None if len(images.shape) == 5: K = images.shape[1] elif len(images.shape) == 4: K = 1 else: raise RuntimeError("Invalid image batch dimensions.") # [B, K, 3, size, size] ==> [BxK, 3, size, size] images = images.view(-1, images.shape[-3], images.shape[-2], images.shape[-1]) if mica_images is not None: mica_images = mica_images.view(-1, mica_images.shape[-3], mica_images.shape[-2], mica_images.shape[-1]) if 'landmark' in batch.keys(): lmk = batch['landmark'] lmk = lmk.view(-1, lmk.shape[-2], lmk.shape[-1]) if 'landmark_mediapipe' in batch.keys(): lmk_mp = batch['landmark_mediapipe'] lmk_mp = lmk_mp.view(-1, lmk_mp.shape[-2], lmk_mp.shape[-1]) else: lmk_mp = None if 'mask' in batch.keys(): masks = batch['mask'] masks = masks.view(-1, images.shape[-2], images.shape[-1]) # valence / arousal - not necessary unless we want to use VA for supervision (not done in EMOCA) if 'va' in batch: va = batch['va'] va = va.view(-1, va.shape[-1]) else: va = None # 7 basic expression - not necessary unless we want to use expression for supervision (not done in EMOCA or DECA) if 'expr7' in batch: expr7 = batch['expr7'] expr7 = expr7.view(-1, expr7.shape[-1]) else: expr7 = None # affectnet basic expression - not necessary unless we want to use expression for supervision (not done in EMOCA or DECA) if 'affectnetexp' in batch: affectnetexp = batch['affectnetexp'] affectnetexp = affectnetexp.view(-1, affectnetexp.shape[-1]) else: affectnetexp = None # expression weights if supervising by expression is used (to balance the classification loss) - not done in EMOCA or DECA if 'expression_weight' in batch: exprw = batch['expression_weight'] exprw = exprw.view(-1, exprw.shape[-1]) else: exprw = None # 1) COARSE STAGE # forward pass of the coarse encoder # shapecode, texcode, expcode, posecode, cam, lightcode = self._encode_flame(images) code, original_code = self._encode_flame(images, mica_image=mica_images) shapecode, texcode, expcode, posecode, cam, lightcode = self._unwrap_list(code) if original_code is not None: original_code = self._unwrap_list_to_dict(original_code) if training: # If training, we employ the disentanglement strategy if self.mode == DecaMode.COARSE: if self.deca.config.shape_constrain_type == 'same': ## Enforce that all identity shape codes within ring are the same. The batch is duplicated ## and the duplicated part's shape codes are shuffled. 
# reshape shapecode => [B, K, n_shape] # shapecode_idK = shapecode.view(self.batch_size, self.deca.K, -1) shapecode_idK = shapecode.view(original_batch_size, K, -1) # get mean id shapecode_mean = torch.mean(shapecode_idK, dim=[1]) # shapecode_new = shapecode_mean[:, None, :].repeat(1, self.deca.K, 1) shapecode_new = shapecode_mean[:, None, :].repeat(1, K, 1) shapecode = shapecode_new.view(-1, self.deca._get_num_shape_params()) # do the same for the original code dict shapecode_orig = original_code['shape'] shapecode_orig_idK = shapecode_orig.view(original_batch_size, K, -1) shapecode_orig_mean = torch.mean(shapecode_orig_idK, dim=[1]) shapecode_orig_new = shapecode_orig_mean[:, None, :].repeat(1, K, 1) original_code['shape'] = shapecode_orig_new.view(-1, self.deca._get_num_shape_params()) elif self.deca.config.shape_constrain_type == 'exchange': ## Shuffle identitys shape codes within ring (they should correspond to the same identity) ''' make sure s0, s1 is something to make shape close the difference from ||so - s1|| is the later encourage s0, s1 is cloase in l2 space, but not really ensure shape will be close ''' # new_order = np.array([np.random.permutation(self.deca.config.train_K) + i * self.deca.config.train_K for i in range(self.deca.config.batch_size_train)]) # new_order = np.array([np.random.permutation(self.deca.config.train_K) + i * self.deca.config.train_K for i in range(original_batch_size)]) new_order = np.array([np.random.permutation(K) + i * K for i in range(original_batch_size)]) new_order = new_order.flatten() shapecode_new = shapecode[new_order] ## append new shape code data shapecode = torch.cat([shapecode, shapecode_new], dim=0) texcode = torch.cat([texcode, texcode], dim=0) expcode = torch.cat([expcode, expcode], dim=0) posecode = torch.cat([posecode, posecode], dim=0) cam = torch.cat([cam, cam], dim=0) lightcode = torch.cat([lightcode, lightcode], dim=0) ## append gt images = torch.cat([images, images], dim=0) # images = images.view(-1, images.shape[-3], images.shape[-2], images.shape[-1]) if mica_images is not None: mica_images = torch.cat([mica_images, mica_images], dim=0) lmk = torch.cat([lmk, lmk], dim=0) # lmk = lmk.view(-1, lmk.shape[-2], lmk.shape[-1]) if lmk_mp is not None: lmk_mp = torch.cat([lmk_mp, lmk_mp], dim=0) masks = torch.cat([masks, masks], dim=0) if va is not None: va = torch.cat([va, va], dim=0) if expr7 is not None: expr7 = torch.cat([expr7, expr7], dim=0) # do the same for the original code dict shapecode_orig = original_code['shape'] shapecode_orig_new = shapecode_orig[new_order] original_code['shape'] = torch.cat([shapecode_orig, shapecode_orig_new], dim=0) original_code['tex'] = torch.cat([original_code['tex'], original_code['tex']], dim=0) original_code['exp'] = torch.cat([original_code['exp'], original_code['exp']], dim=0) original_code['pose'] = torch.cat([original_code['pose'], original_code['pose']], dim=0) original_code['cam'] = torch.cat([original_code['cam'], original_code['cam']], dim=0) original_code['light'] = torch.cat([original_code['light'], original_code['light']], dim=0) elif self.deca.config.shape_constrain_type == 'shuffle_expression': assert original_code is not None ## DEPRECATED, NOT USED IN EMOCA OR DECA new_order = np.random.permutation(K*original_batch_size) old_order = np.arange(K*original_batch_size) while (new_order == old_order).any(): # ugly hacky way of assuring that every element is permuted new_order = np.random.permutation(K * original_batch_size) codedict['new_order'] = new_order # exchange expression 
expcode_new = expcode[new_order] expcode = torch.cat([expcode, expcode_new], dim=0) # exchange jaw pose (but not global pose) global_pose = posecode[:, :3] jaw_pose = posecode[:, 3:] jaw_pose_new = jaw_pose[new_order] jaw_pose = torch.cat([jaw_pose, jaw_pose_new], dim=0) global_pose = torch.cat([global_pose, global_pose], dim=0) posecode = torch.cat([global_pose, jaw_pose], dim=1) ## duplicate the rest shapecode = torch.cat([shapecode, shapecode], dim=0) texcode = torch.cat([texcode, texcode], dim=0) cam = torch.cat([cam, cam], dim=0) lightcode = torch.cat([lightcode, lightcode], dim=0) ## duplicate gt if any images = torch.cat([images, images], dim=0) # images = images.view(-1, images.shape[-3], images.shape[-2], images.shape[-1]) if mica_images is not None: mica_images = torch.cat([mica_images, mica_images], dim=0) print(f"TRAINING: {training}") if lmk is not None: lmk = torch.cat([lmk, lmk], dim=0) # lmk = lmk.view(-1, lmk.shape[-2], lmk.shape[-1]) if lmk_mp is not None: lmk_mp = torch.cat([lmk_mp, lmk_mp], dim=0) masks = torch.cat([masks, masks], dim=0) ref_images_identity_idxs = np.concatenate([old_order, old_order]) ref_images_expression_idxs = np.concatenate([old_order, new_order]) codedict["ref_images_identity_idxs"] = ref_images_identity_idxs codedict["ref_images_expression_idxs"] = ref_images_expression_idxs if va is not None: va = torch.cat([va, va[new_order]], dim=0) if expr7 is not None: expr7 = torch.cat([expr7, expr7[new_order]], dim=0) # do the same for the original code dict original_code['shape'] = torch.cat([original_code['shape'], original_code['shape']], dim=0) original_code['tex'] = torch.cat([original_code['tex'], original_code['tex']], dim=0) original_code['exp'] = torch.cat([original_code['exp'], original_code['exp'][new_order]], dim=0) original_global_pose = original_code['pose'][:, :3] original_jaw_pose = original_code['pose'][:, 3:] original_jaw_pose = torch.cat([original_jaw_pose, original_jaw_pose[new_order]], dim=0) original_global_pose = torch.cat([original_global_pose, original_global_pose], dim=0) original_code['pose'] = torch.cat([original_global_pose, original_jaw_pose], dim=1) original_code['cam'] = torch.cat([original_code['cam'], original_code['cam']], dim=0) original_code['light'] = torch.cat([original_code['light'], original_code['light']], dim=0) elif self.deca.config.shape_constrain_type == 'shuffle_shape': ## The shape codes are shuffled without duplication new_order = np.random.permutation(K*original_batch_size) old_order = np.arange(K*original_batch_size) while (new_order == old_order).any(): # ugly hacky way of assuring that every element is permuted new_order = np.random.permutation(K * original_batch_size) codedict['new_order'] = new_order shapecode_new = shapecode[new_order] ## append new shape code data shapecode = torch.cat([shapecode, shapecode_new], dim=0) texcode = torch.cat([texcode, texcode], dim=0) expcode = torch.cat([expcode, expcode], dim=0) posecode = torch.cat([posecode, posecode], dim=0) cam = torch.cat([cam, cam], dim=0) lightcode = torch.cat([lightcode, lightcode], dim=0) ## append gt images = torch.cat([images, images], dim=0) # images = images.view(-1, images.shape[-3], images.shape[-2], images.shape[-1]) if mica_images is not None: mica_images = torch.cat([mica_images, mica_images], dim=0) if lmk is not None: lmk = torch.cat([lmk, lmk], dim=0) # lmk = lmk.view(-1, lmk.shape[-2], lmk.shape[-1]) masks = torch.cat([masks, masks], dim=0) ref_images_identity_idxs = np.concatenate([old_order, new_order]) 
ref_images_expression_idxs = np.concatenate([old_order, old_order]) codedict["ref_images_identity_idxs"] = ref_images_identity_idxs codedict["ref_images_expression_idxs"] = ref_images_expression_idxs if va is not None: va = torch.cat([va, va], dim=0) if expr7 is not None: expr7 = torch.cat([expr7, expr7], dim=0) # do the same for the original code dict shapecode_orig = original_code['shape'] shapecode_orig_new = shapecode_orig[new_order] original_code['shape'] = torch.cat([shapecode_orig, shapecode_orig_new], dim=0) original_code['tex'] = torch.cat([original_code['tex'], original_code['tex']], dim=0) original_code['exp'] = torch.cat([original_code['exp'], original_code['exp']], dim=0) original_code['pose'] = torch.cat([original_code['pose'], original_code['pose']], dim=0) original_code['cam'] = torch.cat([original_code['cam'], original_code['cam']], dim=0) original_code['light'] = torch.cat([original_code['light'], original_code['light']], dim=0) original_code['ref_images_identity_idxs'] = ref_images_identity_idxs original_code['ref_images_expression_idxs'] = ref_images_expression_idxs elif 'expression_constrain_type' in self.deca.config.keys() and \ self.deca.config.expression_constrain_type == 'same': ## NOT USED IN EMOCA OR DECA, deprecated # reshape shapecode => [B, K, n_shape] # shapecode_idK = shapecode.view(self.batch_size, self.deca.K, -1) expcode_idK = expcode.view(original_batch_size, K, -1) # get mean id expcode_mean = torch.mean(expcode_idK, dim=[1]) # shapecode_new = shapecode_mean[:, None, :].repeat(1, self.deca.K, 1) expcode_new = expcode_mean[:, None, :].repeat(1, K, 1) expcode = expcode_new.view(-1, self.deca._get_num_shape_params()) # do the same thing for the original code dict expcode_idK = original_code['exp'].view(original_batch_size, K, -1) expcode_mean = torch.mean(expcode_idK, dim=[1]) expcode_new = expcode_mean[:, None, :].repeat(1, K, 1) original_code['exp'] = expcode_new.view(-1, self.deca._get_num_shape_params()) elif 'expression_constrain_type' in self.deca.config.keys() and \ self.deca.config.expression_constrain_type == 'exchange': ## NOT USED IN EMOCA OR DECA, deprecated expcode, posecode, shapecode, lightcode, texcode, images, cam, lmk, \ masks, va, expr7, affectnetexp, _, _, exprw, lmk_mp, mica_images = \ self._expression_ring_exchange(original_batch_size, K, expcode, posecode, shapecode, lightcode, texcode, images, cam, lmk, masks, va, expr7, affectnetexp, None, None, exprw, lmk_mp, mica_images) # (self, original_batch_size, K, # expcode, posecode, shapecode, lightcode, texcode, # images, cam, lmk, masks, va, expr7, affectnetexp, # detailcode=None, detailemocode=None, exprw=None): # 2) DETAIL STAGE if self.mode == DecaMode.DETAIL: all_detailcode = self.deca.E_detail(images) # identity-based detail code detailcode = all_detailcode[:, :self.deca.n_detail] # detail emotion code is deprecated and will be empty detailemocode = all_detailcode[:, self.deca.n_detail:(self.deca.n_detail + self.deca.n_detail_emo)] if training: # If training, we employ the disentanglement strategy if self.deca.config.detail_constrain_type == 'exchange': # Identity within the same ring should be the same, so they should have the same code. # This can be enforced by shuffling. The batch is duplicated and the duplicated part's code shuffled ''' make sure s0, s1 is something to make shape close the difference from ||so - s1|| is the later encourage s0, s1 is cloase in l2 space, but not really ensure shape will be close ''' # this creates a per-ring random permutation. 
The detail exchange happens ONLY between the same # identities (within the ring) but not outside (no cross-identity detail exchange) new_order = np.array( # [np.random.permutation(self.deca.config.train_K) + i * self.deca.config.train_K for i in range(original_batch_size)]) [np.random.permutation(K) + i * K for i in range(original_batch_size)]) new_order = new_order.flatten() detailcode_new = detailcode[new_order] detailcode = torch.cat([detailcode, detailcode_new], dim=0) detailemocode = torch.cat([detailemocode, detailemocode], dim=0) ## append new shape code data shapecode = torch.cat([shapecode, shapecode], dim=0) texcode = torch.cat([texcode, texcode], dim=0) expcode = torch.cat([expcode, expcode], dim=0) posecode = torch.cat([posecode, posecode], dim=0) cam = torch.cat([cam, cam], dim=0) lightcode = torch.cat([lightcode, lightcode], dim=0) ## append gt images = torch.cat([images, images], dim=0) # images = images.view(-1, images.shape[-3], images.shape[-2], images.shape[-1]) if mica_images is not None: mica_images = torch.cat([mica_images, mica_images], dim=0) lmk = torch.cat([lmk, lmk], dim=0) # lmk = lmk.view(-1, lmk.shape[-2], lmk.shape[-1]) masks = torch.cat([masks, masks], dim=0) if va is not None: va = torch.cat([va, va], dim=0) if expr7 is not None: expr7 = torch.cat([expr7, expr7], dim=0) elif self.deca.config.detail_constrain_type == 'shuffle_expression': ## Deprecated and not used in EMOCA or DECA new_order = np.random.permutation(K*original_batch_size) old_order = np.arange(K*original_batch_size) while (new_order == old_order).any(): # ugly hacky way of assuring that every element is permuted new_order = np.random.permutation(K * original_batch_size) codedict['new_order'] = new_order # exchange expression expcode_new = expcode[new_order] expcode = torch.cat([expcode, expcode_new], dim=0) # exchange emotion code, but not (identity-based) detailcode detailemocode_new = detailemocode[new_order] detailemocode = torch.cat([detailemocode, detailemocode_new], dim=0) detailcode = torch.cat([detailcode, detailcode], dim=0) # exchange jaw pose (but not global pose) global_pose = posecode[:, :3] jaw_pose = posecode[:, 3:] jaw_pose_new = jaw_pose[new_order] jaw_pose = torch.cat([jaw_pose, jaw_pose_new], dim=0) global_pose = torch.cat([global_pose, global_pose], dim=0) posecode = torch.cat([global_pose, jaw_pose], dim=1) ## duplicate the rest shapecode = torch.cat([shapecode, shapecode], dim=0) texcode = torch.cat([texcode, texcode], dim=0) cam = torch.cat([cam, cam], dim=0) lightcode = torch.cat([lightcode, lightcode], dim=0) ## duplicate gt if any images = torch.cat([images, images], dim=0) # images = images.view(-1, images.shape[-3], images.shape[-2], images.shape[-1]) if mica_images is not None: mica_images = torch.cat([mica_images, mica_images], dim=0) print(f"TRAINING: {training}") if lmk is not None: lmk = torch.cat([lmk, lmk], dim=0) # lmk = lmk.view(-1, lmk.shape[-2], lmk.shape[-1]) masks = torch.cat([masks, masks], dim=0) ref_images_identity_idxs = np.concatenate([old_order, old_order]) ref_images_expression_idxs = np.concatenate([old_order, new_order]) codedict["ref_images_identity_idxs"] = ref_images_identity_idxs codedict["ref_images_expression_idxs"] = ref_images_expression_idxs if va is not None: va = torch.cat([va, va[new_order]], dim=0) if expr7 is not None: expr7 = torch.cat([expr7, expr7[new_order]], dim=0) elif self.deca.config.detail_constrain_type == 'shuffle_shape': ## Shuffles teh shape code without duplicating the batch new_order = 
np.random.permutation(K*original_batch_size) old_order = np.arange(K*original_batch_size) while (new_order == old_order).any(): # ugly hacky way of assuring that every element is permuted new_order = np.random.permutation(K * original_batch_size) codedict['new_order'] = new_order shapecode_new = shapecode[new_order] ## append new shape code data shapecode = torch.cat([shapecode, shapecode_new], dim=0) # exchange (identity-based) detailcode, but not emotion code detailcode_new = detailcode[new_order] detailcode = torch.cat([detailcode, detailcode_new], dim=0) detailemocode = torch.cat([detailemocode, detailemocode], dim=0) texcode = torch.cat([texcode, texcode], dim=0) expcode = torch.cat([expcode, expcode], dim=0) posecode = torch.cat([posecode, posecode], dim=0) cam = torch.cat([cam, cam], dim=0) lightcode = torch.cat([lightcode, lightcode], dim=0) ## append gt images = torch.cat([images, images], dim=0) # images = images.view(-1, images.shape[-3], images.shape[-2], images.shape[-1]) if mica_images is not None: mica_images = torch.cat([mica_images, mica_images], dim=0) if lmk is not None: lmk = torch.cat([lmk, lmk], dim=0) # lmk = lmk.view(-1, lmk.shape[-2], lmk.shape[-1]) masks = torch.cat([masks, masks], dim=0) ref_images_identity_idxs = np.concatenate([old_order, new_order]) ref_images_expression_idxs = np.concatenate([old_order, old_order]) codedict["ref_images_identity_idxs"] = ref_images_identity_idxs codedict["ref_images_expression_idxs"] = ref_images_expression_idxs if va is not None: va = torch.cat([va, va], dim=0) if expr7 is not None: expr7 = torch.cat([expr7, expr7], dim=0) elif 'expression_constrain_type' in self.deca.config.keys() and \ self.deca.config.expression_constrain_type == 'exchange': expcode, posecode, shapecode, lightcode, texcode, images, cam, lmk, masks, va, expr7, affectnetexp, detailcode, detailemocode, exprw, lmk_mp, mica_images = \ self._expression_ring_exchange(original_batch_size, K, expcode, posecode, shapecode, lightcode, texcode, images, cam, lmk, masks, va, expr7, affectnetexp, detailcode, detailemocode, exprw, lmk_mp, mica_images) codedict['shapecode'] = shapecode codedict['texcode'] = texcode codedict['expcode'] = expcode codedict['posecode'] = posecode codedict['cam'] = cam codedict['lightcode'] = lightcode if self.mode == DecaMode.DETAIL: codedict['detailcode'] = detailcode codedict['detailemocode'] = detailemocode codedict['images'] = images if mica_images is not None: codedict['mica_images'] = mica_images if 'mask' in batch.keys(): codedict['masks'] = masks if 'landmark' in batch.keys(): codedict['lmk'] = lmk if lmk_mp is not None: codedict['lmk_mp'] = lmk_mp if 'va' in batch.keys(): codedict['va'] = va if 'expr7' in batch.keys(): codedict['expr7'] = expr7 if 'affectnetexp' in batch.keys(): codedict['affectnetexp'] = affectnetexp if 'expression_weight' in batch.keys(): codedict['expression_weight'] = exprw if original_code is not None: codedict['original_code'] = original_code return codedict def _create_conditioning_lists(self, codedict, condition_list): detail_conditioning_list = [] if 'globalpose' in condition_list: detail_conditioning_list += [codedict["posecode"][:, :3]] if 'jawpose' in condition_list: detail_conditioning_list += [codedict["posecode"][:, 3:]] if 'identity' in condition_list: detail_conditioning_list += [codedict["shapecode"]] if 'expression' in condition_list: detail_conditioning_list += [codedict["expcode"]] if isinstance(self.deca.D_detail, Generator): # the detail codes might be excluded from conditioning based on the 
Generator architecture (for instance # for AdaIn Generator) if 'detail' in condition_list: detail_conditioning_list += [codedict["detailcode"]] if 'detailemo' in condition_list: detail_conditioning_list += [codedict["detailemocode"]] return detail_conditioning_list def decode(self, codedict, training=True, render=True, **kwargs) -> dict: """ Forward decoding pass of the model. Takes the latent code predicted by the encoding stage and reconstructs and renders the shape. :param codedict: Batch dict of the predicted latent codes :param training: Whether the forward pass is for training or testing. """ shapecode = codedict['shapecode'] expcode = codedict['expcode'] posecode = codedict['posecode'] texcode = codedict['texcode'] cam = codedict['cam'] lightcode = codedict['lightcode'] images = codedict['images'] if 'masks' in codedict.keys(): masks = codedict['masks'] else: masks = None effective_batch_size = images.shape[0] # this is the current batch size after all training augmentations modifications # 1) Reconstruct the face mesh # FLAME - world space if not isinstance(self.deca.flame, FLAME_mediapipe): verts, landmarks2d, landmarks3d = self.deca.flame(shape_params=shapecode, expression_params=expcode, pose_params=posecode) landmarks2d_mediapipe = None else: verts, landmarks2d, landmarks3d, landmarks2d_mediapipe = self.deca.flame(shapecode, expcode, posecode) # world to camera trans_verts = util.batch_orth_proj(verts, cam) predicted_landmarks = util.batch_orth_proj(landmarks2d, cam)[:, :, :2] # camera to image space trans_verts[:, :, 1:] = -trans_verts[:, :, 1:] predicted_landmarks[:, :, 1:] = - predicted_landmarks[:, :, 1:] if landmarks2d_mediapipe is not None: predicted_landmarks_mediapipe = util.batch_orth_proj(landmarks2d_mediapipe, cam)[:, :, :2] predicted_landmarks_mediapipe[:, :, 1:] = - predicted_landmarks_mediapipe[:, :, 1:] if self.uses_texture(): albedo = self.deca.flametex(texcode) else: # if not using texture, default to gray albedo = torch.ones([effective_batch_size, 3, self.deca.config.uv_size, self.deca.config.uv_size], device=images.device) * 0.5 # 2) Render the coarse image if render: ops = self.deca.render(verts, trans_verts, albedo, lightcode) # mask mask_face_eye = F.grid_sample(self.deca.uv_face_eye_mask.expand(effective_batch_size, -1, -1, -1), ops['grid'].detach(), align_corners=False) # images predicted_images = ops['images'] # predicted_images = ops['images'] * mask_face_eye * ops['alpha_images'] # predicted_images_no_mask = ops['images'] #* mask_face_eye * ops['alpha_images'] segmentation_type = None if isinstance(self.deca.config.useSeg, bool): if self.deca.config.useSeg: segmentation_type = 'gt' else: segmentation_type = 'rend' elif isinstance(self.deca.config.useSeg, str): segmentation_type = self.deca.config.useSeg else: raise RuntimeError(f"Invalid 'useSeg' type: '{type(self.deca.config.useSeg)}'") if segmentation_type not in ["gt", "rend", "intersection", "union"]: raise ValueError(f"Invalid segmentation type for masking '{segmentation_type}'") if masks is None: # if mask not provided, the only mask available is the rendered one segmentation_type = 'rend' elif masks.shape[-1] != predicted_images.shape[-1] or masks.shape[-2] != predicted_images.shape[-2]: # resize masks if need be (this is only done if configuration was changed at some point after training) dims = masks.ndim == 3 if dims: masks = masks[:, None, :, :] masks = F.interpolate(masks, size=predicted_images.shape[-2:], mode='bilinear') if dims: masks = masks[:, 0, ...] 
# resize images if need be (this is only done if configuration was changed at some point after training) if images.shape[-1] != predicted_images.shape[-1] or images.shape[-2] != predicted_images.shape[-2]: ## special case only for inference time if the rendering image sizes have been changed images_resized = F.interpolate(images, size=predicted_images.shape[-2:], mode='bilinear') else: images_resized = images # what type of segmentation we use if segmentation_type == "gt": # GT stands for external segmetnation predicted by face parsing or similar masks = masks[:, None, :, :] elif segmentation_type == "rend": # mask rendered as a silhouette of the face mesh masks = mask_face_eye * ops['alpha_images'] elif segmentation_type == "intersection": # intersection of the two above masks = masks[:, None, :, :] * mask_face_eye * ops['alpha_images'] elif segmentation_type == "union": # union of the first two options masks = torch.max(masks[:, None, :, :], mask_face_eye * ops['alpha_images']) else: raise RuntimeError(f"Invalid segmentation type for masking '{segmentation_type}'") if self.deca.config.background_from_input in [True, "input"]: if images.shape[-1] != predicted_images.shape[-1] or images.shape[-2] != predicted_images.shape[-2]: ## special case only for inference time if the rendering image sizes have been changed predicted_images = (1. - masks) * images_resized + masks * predicted_images else: predicted_images = (1. - masks) * images + masks * predicted_images elif self.deca.config.background_from_input in [False, "black"]: predicted_images = masks * predicted_images elif self.deca.config.background_from_input in ["none"]: predicted_images = predicted_images else: raise ValueError(f"Invalid type of background modification {self.deca.config.background_from_input}") # 3) Render the detail image if self.mode == DecaMode.DETAIL: detailcode = codedict['detailcode'] detailemocode = codedict['detailemocode'] # a) Create the detail conditioning lists detail_conditioning_list = self._create_conditioning_lists(codedict, self.detail_conditioning) detailemo_conditioning_list = self._create_conditioning_lists(codedict, self.detailemo_conditioning) final_detail_conditioning_list = detail_conditioning_list + detailemo_conditioning_list # b) Pass the detail code and the conditions through the detail generator to get displacement UV map if isinstance(self.deca.D_detail, Generator): uv_z = self.deca.D_detail(torch.cat(final_detail_conditioning_list, dim=1)) elif isinstance(self.deca.D_detail, GeneratorAdaIn): uv_z = self.deca.D_detail(z=torch.cat([detailcode, detailemocode], dim=1), cond=torch.cat(final_detail_conditioning_list, dim=1)) else: raise ValueError(f"This class of generarator is not supported: '{self.deca.D_detail.__class__.__name__}'") # if there is a displacement mask, apply it (DEPRECATED and not USED in DECA or EMOCA) if hasattr(self.deca, 'displacement_mask') and self.deca.displacement_mask is not None: if 'apply_displacement_masks' in self.deca.config.keys() and self.deca.config.apply_displacement_masks: uv_z = uv_z * self.deca.displacement_mask # uv_z = self.deca.D_detail(torch.cat([posecode[:, 3:], expcode, detailcode], dim=1)) # render detail if render: detach_from_coarse_geometry = not self.deca.config.train_coarse uv_detail_normals, uv_coarse_vertices = self.deca.displacement2normal(uv_z, verts, ops['normals'], detach=detach_from_coarse_geometry) uv_shading = self.deca.render.add_SHlight(uv_detail_normals, lightcode.detach()) uv_texture = albedo.detach() * uv_shading # batch size X 
image_rows X image_cols X 2 # you can query the grid for UV values of the face mesh at pixel locations grid = ops['grid'] if detach_from_coarse_geometry: # if the grid is detached, the gradient of the positions of UV-values in image space won't flow back to the geometry grid = grid.detach() predicted_detailed_image = F.grid_sample(uv_texture, grid, align_corners=False) if self.deca.config.background_from_input in [True, "input"]: if images.shape[-1] != predicted_images.shape[-1] or images.shape[-2] != predicted_images.shape[-2]: ## special case only for inference time if the rendering image sizes have been changed # images_resized = F.interpolate(images, size=predicted_images.shape[-2:], mode='bilinear') ## before bugfix # predicted_images = (1. - masks) * images_resized + masks * predicted_images ## after bugfix predicted_detailed_image = (1. - masks) * images_resized + masks * predicted_detailed_image else: predicted_detailed_image = (1. - masks) * images + masks * predicted_detailed_image elif self.deca.config.background_from_input in [False, "black"]: predicted_detailed_image = masks * predicted_detailed_image elif self.deca.config.background_from_input in ["none"]: predicted_detailed_image = predicted_detailed_image else: raise ValueError(f"Invalid type of background modification {self.deca.config.background_from_input}") # --- extract texture uv_pverts = self.deca.render.world2uv(trans_verts).detach() uv_gt = F.grid_sample(torch.cat([images_resized, masks], dim=1), uv_pverts.permute(0, 2, 3, 1)[:, :, :, :2], mode='bilinear') uv_texture_gt = uv_gt[:, :3, :, :].detach() uv_mask_gt = uv_gt[:, 3:, :, :].detach() # self-occlusion normals = util.vertex_normals(trans_verts, self.deca.render.faces.expand(effective_batch_size, -1, -1)) uv_pnorm = self.deca.render.world2uv(normals) uv_mask = (uv_pnorm[:, -1, :, :] < -0.05).float().detach() uv_mask = uv_mask[:, None, :, :] ## combine masks uv_vis_mask = uv_mask_gt * uv_mask * self.deca.uv_face_eye_mask else: uv_detail_normals = None predicted_detailed_image = None ## 4) (Optional) NEURAL RENDERING - not used in neither DECA nor EMOCA # If neural rendering is enabled, the differentiable rendered synthetic images are translated using an image translation net (such as StarGan) predicted_translated_image = None predicted_detailed_translated_image = None translated_uv_texture = None if render: if self.deca._has_neural_rendering(): predicted_translated_image = self.deca.image_translator( { "input_image" : predicted_images, "ref_image" : images, "target_domain" : torch.tensor([0]*predicted_images.shape[0], dtype=torch.int64, device=predicted_images.device) } ) if self.mode == DecaMode.DETAIL: predicted_detailed_translated_image = self.deca.image_translator( { "input_image" : predicted_detailed_image, "ref_image" : images, "target_domain" : torch.tensor([0]*predicted_detailed_image.shape[0], dtype=torch.int64, device=predicted_detailed_image.device) } ) translated_uv = F.grid_sample(torch.cat([predicted_detailed_translated_image, masks], dim=1), uv_pverts.permute(0, 2, 3, 1)[:, :, :, :2], mode='bilinear') translated_uv_texture = translated_uv[:, :3, :, :].detach() else: predicted_detailed_translated_image = None translated_uv_texture = None # no need in coarse mode # translated_uv = F.grid_sample(torch.cat([predicted_translated_image, masks], dim=1), uv_pverts.permute(0, 2, 3, 1)[:, :, :, :2], # mode='bilinear') # translated_uv_texture = translated_uv_gt[:, :3, :, :].detach() if self.emotion_mlp is not None: codedict = self.emotion_mlp(codedict, 
"emo_mlp_") # populate the value dict for metric computation/visualization if render: codedict['predicted_images'] = predicted_images codedict['predicted_detailed_image'] = predicted_detailed_image codedict['predicted_translated_image'] = predicted_translated_image codedict['ops'] = ops codedict['normals'] = ops['normals'] codedict['mask_face_eye'] = mask_face_eye codedict['verts'] = verts codedict['albedo'] = albedo codedict['landmarks2d'] = landmarks2d codedict['landmarks3d'] = landmarks3d codedict['predicted_landmarks'] = predicted_landmarks if landmarks2d_mediapipe is not None: codedict['predicted_landmarks_mediapipe'] = predicted_landmarks_mediapipe codedict['trans_verts'] = trans_verts codedict['masks'] = masks if self.mode == DecaMode.DETAIL: if render: codedict['predicted_detailed_translated_image'] = predicted_detailed_translated_image codedict['translated_uv_texture'] = translated_uv_texture codedict['uv_texture_gt'] = uv_texture_gt codedict['uv_texture'] = uv_texture codedict['uv_detail_normals'] = uv_detail_normals codedict['uv_shading'] = uv_shading codedict['uv_vis_mask'] = uv_vis_mask codedict['uv_mask'] = uv_mask codedict['uv_z'] = uv_z codedict['displacement_map'] = uv_z + self.deca.fixed_uv_dis[None, None, :, :] return codedict def _compute_emotion_loss(self, images, predicted_images, loss_dict, metric_dict, prefix, va=None, expr7=None, with_grad=True, batch_size=None, ring_size=None): def loss_or_metric(name, loss, is_loss): if not is_loss: metric_dict[name] = loss else: loss_dict[name] = loss # if self.deca.config.use_emonet_loss: if with_grad: d = loss_dict emo_feat_loss_1, emo_feat_loss_2, valence_loss, arousal_loss, expression_loss, au_loss = \ self.emonet_loss.compute_loss(images, predicted_images, batch_size=batch_size, ring_size=ring_size) else: d = metric_dict with torch.no_grad(): emo_feat_loss_1, emo_feat_loss_2, valence_loss, arousal_loss, expression_loss, au_loss = \ self.emonet_loss.compute_loss(images, predicted_images, batch_size=batch_size, ring_size=ring_size) # EmoNet self-consistency loss terms if emo_feat_loss_1 is not None: loss_or_metric(prefix + '_emonet_feat_1_L1', emo_feat_loss_1 * self.deca.config.emonet_weight, self.deca.config.use_emonet_feat_1 and self.deca.config.use_emonet_loss) loss_or_metric(prefix + '_emonet_feat_2_L1', emo_feat_loss_2 * self.deca.config.emonet_weight, self.deca.config.use_emonet_feat_2 and self.deca.config.use_emonet_loss) loss_or_metric(prefix + '_emonet_valence_L1', valence_loss * self.deca.config.emonet_weight, self.deca.config.use_emonet_valence and self.deca.config.use_emonet_loss) loss_or_metric(prefix + '_emonet_arousal_L1', arousal_loss * self.deca.config.emonet_weight, self.deca.config.use_emonet_arousal and self.deca.config.use_emonet_loss) # loss_or_metric(prefix + 'emonet_expression_KL', expression_loss * self.deca.config.emonet_weight) # KL seems to be causing NaN's loss_or_metric(prefix + '_emonet_expression_L1',expression_loss * self.deca.config.emonet_weight, self.deca.config.use_emonet_expression and self.deca.config.use_emonet_loss) loss_or_metric(prefix + '_emonet_combined', ((emo_feat_loss_1 if emo_feat_loss_1 is not None else 0) + emo_feat_loss_2 + valence_loss + arousal_loss + expression_loss) * self.deca.config.emonet_weight, self.deca.config.use_emonet_combined and self.deca.config.use_emonet_loss) # Log also the VA metric_dict[prefix + "_valence_input"] = self.emonet_loss.input_emotion['valence'].mean().detach() metric_dict[prefix + "_valence_output"] = 
self.emonet_loss.output_emotion['valence'].mean().detach() metric_dict[prefix + "_arousal_input"] = self.emonet_loss.input_emotion['arousal'].mean().detach() metric_dict[prefix + "_arousal_output"] = self.emonet_loss.output_emotion['arousal'].mean().detach() input_ex = self.emonet_loss.input_emotion['expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification'].detach().cpu().numpy() input_ex = np.argmax(input_ex, axis=1).mean() output_ex = self.emonet_loss.output_emotion['expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification'].detach().cpu().numpy() output_ex = np.argmax(output_ex, axis=1).mean() metric_dict[prefix + "_expression_input"] = torch.tensor(input_ex, device=self.device) metric_dict[prefix + "_expression_output"] = torch.tensor(output_ex, device=self.device) # # GT emotion loss terms # if self.deca.config.use_gt_emotion_loss: # d = loss_dict # else: # d = metric_dict # TODO: uncomment this after you handle the case when certain entries are NaN (GT missing, not a bug) # if va is not None: # d[prefix + 'emo_sup_val_L1'] = F.l1_loss(self.emonet_loss.output_emotion['valence'], va[:, 0]) \ # * self.deca.config.gt_emotion_reg # d[prefix + 'emo_sup_ar_L1'] = F.l1_loss(self.emonet_loss.output_emotion['arousal'], va[:, 1]) \ # * self.deca.config.gt_emotion_reg # # metric_dict[prefix + "_valence_gt"] = va[:, 0].mean().detach() # metric_dict[prefix + "_arousal_gt"] = va[:, 1].mean().detach() # # if expr7 is not None: # affectnet_gt = [expr7_to_affect_net(int(expr7[i])).value for i in range(len(expr7))] # affectnet_gt = torch.tensor(np.array(affectnet_gt), device=self.device, dtype=torch.long) # d[prefix + '_emo_sup_expr_CE'] = F.cross_entropy(self.emonet_loss.output_emotion['expression'], affectnet_gt) * self.deca.config.gt_emotion_reg # metric_dict[prefix + "_expr_gt"] = affectnet_gt.mean().detach() def _compute_au_loss(self, images, predicted_images, loss_dict, metric_dict, prefix, au=None, with_grad=True): def loss_or_metric(name, loss, is_loss): if not is_loss: metric_dict[name] = loss else: loss_dict[name] = loss # if self.deca.config.use_emonet_loss: if with_grad: d = loss_dict au_feat_loss_1, au_feat_loss_2, _, _, _, au_loss = \ self.au_loss.compute_loss(images, predicted_images) else: d = metric_dict with torch.no_grad(): au_feat_loss_1, au_feat_loss_2, _, _, _, au_loss = \ self.au_loss.compute_loss(images, predicted_images) # EmoNet self-consistency loss terms if au_feat_loss_1 is not None: loss_or_metric(prefix + '_au_feat_1_L1', au_feat_loss_1 * self.deca.config.au_loss.au_weight, self.deca.config.au_loss.use_feat_1 and self.deca.config.au_loss.use_as_loss) loss_or_metric(prefix + '_au_feat_2_L1', au_feat_loss_2 * self.deca.config.au_loss.au_weight, self.deca.config.au_loss.use_feat_2 and self.deca.config.au_loss.use_as_loss) loss_or_metric(prefix + '_au_loss', au_loss * self.deca.config.au_loss.au_weight, self.deca.config.au_loss.use_aus and self.deca.config.au_loss.use_as_loss) # loss_or_metric(prefix + '_au_losses_L1', arousal_loss * self.deca.config.au_loss.au_weight, # self.deca.config.au_loss.use_emonet_arousal and self.deca.config.au_loss.use_as_loss) # loss_or_metric(prefix + 'emonet_expression_KL', expression_loss * self.deca.config.au_loss.au_weight) # KL seems to be causing NaN's # # Log also the VA # metric_dict[prefix + "_valence_input"] = self.emonet_loss.input_emotion['valence'].mean().detach() # metric_dict[prefix + "_valence_output"] = 
self.emonet_loss.output_emotion['valence'].mean().detach() # metric_dict[prefix + "_arousal_input"] = self.emonet_loss.input_emotion['arousal'].mean().detach() # metric_dict[prefix + "_arousal_output"] = self.emonet_loss.output_emotion['arousal'].mean().detach() # input_ex = self.emonet_loss.input_emotion['expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification'].detach().cpu().numpy() # input_ex = np.argmax(input_ex, axis=1).mean() # output_ex = self.emonet_loss.output_emotion['expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification'].detach().cpu().numpy() # output_ex = np.argmax(output_ex, axis=1).mean() # metric_dict[prefix + "_expression_input"] = torch.tensor(input_ex, device=self.device) # metric_dict[prefix + "_expression_output"] = torch.tensor(output_ex, device=self.device) # # GT emotion loss terms # if self.deca.config.use_gt_emotion_loss: # d = loss_dict # else: # d = metric_dict def _cut_mouth_vectorized(self, images, landmarks, convert_grayscale=True): # mouth_window_margin = 12 mouth_window_margin = 1 # not temporal mouth_crop_height = 96 mouth_crop_width = 96 mouth_landmark_start_idx = 48 mouth_landmark_stop_idx = 68 B, T = images.shape[:2] landmarks = landmarks.to(torch.float32) with torch.no_grad(): image_size = images.shape[-1] / 2 landmarks = landmarks * image_size + image_size # #1) smooth the landmarks with temporal convolution # landmarks are of shape (T, 68, 2) # reshape to (T, 136) landmarks_t = landmarks.reshape(*landmarks.shape[:2], -1) # make temporal dimension last landmarks_t = landmarks_t.permute(0, 2, 1) # change chape to (N, 136, T) # landmarks_t = landmarks_t.unsqueeze(0) # smooth with temporal convolution temporal_filter = torch.ones(mouth_window_margin, device=images.device) / mouth_window_margin # pad the the landmarks landmarks_t_padded = F.pad(landmarks_t, (mouth_window_margin // 2, mouth_window_margin // 2), mode='replicate') # convolve each channel separately with the temporal filter num_channels = landmarks_t.shape[1] if temporal_filter.numel() > 1: smooth_landmarks_t = F.conv1d(landmarks_t_padded, temporal_filter.unsqueeze(0).unsqueeze(0).expand(num_channels,1,temporal_filter.numel()), groups=num_channels, padding='valid' ) smooth_landmarks_t = smooth_landmarks_t[..., 0:landmarks_t.shape[-1]] else: smooth_landmarks_t = landmarks_t # reshape back to the original shape smooth_landmarks_t = smooth_landmarks_t.permute(0, 2, 1).view(landmarks.shape) smooth_landmarks_t = smooth_landmarks_t + landmarks.mean(dim=2, keepdims=True) - smooth_landmarks_t.mean(dim=2, keepdims=True) # #2) get the mouth landmarks mouth_landmarks_t = smooth_landmarks_t[..., mouth_landmark_start_idx:mouth_landmark_stop_idx, :] # #3) get the mean of the mouth landmarks mouth_landmarks_mean_t = mouth_landmarks_t.mean(dim=-2, keepdims=True) # #4) get the center of the mouth center_x_t = mouth_landmarks_mean_t[..., 0] center_y_t = mouth_landmarks_mean_t[..., 1] # #5) use grid_sample to crop the mouth in every image # create the grid height = mouth_crop_height//2 width = mouth_crop_width//2 torch.arange(0, mouth_crop_width, device=images.device) grid = torch.stack(torch.meshgrid(torch.linspace(-height, height, mouth_crop_height).to(images.device) / (images.shape[-2] /2), torch.linspace(-width, width, mouth_crop_width).to(images.device) / (images.shape[-1] /2) ), dim=-1) grid = grid[..., [1, 0]] grid = grid.unsqueeze(0).unsqueeze(0).repeat(*images.shape[:2], 1, 1, 1) center_x_t -= images.shape[-1] / 2 
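# (added note, not part of the original EMOCA/DECA source) F.grid_sample expects sampling locations in
# normalized coordinates, so the pixel-space mouth centers are converted here: subtracting half the image
# width/height and then dividing by that same half-size maps [0, W] x [0, H] onto [-1, 1] x [-1, 1]
# (e.g. a center at x = W/2 becomes 0.0 and x = W becomes 1.0). The normalized offsets are then added to
# the crop grid before sampling the mouth region.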
center_y_t -= images.shape[-2] / 2 center_x_t /= images.shape[-1] / 2 center_y_t /= images.shape[-2] / 2 grid = grid + torch.cat([center_x_t, center_y_t ], dim=-1).unsqueeze(-2).unsqueeze(-2) images = images.view(B*T, *images.shape[2:]) grid = grid.view(B*T, *grid.shape[2:]) if convert_grayscale: images = F_v.rgb_to_grayscale(images) image_crops = F.grid_sample( images, grid, align_corners=True, padding_mode='zeros', mode='bicubic' ) image_crops = image_crops.view(B, T, *image_crops.shape[1:]) if convert_grayscale: image_crops = image_crops#.squeeze(1) # import matplotlib.pyplot as plt # plt.figure() # plt.imshow(image_crops[0, 0].permute(1,2,0).cpu().numpy()) # plt.show() # plt.figure() # plt.imshow(image_crops[0, 10].permute(1,2,0).cpu().numpy()) # plt.show() # plt.figure() # plt.imshow(image_crops[0, 20].permute(1,2,0).cpu().numpy()) # plt.show() # plt.figure() # plt.imshow(image_crops[1, 0].permute(1,2,0).cpu().numpy()) # plt.show() # plt.figure() # plt.imshow(image_crops[1, 10].permute(1,2,0).cpu().numpy()) # plt.show() # plt.figure() # plt.imshow(image_crops[1, 20].permute(1,2,0).cpu().numpy()) # plt.show() return image_crops def _compute_lipread_loss(self, images, predicted_images, landmarks, predicted_landmarks, loss_dict, metric_dict, prefix, with_grad=True): def loss_or_metric(name, loss, is_loss): if not is_loss: metric_dict[name] = loss else: loss_dict[name] = loss # shape of images is: (B, R, C, H, W) # convert to (B * R, 1, H, W, C) images = images.unsqueeze(1) predicted_images = predicted_images.unsqueeze(1) landmarks = landmarks.unsqueeze(1) predicted_landmarks = predicted_landmarks.unsqueeze(1) # cut out the mouth region images_mouth = self._cut_mouth_vectorized(images, landmarks) predicted_images_mouth = self._cut_mouth_vectorized(predicted_images, predicted_landmarks) # make sure that the lip reading net interprests things with depth=1, # if self.deca.config.use_emonet_loss: if with_grad: d = loss_dict loss = self.lipread_loss.compute_loss(images_mouth, predicted_images_mouth) else: d = metric_dict with torch.no_grad(): loss = self.lipread_loss.compute_loss(images_mouth, predicted_images_mouth) d[prefix + '_lipread'] = loss * self.deca.config.lipread_loss.weight def _metric_or_loss(self, loss_dict, metric_dict, is_loss): if is_loss: d = loss_dict else: d = metric_dict return d def _compute_id_loss(self, codedict, batch, training, testing, losses, batch_size, ring_size): # if self.deca.config.idw > 1e-3: if self.deca.id_loss is not None: images = codedict["images"] ops = codedict["ops"] mask_face_eye = codedict["mask_face_eye"] shading_images = self.deca.render.add_SHlight(ops['normal_images'], codedict["lightcode"].detach()) albedo_images = F.grid_sample(codedict["albedo"].detach(), ops['grid'], align_corners=False) # TODO: get to the bottom of this weird overlay thing - why is it there? 
# answer: This renders the face and takes background from the image overlay = albedo_images * shading_images * mask_face_eye + images * (1 - mask_face_eye) if self.global_step >= self.deca.id_loss_start_step: if 'id_metric' in self.deca.config.keys() and 'barlow_twins' in self.deca.config.id_metric: assert ring_size == 1 or ring_size == 2 effective_bs = images.shape[0] # losses['identity'] = self.deca.id_loss(overlay, images, batch_size=batch_size, # ring_size=ring_size) * self.deca.config.idw if "ref_images_identity_idxs" in codedict.keys(): # in case there was shuffling, this ensures that the proper images are used for identity loss images_ = images[codedict["ref_images_identity_idxs"]] else: images_ = images losses['identity'] = self.deca.id_loss(overlay, images_, batch_size=effective_bs, ring_size=1) * self.deca.config.idw if 'id_contrastive' in self.deca.config.keys() and bool(self.deca.config.id_contrastive): if ring_size == 2: assert effective_bs % 2 == 0 assert self.deca.id_loss.trainable has_been_shuffled = 'new_order' in codedict.keys() idxs_a = torch.arange(0, images.shape[0], 2) # indices of first images within the ring idxs_b = torch.arange(1, images.shape[0], 2) # indices of second images within the ring # WARNING - this assumes the ring is identity-based if self.deca.config.id_contrastive in [True, "real", "both"]: # we are taking this from the original batch dict because we do not care about the # shuffled, duplicated samples (they don't have to be doubled) images_0 = batch["image"][:, 0, ...] images_1 = batch["image"][:, 1, ...] losses['identity_contrastive_real'] = self.deca.id_loss( images_0, # first images within the ring images_1, # second images within the ring batch_size=images_0.shape[0], ring_size=1) * self.deca.config.idw * 2 if self.deca.config.id_contrastive in [True, "synth", "both"]: if self.deca.config.shape_constrain_type in ['exchange', 'same']: # we can take all when identity has been exchange within rings overlay_0 = overlay[idxs_a] overlay_1 = overlay[idxs_b] else: #if the batch was double otherwise (global shuffling) we only take the first half # if batch_size * ring_size < effective_bs: overlay_0 = overlay[0:batch_size * ring_size:2] overlay_1 = overlay[1:batch_size * ring_size:2] losses['identity_contrastive_synthetic'] = self.deca.id_loss( overlay_0, # first images within the ring overlay_1, # second images within the ring batch_size=overlay_0.shape[0], ring_size=1) * self.deca.config.idw if has_been_shuffled: new_order = codedict['new_order'] # TODO: compare the idxs to these: # codedict["ref_images_identity_idxs"] if self.deca.config.shape_constrain_type == 'shuffle_expression': idxs_a_synth = np.arange(new_order.shape[0]) # first half of the batch idxs_b_synth = np.arange(new_order.shape[0], 2 * new_order.shape[0]) # second half of the batch elif self.deca.config.shape_constrain_type == 'shuffle_shape': idxs_a_synth = new_order # shuffled first half of the batch idxs_b_synth = np.arange(new_order.shape[0], 2 * new_order.shape[0]) # second half of the batch else: raise NotImplementedError("Unexpected shape consistency value ") # if this doesn't go through, something went wrong with the shuffling indexations assert codedict["shapecode"][idxs_a_synth].allclose(codedict["shapecode"][idxs_b_synth]) losses['identity_contrastive_synthetic_shuffled'] = self.deca.id_loss( overlay[idxs_a_synth], # synthetic images of identities with reconstructed expressions overlay[idxs_b_synth], # synthetic images of identities with shuffled expressions 
batch_size=idxs_a_synth.size, ring_size=1) * self.deca.config.idw losses['identity_contrastive_synthetic2real_shuffled'] = self.deca.id_loss( images[idxs_a_synth], # synthetic images of identities with reconstructed expressions overlay[idxs_b_synth], # synthetic images of identities with shuffled expressions batch_size=idxs_a_synth.size, ring_size=1) * self.deca.config.idw elif ring_size > 2: raise NotImplementedError("Contrastive loss does not support ring sizes > 2.") return losses def _compute_emonet_loss_wrapper(self, codedict, batch, training, testing, losses, metrics, prefix, image_key, with_grad, batch_size, ring_size): if self.emonet_loss is not None: if 'va' in codedict: va = codedict['va'] va = va.view(-1, va.shape[-1]) else: va = None if 'expr7' in codedict: expr7 = codedict['expr7'] expr7 = expr7.view(-1, expr7.shape[-1]) else: expr7 = None # with torch.no_grad(): # TODO: if expression shuffled, this needs to be changed, the input images no longer correspond images = codedict["images"] predicted_images = codedict[image_key] effective_bs = images.shape[0] if "ref_images_expression_idxs" in codedict.keys(): # in case there was shuffling, this ensures that the proper images are used for emotion loss images_ = images[codedict["ref_images_expression_idxs"]] else: images_ = images effective_bs = images.shape[0] self._compute_emotion_loss(images_, predicted_images, losses, metrics, f"{prefix}", va, expr7, with_grad=with_grad, batch_size=effective_bs, ring_size=1) codedict[f"{prefix}_valence_input"] = self.emonet_loss.input_emotion['valence'] codedict[f"{prefix}_arousal_input"] = self.emonet_loss.input_emotion['arousal'] codedict[f"{prefix}_expression_input"] = self.emonet_loss.input_emotion[ 'expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification'] codedict[f"{prefix}_valence_output"] = self.emonet_loss.output_emotion['valence'] codedict[f"{prefix}_arousal_output"] = self.emonet_loss.output_emotion['arousal'] codedict[f"{prefix}_expression_output"] = self.emonet_loss.output_emotion[ 'expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification'] if 'emo_contrastive' in self.deca.config.keys() and self.deca.config.emo_contrastive: assert ring_size == 2 or ring_size == 1 assert self.emonet_loss.trainable or ( hasattr(self.emonet_loss, 'clone_is_trainable') and self.emonet_loss.clone_is_trainable) has_been_shuffled = 'new_order' in codedict.keys() # if self.deca.config.shape_constrain_type == 'shuffle_expression' and has_been_shuffled: # new_order = codedict['new_order'] # if self.deca.config.emo_contrastive in [True, "real", "both"]: if ring_size == 2: assert effective_bs % 2 == 0 if not isinstance(self.deca, ExpDECA): raise NotImplementedError("Cross-ring emotion contrast means the ring has to be " "expression based, not identity based. This is not guaranteed " "for vanilla EMOCA (or its datasets).") # we are taking this from the original batch dict because we do not care about the # shuffled, duplicated samples (they don't have to be doubled) images_0 = batch["image"][:, 0, ...] images_1 = batch["image"][:, 1, ...]
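# (added note, not part of the original source) batch["image"] is laid out as [batch_size, ring_size, 3, H, W]
# (see the step docstrings further below), so with an expression-based ring of size 2 the slices [:, 0, ...] and
# [:, 1, ...] above pick out the two real images of every ring; the call below contrasts their EmoNet emotion
# features against each other.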
self._compute_emotion_loss(images_0, # real images of first expressions in the ring images_1, # real images of second expressions in the ring losses, metrics, f"{prefix}_contrastive_real", va, expr7, with_grad=self.deca.config.use_emonet_loss, batch_size=images_0.shape[0], ring_size=1) else: print("[WARNING] Cannot compute real contrastive emotion loss because there is no ring!") if self.deca.config.emo_contrastive in [True, "synth", "both"]: if ring_size == 2: assert effective_bs % 2 == 0 idxs_a = torch.arange(0, images.shape[0], 2) # indices of first expressions within a ring idxs_b = torch.arange(1, images.shape[0], 2) # indices of second expressions within a ring if 'expression_constrain_type' in self.deca.config.keys() and \ self.deca.config.expression_constrain_type in ['exchange', 'same']: # we can take all when identity has been exchange within rings predicted_images_0 = predicted_images[idxs_a] predicted_images_1 = predicted_images[idxs_b] raise RuntimeError("This should work but it was never tested or intended. Make sure this works.") else: # if the batch was double otherwise (global shuffling) we only take the first half # if batch_size * ring_size < effective_bs: predicted_images_0 = predicted_images[0:batch_size * ring_size:2] predicted_images_1 = predicted_images[1:batch_size * ring_size:2] if not isinstance(self.deca, ExpDECA): raise NotImplementedError("Cross-ring emotion contrast means the ring has to be " "expression based, not identity based. This is not guaranteed " "for vanilla EMOCA.") self._compute_emotion_loss(predicted_images_0, # rec images of first expressions in the ring predicted_images_1, # rec images of second expressions in the ring losses, metrics, f"{prefix}_contrastive_synth", va, expr7, with_grad=self.deca.config.use_emonet_loss, batch_size=predicted_images_1.shape[0], ring_size=1) else: print("[WARNING] Cannot compute synthetic contrastive emotion loss because there is no ring!") if has_been_shuffled: new_order = codedict['new_order'] if self.deca.config.shape_constrain_type == 'shuffle_expression': # this gets tricky, in this case the images are not duplicates -> we need all, but the second # half's order is shuffled, so we need to be careful here idxs_a_synth = new_order # shuffled first half of the batch idxs_b_synth = np.arange(new_order.shape[0], 2 * new_order.shape[0]) # second half of the batch elif self.deca.config.shape_constrain_type == 'shuffle_shape': idxs_a_synth = np.arange(new_order.shape[0]) # first half of the batch idxs_b_synth = np.arange(new_order.shape[0], 2 * new_order.shape[0]) # second half of the batch # if this doesn't go through, something went wrong with the shuffling indexations assert codedict["expcode"][idxs_a_synth].allclose(codedict["expcode"][idxs_b_synth]) # the expressions at corresponding index positions of idxs_a_synth and idxs_b_synth should match now self._compute_emotion_loss(predicted_images[idxs_a_synth], # synthetic images of reconstructed expressions and corresponding identities predicted_images[idxs_b_synth], # synthetic images of reconstructed expressions and shuffled identities losses, metrics, f"{prefix}_contrastive_synth_shuffled", va, expr7, with_grad=self.deca.config.use_emonet_loss and not self.deca._has_neural_rendering(), batch_size=idxs_a_synth.size, ring_size=1) self._compute_emotion_loss(images[idxs_a_synth], # synthetic images of reconstructed expressions and corresponding identities predicted_images[idxs_b_synth], # synthetic images of reconstructed expressions and shuffled identities losses, 
metrics, f"{prefix}_contrastive_synth2real_shuffled", va, expr7, with_grad=self.deca.config.use_emonet_loss and not self.deca._has_neural_rendering(), batch_size=idxs_a_synth.size, ring_size=1) if va is not None: codedict[f"{prefix}_valence_gt"] = va[:, 0] codedict[f"{prefix}_arousal_gt"] = va[:, 1] if expr7 is not None: codedict[f"{prefix}_expression_gt"] = expr7 if self.deca._has_neural_rendering(): assert 'emo_contrastive' not in self.deca.config.keys() or self.deca.config.emo_contrastive is False # TODO possible to make this more GPU efficient by not recomputing emotion for input image self._compute_emotion_loss(images, predicted_translated_image, losses, metrics, f"{prefix}_translated", va, expr7, with_grad=self.deca.config.use_emonet_loss and self.deca._has_neural_rendering(), batch_size=bs, ring_size=1) # codedict[f"{prefix}_valence_input"] = self.emonet_loss.input_emotion['valence'] # codedict[f"{prefix}_arousal_input"] = self.emonet_loss.input_emotion['arousal'] # codedict[f"{prefix}_expression_input"] = self.emonet_loss.input_emotion['expression'] codedict[f"{prefix}_translated_valence_output"] = self.emonet_loss.output_emotion['valence'] codedict[f"{prefix}_translated_arousal_output"] = self.emonet_loss.output_emotion['arousal'] codedict[f"{prefix}_translated_expression_output"] = self.emonet_loss.output_emotion[ 'expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification'] return losses, metrics, codedict def _compute_loss(self, codedict, batch, training=True, testing=False): #### ----------------------- Losses losses = {} metrics = {} predicted_landmarks = codedict["predicted_landmarks"] predicted_landmarks_mediapipe = codedict.get("predicted_landmarks_mediapipe", None) if "lmk" in codedict.keys(): lmk = codedict["lmk"] else: lmk = None if "lmk_mp" in codedict.keys(): lmk_mp = codedict["lmk_mp"] else: lmk_mp = None if "masks" in codedict.keys(): masks = codedict["masks"] else: masks = None batch_size = codedict["predicted_images"].shape[0] use_geom_losses = 'use_geometric_losses_expression_exchange' in self.deca.config.keys() and \ self.deca.config.use_geometric_losses_expression_exchange if training and ('expression_constrain_type' in self.deca.config.keys() \ and ('expression_constrain_type' in self.deca.config.keys() and self.deca.config.expression_constrain_type == 'exchange') or ( 'shape_constrain_type' in self.deca.config.keys() and self.deca.config.shape_constrain_type in ['shuffle_expression', 'shuffle_shape'])) \ and (self.deca.mode == DecaMode.COARSE or self.deca.config.train_coarse) \ and (not use_geom_losses): if batch_size % 2 != 0: raise RuntimeError("The batch size should be even because it should have " f"got doubled in expression ring exchange. 
Instead it was odd: {batch_size}") # THIS IS DONE BECAUSE LANDMARK AND PHOTOMETRIC LOSSES MAKE NO SENSE FOR EXPRESSION EXCHANGE geom_losses_idxs = batch_size // 2 else: geom_losses_idxs = batch_size predicted_images = codedict["predicted_images"] images = codedict["images"] lightcode = codedict["lightcode"] albedo = codedict["albedo"] mask_face_eye = codedict["mask_face_eye"] shapecode = codedict["shapecode"] expcode = codedict["expcode"] texcode = codedict["texcode"] ops = codedict["ops"] if self.mode == DecaMode.DETAIL: uv_texture = codedict["uv_texture"] uv_texture_gt = codedict["uv_texture_gt"] # this determines the configured batch size that is currently used (training, validation or testing) # the reason why this is important is because of potential multi-gpu training and loss functions (such as Barlow Twins) # that might need the full size of the batch (not just the chunk of the current GPU). if training: bs = self.learning_params.batch_size_train rs = self.learning_params.train_K else: if not testing: bs = self.learning_params.batch_size_val rs = self.learning_params.val_K else: bs = self.learning_params.batch_size_test rs = self.learning_params.test_K ## COARSE loss only if self.mode == DecaMode.COARSE or (self.mode == DecaMode.DETAIL and self.deca.config.train_coarse): # landmark losses (only useful if coarse model is being trained # if training or lmk is not None: if lmk is not None: # if self.deca.config.use_landmarks: # d = losses # else: # d = metrics d = self._metric_or_loss(losses, metrics, self.deca.config.use_landmarks) if self.deca.config.useWlmk: d['landmark'] = \ lossfunc.weighted_landmark_loss(predicted_landmarks[:geom_losses_idxs, ...], lmk[:geom_losses_idxs, ...]) * self.deca.config.lmk_weight else: d['landmark'] = \ lossfunc.landmark_loss(predicted_landmarks[:geom_losses_idxs, ...], lmk[:geom_losses_idxs, ...]) * self.deca.config.lmk_weight d = self._metric_or_loss(losses, metrics, 'use_eye_distance' not in self.deca.config.keys() or self.deca.config.use_eye_distance) # losses['eye_distance'] = lossfunc.eyed_loss(predicted_landmarks, lmk) * self.deca.config.lmk_weight * 2 d['eye_distance'] = lossfunc.eyed_loss(predicted_landmarks[:geom_losses_idxs, ...], lmk[:geom_losses_idxs, ...]) * self.deca.config.eyed d = self._metric_or_loss(losses, metrics, 'use_lip_distance' not in self.deca.config.keys() or self.deca.config.use_lip_distance) d['lip_distance'] = lossfunc.lipd_loss(predicted_landmarks[:geom_losses_idxs, ...], lmk[:geom_losses_idxs, ...]) * self.deca.config.lipd d = self._metric_or_loss(losses, metrics, 'use_mouth_corner_distance' in self.deca.config.keys() and self.deca.config.use_mouth_corner_distance) d['mouth_corner_distance'] = lossfunc.mouth_corner_loss(predicted_landmarks[:geom_losses_idxs, ...], lmk[:geom_losses_idxs, ...]) * self.deca.config.lipd if predicted_landmarks_mediapipe is not None and lmk_mp is not None: use_mediapipe_landmarks = self.deca.config.get('use_mediapipe_landmarks', False) d = self._metric_or_loss(losses, metrics, use_mediapipe_landmarks) d['landmark_mediapipe'] =lossfunc_mp.landmark_loss(predicted_landmarks_mediapipe[:geom_losses_idxs, ...], lmk_mp[:geom_losses_idxs, ...]) * self.deca.config.lmk_weight_mp d = self._metric_or_loss(losses, metrics, self.deca.config.get('use_eye_distance_mediapipe', False) ) d['eye_distance_mediapipe'] = lossfunc_mp.eyed_loss(predicted_landmarks_mediapipe[:geom_losses_idxs, ...], lmk_mp[:geom_losses_idxs, ...]) * self.deca.config.eyed_mp d = self._metric_or_loss(losses, metrics, 
self.deca.config.get('use_lip_distance_mediapipe', False) ) d['lip_distance_mediapipe'] = lossfunc_mp.lipd_loss(predicted_landmarks_mediapipe[:geom_losses_idxs, ...], lmk_mp[:geom_losses_idxs, ...]) * self.deca.config.lipd_mp d = self._metric_or_loss(losses, metrics, self.deca.config.get('use_mouth_corner_distance_mediapipe', False)) d['mouth_corner_distance_mediapipe'] = lossfunc_mp.mouth_corner_loss(predicted_landmarks_mediapipe[:geom_losses_idxs, ...], lmk_mp[:geom_losses_idxs, ...]) * self.deca.config.lipd_mp #TODO: fix this on the next iteration lipd_loss # d['lip_distance'] = lossfunc.lipd_loss(predicted_landmarks, lmk) * self.deca.config.lipd # photometric loss # if training or masks is not None: if masks is not None: # if self.deca.config.use_photometric: # d = losses # else: # d = metrics # d['photometric_texture'] = (masks * (predicted_images - images).abs()).mean() * self.deca.config.photow photometric = masks[:geom_losses_idxs, ...] * ((predicted_images[:geom_losses_idxs, ...] - images[:geom_losses_idxs, ...]).abs()) if 'photometric_normalization' not in self.deca.config.keys() or self.deca.config.photometric_normalization == 'mean': photometric = photometric.mean() elif self.deca.config.photometric_normalization == 'rel_mask_value': photometric = photometric * masks[:geom_losses_idxs, ...].mean(dim=tuple(range(1,masks.ndim)), keepdim=True) photometric = photometric.mean() elif self.deca.config.photometric_normalization == 'neg_rel_mask_value': mu = 1. - masks[:geom_losses_idxs, ...].mean(dim=tuple(range(1,masks.ndim)), keepdim=True) photometric = photometric * mu photometric = photometric.mean() elif self.deca.config.photometric_normalization == 'inv_rel_mask_value': mu = 1./ masks[:geom_losses_idxs, ...].mean(dim=tuple(range(1,masks.ndim)), keepdim=True) photometric = photometric * mu photometric = photometric.mean() elif self.deca.config.photometric_normalization == 'abs_mask_value': photometric = photometric * masks[:geom_losses_idxs, ...].sum(dim=tuple(range(1,masks.ndim)), keepdim=True) photometric = photometric.mean() else: raise ValueError(f"Invalid photometric loss normalization: '{self.deca.config.photometric_normalization}'") self._metric_or_loss(losses, metrics, self.deca.config.use_photometric)['photometric_texture'] = \ photometric * self.deca.config.photow if self.deca.vgg_loss is not None: vggl, _ = self.deca.vgg_loss( masks[:geom_losses_idxs, ...] * images[:geom_losses_idxs, ...], # masked input image masks[:geom_losses_idxs, ...] * predicted_images[:geom_losses_idxs, ...], # masked output image ) self._metric_or_loss(losses, metrics, self.deca.config.use_vgg)['vgg'] = vggl * self.deca.config.vggw if self.deca._has_neural_rendering(): predicted_translated_image = codedict["predicted_translated_image"] photometric_translated = (masks[:geom_losses_idxs, ...] * ( predicted_translated_image[:geom_losses_idxs, ...] - images[:geom_losses_idxs, ...]).abs()).mean() * self.deca.config.photow if self.deca.config.use_photometric: losses['photometric_translated_texture'] = photometric_translated else: metrics['photometric_translated_texture'] = photometric_translated if self.deca.vgg_loss is not None: vggl, _ = self.deca.vgg_loss( masks[:geom_losses_idxs, ...] * images[:geom_losses_idxs, ...], # masked input image masks[:geom_losses_idxs, ...] 
* predicted_translated_image[:geom_losses_idxs, ...], # masked output image ) self._metric_or_loss(losses, metrics, self.deca.config.use_vgg)['vgg_translated'] = vggl * self.deca.config.vggw else: raise ValueError("Is this line ever reached?") losses = self._compute_id_loss(codedict, batch, training, testing, losses, batch_size=bs, ring_size=rs) losses['shape_reg'] = (torch.sum(shapecode ** 2) / 2) * self.deca.config.shape_reg losses['expression_reg'] = (torch.sum(expcode ** 2) / 2) * self.deca.config.exp_reg losses['tex_reg'] = (torch.sum(texcode ** 2) / 2) * self.deca.config.tex_reg losses['light_reg'] = ((torch.mean(lightcode, dim=2)[:, :, None] - lightcode) ** 2).mean() * self.deca.config.light_reg if 'original_code' in codedict.keys(): # original jaw pose regularization if self.deca.config.get('exp_deca_jaw_pose', False) and \ 'deca_jaw_reg' in self.deca.config.keys() and self.deca.config.deca_jaw_reg > 0: jaw_pose_orig = codedict['original_code']['pose'][:, 3:] jaw_pose = codedict['posecode'][..., 3:] deca_jaw_pose_reg = (torch.sum((jaw_pose - jaw_pose_orig) ** 2) / 2) * self.deca.config.deca_jaw_reg losses['deca_jaw_pose_reg'] = deca_jaw_pose_reg if self.deca.config.get('exp_deca_global_pose', False) and \ 'deca_global_reg' in self.deca.config.keys() and self.deca.config.deca_global_reg > 0: global_pose_orig = codedict['original_code']['pose'][:, :3] global_pose = codedict['posecode'][..., :3] global_pose_reg = (torch.sum((global_pose - global_pose_orig) ** 2) / 2) * self.deca.config.deca_global_reg losses['deca_global_pose_reg'] = global_pose_reg # original expression regularization if 'deca_expression_reg' in self.deca.config.keys() and self.deca.config.deca_expression_reg > 0: expression_orig = codedict['original_code']['exp'] expression = codedict['expcode'] deca_expression_reg = (torch.sum((expression - expression_orig) ** 2) / 2) * self.deca.config.deca_expression_reg losses['deca_expression_reg'] = deca_expression_reg losses, metrics, codedict = self._compute_emonet_loss_wrapper(codedict, batch, training, testing, losses, metrics, prefix="coarse", image_key="predicted_images", with_grad=self.deca.config.use_emonet_loss and not self.deca._has_neural_rendering(), batch_size=bs, ring_size=rs) if self.deca._has_neural_rendering(): losses, metrics, codedict = self._compute_emonet_loss_wrapper(codedict, batch, training, testing, losses, metrics, prefix="coarse_translated", image_key="predicted_translated_image", with_grad=self.deca.config.use_emonet_loss and self.deca._has_neural_rendering(), batch_size=bs, ring_size=rs ) if self.au_loss is not None: # with torch.no_grad(): self._compute_au_loss(images, predicted_images, losses, metrics, "coarse", au=None, with_grad=self.deca.config.au_loss.use_as_loss and not self.deca._has_neural_rendering()) if self.deca._has_neural_rendering(): self._compute_au_loss(images, predicted_translated_image, losses, metrics, "coarse", au=None, with_grad=self.deca.config.au_loss.use_as_loss and self.deca._has_neural_rendering()) if self.lipread_loss is not None: # with torch.no_grad(): self._compute_lipread_loss(images, predicted_images, lmk, predicted_landmarks, losses, metrics, "coarse", with_grad=self.deca.config.lipread_loss.use_as_loss and not self.deca._has_neural_rendering()) if self.deca._has_neural_rendering(): self._compute_lipread_loss(images, predicted_translated_image, lmk, predicted_landmarks, losses, metrics, "coarse", with_grad=self.deca.config.lipread_loss.use_as_loss and self.deca._has_neural_rendering()) ## DETAIL loss only if 
self.mode == DecaMode.DETAIL: predicted_detailed_image = codedict["predicted_detailed_image"] uv_z = codedict["uv_z"] # UV displacement map uv_shading = codedict["uv_shading"] uv_vis_mask = codedict["uv_vis_mask"] # uv_mask of what is visible photometric_detailed = (masks[:geom_losses_idxs, ...] * ( predicted_detailed_image[:geom_losses_idxs, ...] - images[:geom_losses_idxs, ...]).abs()).mean() * self.deca.config.photow if self.deca.config.use_detailed_photo: losses['photometric_detailed_texture'] = photometric_detailed else: metrics['photometric_detailed_texture'] = photometric_detailed if self.deca.vgg_loss is not None: vggl, _ = self.deca.vgg_loss( masks[:geom_losses_idxs, ...] * images[:geom_losses_idxs, ...], # masked input image masks[:geom_losses_idxs, ...] * predicted_detailed_image[:geom_losses_idxs, ...], # masked output image ) self._metric_or_loss(losses, metrics, self.deca.config.use_vgg)['vgg_detailed'] = vggl * self.deca.config.vggw if self.deca._has_neural_rendering(): predicted_detailed_translated_image = codedict["predicted_detailed_translated_image"] photometric_detailed_translated = (masks[:geom_losses_idxs, ...] * ( predicted_detailed_translated_image[:geom_losses_idxs, ...] - images[:geom_losses_idxs, ...]).abs()).mean() * self.deca.config.photow if self.deca.config.use_detailed_photo: losses['photometric_translated_detailed_texture'] = photometric_detailed_translated else: metrics['photometric_translated_detailed_texture'] = photometric_detailed_translated if self.deca.vgg_loss is not None: vggl, _ = self.deca.vgg_loss( masks[:geom_losses_idxs, ...] * images[:geom_losses_idxs, ...], # masked input image masks[:geom_losses_idxs, ...] * predicted_detailed_translated_image[:geom_losses_idxs, ...], # masked output image ) self._metric_or_loss(losses, metrics, self.deca.config.use_vgg)[ 'vgg_detailed_translated'] = vggl * self.deca.config.vggw losses, metrics, codedict = self._compute_emonet_loss_wrapper(codedict, batch, training, testing, losses, metrics, prefix="detail", image_key = "predicted_detailed_image", with_grad=self.deca.config.use_emonet_loss and not self.deca._has_neural_rendering(), batch_size=bs, ring_size=rs) if self.deca._has_neural_rendering(): losses, metrics, codedict = self._compute_emonet_loss_wrapper(codedict, batch, training, testing, losses, metrics, prefix="detail_translated", image_key="predicted_detailed_translated_image", with_grad=self.deca.config.use_emonet_loss and self.deca._has_neural_rendering(), batch_size=bs, ring_size=rs) # if self.emonet_loss is not None: # self._compute_emotion_loss(images, predicted_detailed_image, losses, metrics, "detail", # with_grad=self.deca.config.use_emonet_loss and not self.deca._has_neural_rendering(), # batch_size=bs, ring_size=rs) # codedict["detail_valence_input"] = self.emonet_loss.input_emotion['valence'] # codedict["detail_arousal_input"] = self.emonet_loss.input_emotion['arousal'] # codedict["detail_expression_input"] = self.emonet_loss.input_emotion['expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification'] # codedict["detail_valence_output"] = self.emonet_loss.output_emotion['valence'] # codedict["detail_arousal_output"] = self.emonet_loss.output_emotion['arousal'] # codedict["detail_expression_output"] = self.emonet_loss.output_emotion['expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification'] # # if va is not None: # codedict["detail_valence_gt"] = va[:,0] # codedict["detail_arousal_gt"] = va[:,1] # if expr7 is not 
None: # codedict["detail_expression_gt"] = expr7 # if self.deca._has_neural_rendering(): # #TODO possible to make this more GPU efficient by not recomputing emotion for input image # self._compute_emotion_loss(images, predicted_detailed_translated_image, # losses, metrics, "detail_translated", # va, expr7, # with_grad= self.deca.config.use_emonet_loss and self.deca._has_neural_rendering(), # batch_size=bs, ring_size=rs) # # # codedict["coarse_valence_input"] = self.emonet_loss.input_emotion['valence'] # # codedict["coarse_arousal_input"] = self.emonet_loss.input_emotion['arousal'] # # codedict["coarse_expression_input"] = self.emonet_loss.input_emotion['expression'] # codedict["detail_translated_valence_output"] = self.emonet_loss.output_emotion['valence'] # codedict["detail_translated_arousal_output"] = self.emonet_loss.output_emotion['arousal'] # codedict["detail_translated_expression_output"] = self.emonet_loss.output_emotion['expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification'] if self.au_loss is not None: self._compute_au_loss(images, predicted_images, losses, metrics, "detail", au=None, with_grad=self.deca.config.au_loss.use_as_loss and not self.deca._has_neural_rendering()) if self.deca._has_neural_rendering(): self._compute_au_loss(images, predicted_detailed_translated_image, losses, metrics, "detail", au=None, with_grad=self.deca.config.au_loss.use_as_loss and self.deca._has_neural_rendering()) for pi in range(3): # self.deca.face_attr_mask.shape[0]): if self.deca.config.sfsw[pi] != 0: # if pi==0: new_size = 256 # else: # new_size = 128 # if self.deca.config.uv_size != 256: # new_size = 128 uv_texture_patch = F.interpolate( uv_texture[:geom_losses_idxs, :, self.deca.face_attr_mask[pi][2]:self.deca.face_attr_mask[pi][3], self.deca.face_attr_mask[pi][0]:self.deca.face_attr_mask[pi][1]], [new_size, new_size], mode='bilinear') uv_texture_gt_patch = F.interpolate( uv_texture_gt[:geom_losses_idxs, :, self.deca.face_attr_mask[pi][2]:self.deca.face_attr_mask[pi][3], self.deca.face_attr_mask[pi][0]:self.deca.face_attr_mask[pi][1]], [new_size, new_size], mode='bilinear') uv_vis_mask_patch = F.interpolate( uv_vis_mask[:geom_losses_idxs, :, self.deca.face_attr_mask[pi][2]:self.deca.face_attr_mask[pi][3], self.deca.face_attr_mask[pi][0]:self.deca.face_attr_mask[pi][1]], [new_size, new_size], mode='bilinear') detail_l1 = (uv_texture_patch * uv_vis_mask_patch - uv_texture_gt_patch * uv_vis_mask_patch).abs().mean() * \ self.deca.config.sfsw[pi] if self.deca.config.use_detail_l1 and not self.deca._has_neural_rendering(): losses['detail_l1_{}'.format(pi)] = detail_l1 else: metrics['detail_l1_{}'.format(pi)] = detail_l1 if self.deca.config.use_detail_mrf and not self.deca._has_neural_rendering(): mrf = self.deca.perceptual_loss(uv_texture_patch * uv_vis_mask_patch, uv_texture_gt_patch * uv_vis_mask_patch) * \ self.deca.config.sfsw[pi] * self.deca.config.mrfwr losses['detail_mrf_{}'.format(pi)] = mrf else: with torch.no_grad(): mrf = self.deca.perceptual_loss(uv_texture_patch * uv_vis_mask_patch, uv_texture_gt_patch * uv_vis_mask_patch) * \ self.deca.config.sfsw[pi] * self.deca.config.mrfwr metrics['detail_mrf_{}'.format(pi)] = mrf if self.deca._has_neural_rendering(): # raise NotImplementedError("Gotta implement the texture extraction first.") translated_uv_texture = codedict["translated_uv_texture"] translated_uv_texture_patch = F.interpolate( translated_uv_texture[:geom_losses_idxs, :, self.deca.face_attr_mask[pi][2]:self.deca.face_attr_mask[pi][3], 
self.deca.face_attr_mask[pi][0]:self.deca.face_attr_mask[pi][1]], [new_size, new_size], mode='bilinear') translated_detail_l1 = (translated_uv_texture_patch * uv_vis_mask_patch - uv_texture_gt_patch * uv_vis_mask_patch).abs().mean() * \ self.deca.config.sfsw[pi] if self.deca.config.use_detail_l1: losses['detail_translated_l1_{}'.format(pi)] = translated_detail_l1 else: metrics['detail_translated_l1_{}'.format(pi)] = translated_detail_l1 if self.deca.config.use_detail_mrf: translated_mrf = self.deca.perceptual_loss(translated_uv_texture_patch * uv_vis_mask_patch, uv_texture_gt_patch * uv_vis_mask_patch) * \ self.deca.config.sfsw[pi] * self.deca.config.mrfwr losses['detail_translated_mrf_{}'.format(pi)] = translated_mrf else: with torch.no_grad(): mrf = self.deca.perceptual_loss(translated_uv_texture_patch * uv_vis_mask_patch, uv_texture_gt_patch * uv_vis_mask_patch) * \ self.deca.config.sfsw[pi] * self.deca.config.mrfwr metrics['detail_translated_mrf_{}'.format(pi)] = mrf # Old piece of debug code. Good to delete. # if pi == 2: # uv_texture_gt_patch_ = uv_texture_gt_patch # uv_texture_patch_ = uv_texture_patch # uv_vis_mask_patch_ = uv_vis_mask_patch losses['z_reg'] = torch.mean(uv_z.abs()) * self.deca.config.zregw losses['z_diff'] = lossfunc.shading_smooth_loss(uv_shading) * self.deca.config.zdiffw nonvis_mask = (1 - util.binary_erosion(uv_vis_mask)) losses['z_sym'] = (nonvis_mask * (uv_z - torch.flip(uv_z, [-1]).detach()).abs()).sum() * self.deca.config.zsymw if self.emotion_mlp is not None:# and not testing: mlp_losses, mlp_metrics = self.emotion_mlp.compute_loss( codedict, batch, training=training, pred_prefix="emo_mlp_") for key in mlp_losses.keys(): if key in losses.keys(): raise RuntimeError(f"Duplicate loss label {key}") losses[key] = self.deca.config.mlp_emotion_predictor_weight * mlp_losses[key] for key in mlp_metrics.keys(): if key in metrics.keys(): raise RuntimeError(f"Duplicate metric label {key}") # let's report the metrics (which are a superset of losses when it comes to EmoMLP) without the weight, # it's hard to plot the metrics otherwise metrics[key] = mlp_metrics[key] # metrics[key] = self.deca.config.mlp_emotion_predictor_weight * mlp_metrics[key] # else: # uv_texture_gt_patch_ = None # uv_texture_patch_ = None # uv_vis_mask_patch_ = None return losses, metrics def compute_loss(self, values, batch, training=True, testing=False) -> dict: """ The function used to compute the loss on a training batch. : training should be set to true when calling from training_step only """ losses, metrics = self._compute_loss(values, batch, training=training, testing=testing) all_loss = 0. losses_key = losses.keys() for key in losses_key: all_loss = all_loss + losses[key] # losses['all_loss'] = all_loss losses = {'loss_' + key: value for key, value in losses.items()} # add prefix loss for better logging losses['loss'] = all_loss # add metrics that do not effect the loss function (if any) for key in metrics.keys(): losses['metric_' + key] = metrics[key] return losses def _val_to_be_logged(self, d): if not hasattr(self, 'val_dict_list'): self.val_dict_list = [] self.val_dict_list += [d] def _train_to_be_logged(self, d): if not hasattr(self, 'train_dict_list'): self.train_dict_list = [] self.train_dict_list += [d] def validation_step(self, batch, batch_idx, dataloader_idx=None): """ Training step override of pytorch lightning module. It makes the encoding, decoding passes, computes the loss and logs the losses/visualizations. :param batch: Batch of images to encode. 
batch['image'] [batch_size, ring_size, 3, image_size, image_size]. For a training forward pass, additional corresponding data are necessery such as 'landmarks' and 'masks'. :batch_idx batch index """ with torch.no_grad(): training = False values = self.encode(batch, training=training) values = self.decode(values, training=training) losses_and_metrics = self.compute_loss(values, batch, training=training) #### self.log_dict(losses_and_metrics, on_step=False, on_epoch=True) # prefix = str(self.mode.name).lower() prefix = self._get_logging_prefix() # if dataloader_idx is not None: # dataloader_str = str(dataloader_idx) + "_" # else: dataloader_str = '' stage_str = dataloader_str + 'val_' # losses_and_metrics_to_log = {prefix + dataloader_str +'_val_' + key: value.detach().cpu() for key, value in losses_and_metrics.items()} # losses_and_metrics_to_log = {prefix + '_' + stage_str + key: value.detach() for key, value in losses_and_metrics.items()} losses_and_metrics_to_log = {prefix + '_' + stage_str + key: value.detach().cpu().item() for key, value in losses_and_metrics.items()} losses_and_metrics_to_log[prefix + '_' + stage_str + 'epoch'] = self.current_epoch # losses_and_metrics_to_log[prefix + '_' + stage_str + 'epoch'] = torch.tensor(self.current_epoch, device=self.device) # log val_loss also without any prefix for a model checkpoint to track it losses_and_metrics_to_log[stage_str + 'loss'] = losses_and_metrics_to_log[prefix + '_' + stage_str + 'loss'] losses_and_metrics_to_log[prefix + '_' + stage_str + 'step'] = self.global_step losses_and_metrics_to_log[prefix + '_' + stage_str + 'batch_idx'] = batch_idx losses_and_metrics_to_log[stage_str + 'step'] = self.global_step losses_and_metrics_to_log[stage_str + 'batch_idx'] = batch_idx losses_and_metrics_to_log[prefix + '_' + stage_str + 'mem_usage'] = self.process.memory_info().rss losses_and_metrics_to_log[stage_str + 'mem_usage'] = self.process.memory_info().rss # self._val_to_be_logged(losses_and_metrics_to_log) if self.logger is not None: self.log_dict(losses_and_metrics_to_log, on_step=False, on_epoch=True, sync_dist=True) # log per epoch # recommended if self.trainer.is_global_zero: if self.deca.config.val_vis_frequency > 0: if batch_idx % self.deca.config.val_vis_frequency == 0: uv_detail_normals = None if 'uv_detail_normals' in values.keys(): uv_detail_normals = values['uv_detail_normals'] visualizations, grid_image = self._visualization_checkpoint(values['verts'], values['trans_verts'], values['ops'], uv_detail_normals, values, batch_idx, stage_str[:-1], prefix) vis_dict = self._create_visualizations_to_log(stage_str[:-1], visualizations, values, batch_idx, indices=0, dataloader_idx=dataloader_idx) # image = Image(grid_image, caption="full visualization") # vis_dict[prefix + '_val_' + "visualization"] = image if isinstance(self.logger, WandbLogger): self.logger.log_metrics(vis_dict) return None def _get_logging_prefix(self): prefix = self.stage_name + str(self.mode.name).lower() return prefix def test_step(self, batch, batch_idx, dataloader_idx=None): """ Testing step override of pytorch lightning module. It makes the encoding, decoding passes, computes the loss and logs the losses/visualizations without gradient :param batch: Batch of images to encode. batch['image'] [batch_size, ring_size, 3, image_size, image_size]. For a training forward pass, additional corresponding data are necessery such as 'landmarks' and 'masks'. 
:batch_idx batch index """ prefix = self._get_logging_prefix() losses_and_metrics_to_log = {} # if dataloader_idx is not None: # dataloader_str = str(dataloader_idx) + "_" # else: dataloader_str = '' stage_str = dataloader_str + 'test_' with torch.no_grad(): training = False testing = True values = self.encode(batch, training=training) values = self.decode(values, training=training) if 'mask' in batch.keys(): losses_and_metrics = self.compute_loss(values, batch, training=False, testing=testing) # losses_and_metrics_to_log = {prefix + '_' + stage_str + key: value.detach().cpu() for key, value in losses_and_metrics.items()} losses_and_metrics_to_log = {prefix + '_' + stage_str + key: value.detach().cpu().item() for key, value in losses_and_metrics.items()} else: losses_and_metric = None # losses_and_metrics_to_log[prefix + '_' + stage_str + 'epoch'] = self.current_epoch # losses_and_metrics_to_log[prefix + '_' + stage_str + 'epoch'] = torch.tensor(self.current_epoch, device=self.device) # losses_and_metrics_to_log[prefix + '_' + stage_str + 'step'] = torch.tensor(self.global_step, device=self.device) # losses_and_metrics_to_log[prefix + '_' + stage_str + 'batch_idx'] = torch.tensor(batch_idx, device=self.device) # losses_and_metrics_to_log[stage_str + 'epoch'] = torch.tensor(self.current_epoch, device=self.device) # losses_and_metrics_to_log[stage_str + 'step'] = torch.tensor(self.global_step, device=self.device) # losses_and_metrics_to_log[stage_str + 'batch_idx'] = torch.tensor(batch_idx, device=self.device) losses_and_metrics_to_log[prefix + '_' + stage_str + 'epoch'] = self.current_epoch losses_and_metrics_to_log[prefix + '_' + stage_str + 'step'] = self.global_step losses_and_metrics_to_log[prefix + '_' + stage_str + 'batch_idx'] = batch_idx losses_and_metrics_to_log[prefix + '_' + stage_str + 'mem_usage'] = self.process.memory_info().rss losses_and_metrics_to_log[stage_str + 'epoch'] = self.current_epoch losses_and_metrics_to_log[stage_str + 'step'] = self.global_step losses_and_metrics_to_log[stage_str + 'batch_idx'] = batch_idx losses_and_metrics_to_log[stage_str + 'mem_usage'] = self.process.memory_info().rss if self.logger is not None: # self.logger.log_metrics(losses_and_metrics_to_log) self.log_dict(losses_and_metrics_to_log, sync_dist=True, on_step=False, on_epoch=True) # if self.global_step % 200 == 0: uv_detail_normals = None if 'uv_detail_normals' in values.keys(): uv_detail_normals = values['uv_detail_normals'] if self.deca.config.test_vis_frequency > 0: # Log visualizations every once in a while if batch_idx % self.deca.config.test_vis_frequency == 0: # if self.trainer.is_global_zero: visualizations, grid_image = self._visualization_checkpoint(values['verts'], values['trans_verts'], values['ops'], uv_detail_normals, values, self.global_step, stage_str[:-1], prefix) visdict = self._create_visualizations_to_log(stage_str[:-1], visualizations, values, batch_idx, indices=0, dataloader_idx=dataloader_idx) self.logger.log_metrics(visdict) return None @property def process(self): if not hasattr(self,"process_"): self.process_ = psutil.Process(os.getpid()) return self.process_ def training_step(self, batch, batch_idx, *args, **kwargs): #, debug=True): """ Training step override of pytorch lightning module. It makes the encoding, decoding passes, computes the loss and logs the losses/visualizations. :param batch: Batch of images to encode. batch['image'] [batch_size, ring_size, 3, image_size, image_size]. 
For a training forward pass, additional corresponding data are necessery such as 'landmarks' and 'masks'. :batch_idx batch index """ values = self.encode(batch, training=True) values = self.decode(values, training=True) losses_and_metrics = self.compute_loss(values, batch, training=True) uv_detail_normals = None if 'uv_detail_normals' in values.keys(): uv_detail_normals = values['uv_detail_normals'] # prefix = str(self.mode.name).lower() prefix = self._get_logging_prefix() # losses_and_metrics_to_log = {prefix + '_train_' + key: value.detach().cpu() for key, value in losses_and_metrics.items()} # losses_and_metrics_to_log = {prefix + '_train_' + key: value.detach() for key, value in losses_and_metrics.items()} losses_and_metrics_to_log = {prefix + '_train_' + key: value.detach().cpu().item() for key, value in losses_and_metrics.items()} # losses_and_metrics_to_log[prefix + '_train_' + 'epoch'] = torch.tensor(self.current_epoch, device=self.device) losses_and_metrics_to_log[prefix + '_train_' + 'epoch'] = self.current_epoch losses_and_metrics_to_log[prefix + '_train_' + 'step'] = self.global_step losses_and_metrics_to_log[prefix + '_train_' + 'batch_idx'] = batch_idx losses_and_metrics_to_log[prefix + '_' + "train_" + 'mem_usage'] = self.process.memory_info().rss # losses_and_metrics_to_log['train_' + 'epoch'] = torch.tensor(self.current_epoch, device=self.device) losses_and_metrics_to_log['train_' + 'epoch'] = self.current_epoch losses_and_metrics_to_log['train_' + 'step'] = self.global_step losses_and_metrics_to_log['train_' + 'batch_idx'] = batch_idx losses_and_metrics_to_log["train_" + 'mem_usage'] = self.process.memory_info().rss # log loss also without any prefix for a model checkpoint to track it losses_and_metrics_to_log['loss'] = losses_and_metrics_to_log[prefix + '_train_loss'] if self.logger is not None: self.log_dict(losses_and_metrics_to_log, on_step=False, on_epoch=True, sync_dist=True) # log per epoch, # recommended if self.deca.config.train_vis_frequency > 0: if self.global_step % self.deca.config.train_vis_frequency == 0: if self.trainer.is_global_zero: visualizations, grid_image = self._visualization_checkpoint(values['verts'], values['trans_verts'], values['ops'], uv_detail_normals, values, batch_idx, "train", prefix) visdict = self._create_visualizations_to_log('train', visualizations, values, batch_idx, indices=0) if isinstance(self.logger, WandbLogger): self.logger.log_metrics(visdict)#, step=self.global_step) # self.log_dict(visdict, sync_dist=True) # self.log_dict(losses_and_metrics_to_log, on_step=True, on_epoch=False) # log per step # self.log_dict(losses_and_metrics_to_log, on_step=True, on_epoch=True) # log per both # return losses_and_metrics return losses_and_metrics['loss'] ### STEP ENDS ARE PROBABLY NOT NECESSARY BUT KEEP AN EYE ON THEM IF MULI-GPU TRAINING DOESN'T WORK # def training_step_end(self, batch_parts): # return self._step_end(batch_parts) # # def validation_step_end(self, batch_parts): # return self._step_end(batch_parts) # # def _step_end(self, batch_parts): # # gpu_0_prediction = batch_parts.pred[0]['pred'] # # gpu_1_prediction = batch_parts.pred[1]['pred'] # N = len(batch_parts) # loss_dict = {} # for key in batch_parts[0]: # for i in range(N): # if key not in loss_dict.keys(): # loss_dict[key] = batch_parts[i] # else: # loss_dict[key] = batch_parts[i] # loss_dict[key] = loss_dict[key] / N # return loss_dict def vae_2_str(self, valence=None, arousal=None, affnet_expr=None, expr7=None, prefix=""): caption = "" if len(prefix) > 0: prefix += "_" if 
valence is not None and not np.isnan(valence).any(): caption += prefix + "valence= %.03f\n" % valence if arousal is not None and not np.isnan(arousal).any(): caption += prefix + "arousal= %.03f\n" % arousal if affnet_expr is not None and not np.isnan(affnet_expr).any(): caption += prefix + "expression= %s \n" % AffectNetExpressions(affnet_expr).name if expr7 is not None and not np.isnan(expr7).any(): caption += prefix +"expression= %s \n" % Expression7(expr7).name return caption def _create_visualizations_to_log(self, stage, visdict, values, step, indices=None, dataloader_idx=None, output_dir=None): mode_ = str(self.mode.name).lower() prefix = self._get_logging_prefix() output_dir = output_dir or self.inout_params.full_run_dir log_dict = {} for key in visdict.keys(): images = _torch_image2np(visdict[key]) if images.dtype == np.float32 or images.dtype == np.float64 or images.dtype == np.float16: images = np.clip(images, 0, 1) if indices is None: indices = np.arange(images.shape[0]) if isinstance(indices, int): indices = [indices,] if isinstance(indices, str) and indices == 'all': image = np.concatenate([images[i] for i in range(images.shape[0])], axis=1) savepath = Path(f'{output_dir}/{prefix}_{stage}/{key}/{self.current_epoch:04d}_{step:04d}_all.png') # im2log = Image(image, caption=key) if isinstance(self.logger, WandbLogger): im2log = _log_wandb_image(savepath, image) else: im2log = _log_array_image(savepath, image) name = prefix + "_" + stage + "_" + key if dataloader_idx is not None: name += "/dataloader_idx_" + str(dataloader_idx) log_dict[name] = im2log else: for i in indices: caption = key + f" batch_index={step}\n" caption += key + f" index_in_batch={i}\n" if self.emonet_loss is not None: if key == 'inputs': if mode_ + "_valence_input" in values.keys(): caption += self.vae_2_str( values[mode_ + "_valence_input"][i].detach().cpu().item(), values[mode_ + "_arousal_input"][i].detach().cpu().item(), np.argmax(values[mode_ + "_expression_input"][i].detach().cpu().numpy()), prefix="emonet") + "\n" if 'va' in values.keys() and mode_ + "valence_gt" in values.keys(): # caption += self.vae_2_str( # values[mode_ + "_valence_gt"][i].detach().cpu().item(), # values[mode_ + "_arousal_gt"][i].detach().cpu().item(), caption += self.vae_2_str( values[mode_ + "valence_gt"][i].detach().cpu().item(), values[mode_ + "arousal_gt"][i].detach().cpu().item(), prefix="gt") + "\n" if 'expr7' in values.keys() and mode_ + "_expression_gt" in values.keys(): caption += "\n" + self.vae_2_str( expr7=values[mode_ + "_expression_gt"][i].detach().cpu().numpy(), prefix="gt") + "\n" if 'affectnetexp' in values.keys() and mode_ + "_expression_gt" in values.keys(): caption += "\n" + self.vae_2_str( affnet_expr=values[mode_ + "_expression_gt"][i].detach().cpu().numpy(), prefix="gt") + "\n" elif 'geometry_detail' in key: if "emo_mlp_valence" in values.keys(): caption += self.vae_2_str( values["emo_mlp_valence"][i].detach().cpu().item(), values["emo_mlp_arousal"][i].detach().cpu().item(), prefix="mlp") if 'emo_mlp_expr_classification' in values.keys(): caption += "\n" + self.vae_2_str( affnet_expr=values["emo_mlp_expr_classification"][i].detach().cpu().argmax().numpy(), prefix="mlp") + "\n" elif key == 'output_images_' + mode_: if mode_ + "_valence_output" in values.keys(): caption += self.vae_2_str(values[mode_ + "_valence_output"][i].detach().cpu().item(), values[mode_ + "_arousal_output"][i].detach().cpu().item(), np.argmax(values[mode_ + "_expression_output"][i].detach().cpu().numpy())) + "\n" elif key == 
'output_translated_images_' + mode_: if mode_ + "_translated_valence_output" in values.keys(): caption += self.vae_2_str(values[mode_ + "_translated_valence_output"][i].detach().cpu().item(), values[mode_ + "_translated_arousal_output"][i].detach().cpu().item(), np.argmax(values[mode_ + "_translated_expression_output"][i].detach().cpu().numpy())) + "\n" # elif key == 'output_images_detail': # caption += "\n" + self.vae_2_str(values["detail_output_valence"][i].detach().cpu().item(), # values["detail_output_valence"][i].detach().cpu().item(), # np.argmax(values["detail_output_expression"][ # i].detach().cpu().numpy())) savepath = Path(f'{output_dir}/{prefix}_{stage}/{key}/{self.current_epoch:04d}_{step:04d}_{i:02d}.png') image = images[i] # im2log = Image(image, caption=caption) if isinstance(self.logger, WandbLogger): im2log = _log_wandb_image(savepath, image, caption) elif self.logger is not None: im2log = _log_array_image(savepath, image, caption) else: im2log = _log_array_image(None, image, caption) name = prefix + "_" + stage + "_" + key if dataloader_idx is not None: name += "/dataloader_idx_" + str(dataloader_idx) log_dict[name] = im2log return log_dict def _visualization_checkpoint(self, verts, trans_verts, ops, uv_detail_normals, additional, batch_idx, stage, prefix, save=False): batch_size = verts.shape[0] visind = np.arange(batch_size) shape_images = self.deca.render.render_shape(verts, trans_verts) if uv_detail_normals is not None: detail_normal_images = F.grid_sample(uv_detail_normals.detach(), ops['grid'].detach(), align_corners=False) shape_detail_images = self.deca.render.render_shape(verts, trans_verts, detail_normal_images=detail_normal_images) else: shape_detail_images = None visdict = {} if 'images' in additional.keys(): visdict['inputs'] = additional['images'][visind] if 'images' in additional.keys() and 'lmk' in additional.keys(): visdict['landmarks_gt'] = util.tensor_vis_landmarks(additional['images'][visind], additional['lmk'][visind]) if 'images' in additional.keys() and 'predicted_landmarks' in additional.keys(): visdict['landmarks_predicted'] = util.tensor_vis_landmarks(additional['images'][visind], additional['predicted_landmarks'][visind]) if 'predicted_images' in additional.keys(): visdict['output_images_coarse'] = additional['predicted_images'][visind] if 'predicted_translated_image' in additional.keys() and additional['predicted_translated_image'] is not None: visdict['output_translated_images_coarse'] = additional['predicted_translated_image'][visind] visdict['geometry_coarse'] = shape_images[visind] if shape_detail_images is not None: visdict['geometry_detail'] = shape_detail_images[visind] if 'albedo_images' in additional.keys(): visdict['albedo_images'] = additional['albedo_images'][visind] if 'masks' in additional.keys(): visdict['mask'] = additional['masks'].repeat(1, 3, 1, 1)[visind] if 'albedo' in additional.keys(): visdict['albedo'] = additional['albedo'][visind] if 'predicted_detailed_image' in additional.keys() and additional['predicted_detailed_image'] is not None: visdict['output_images_detail'] = additional['predicted_detailed_image'][visind] if 'predicted_detailed_translated_image' in additional.keys() and additional['predicted_detailed_translated_image'] is not None: visdict['output_translated_images_detail'] = additional['predicted_detailed_translated_image'][visind] if 'shape_detail_images' in additional.keys(): visdict['shape_detail_images'] = additional['shape_detail_images'][visind] if 'uv_detail_normals' in additional.keys(): 
visdict['uv_detail_normals'] = additional['uv_detail_normals'][visind] * 0.5 + 0.5 if 'uv_texture_patch' in additional.keys(): visdict['uv_texture_patch'] = additional['uv_texture_patch'][visind] if 'uv_texture_gt' in additional.keys(): visdict['uv_texture_gt'] = additional['uv_texture_gt'][visind] if 'translated_uv_texture' in additional.keys() and additional['translated_uv_texture'] is not None: visdict['translated_uv_texture'] = additional['translated_uv_texture'][visind] if 'uv_vis_mask_patch' in additional.keys(): visdict['uv_vis_mask_patch'] = additional['uv_vis_mask_patch'][visind] if save: savepath = f'{self.inout_params.full_run_dir}/{prefix}_{stage}/combined/{self.current_epoch:04d}_{batch_idx:04d}.png' Path(savepath).parent.mkdir(exist_ok=True, parents=True) visualization_image = self.deca.visualize(visdict, savepath) return visdict, visualization_image[..., [2, 1, 0]] else: visualization_image = None return visdict, None def _get_trainable_parameters(self): trainable_params = [] if self.mode == DecaMode.COARSE: trainable_params += self.deca._get_coarse_trainable_parameters() elif self.mode == DecaMode.DETAIL: trainable_params += self.deca._get_detail_trainable_parameters() else: raise ValueError(f"Invalid deca mode: {self.mode}") if self.emotion_mlp is not None: trainable_params += list(self.emotion_mlp.parameters()) if self.emonet_loss is not None: trainable_params += self.emonet_loss._get_trainable_params() if self.deca.id_loss is not None: trainable_params += self.deca.id_loss._get_trainable_params() return trainable_params def configure_optimizers(self): # optimizer = torch.optim.Adam(self.parameters(), lr=1e-3) print("Configuring optimizer") trainable_params = self._get_trainable_parameters() if self.learning_params.optimizer == 'Adam': self.deca.opt = torch.optim.Adam( trainable_params, lr=self.learning_params.learning_rate, amsgrad=False) elif self.config.learning.optimizer == 'AdaBound': self.deca.opt = adabound.AdaBound( trainable_params, lr=self.config.learning.learning_rate, final_lr=self.config.learning.final_learning_rate ) elif self.learning_params.optimizer == 'SGD': self.deca.opt = torch.optim.SGD( trainable_params, lr=self.learning_params.learning_rate) else: raise ValueError(f"Unsupported optimizer: '{self.learning_params.optimizer}'") optimizers = [self.deca.opt] schedulers = [] if 'learning_rate_decay' in self.learning_params.keys(): scheduler = torch.optim.lr_scheduler.ExponentialLR(self.deca.opt, gamma=self.learning_params.learning_rate_decay) schedulers += [scheduler] if len(schedulers) == 0: return self.deca.opt return optimizers, schedulers class DECA(torch.nn.Module): """ The original DECA class which contains the encoders, FLAME decoder and the detail decoder. 
""" def __init__(self, config): """ :config corresponds to a model_params from DecaModule """ super().__init__() # ID-MRF perceptual loss (kept here from the original DECA implementation) self.perceptual_loss = None # Face Recognition loss self.id_loss = None # VGG feature loss self.vgg_loss = None self._reconfigure(config) self._reinitialize() def _dirty_init(self): pass # not used here, implemented for EMICA def get_input_image_size(self): return (self.config.image_size, self.config.image_size) def _reconfigure(self, config): self.config = config self.n_param = config.n_shape + config.n_tex + config.n_exp + config.n_pose + config.n_cam + config.n_light # identity-based detail code self.n_detail = config.n_detail # emotion-based detail code (deprecated, not use by DECA or EMOCA) self.n_detail_emo = config.n_detail_emo if 'n_detail_emo' in config.keys() else 0 # count the size of the conidition vector if 'detail_conditioning' in self.config.keys(): self.n_cond = 0 if 'globalpose' in self.config.detail_conditioning: self.n_cond += 3 if 'jawpose' in self.config.detail_conditioning: self.n_cond += 3 if 'identity' in self.config.detail_conditioning: self.n_cond += config.n_shape if 'expression' in self.config.detail_conditioning: self.n_cond += config.n_exp else: self.n_cond = 3 + config.n_exp self.mode = DecaMode[str(config.mode).upper()] self._create_detail_generator() self._init_deep_losses() self._setup_neural_rendering() def _reinitialize(self): self._create_model() self._setup_renderer() self._init_deep_losses() self.face_attr_mask = util.load_local_mask(image_size=self.config.uv_size, mode='bbx') def _get_num_shape_params(self): return self.config.n_shape def _init_deep_losses(self): """ Initialize networks for deep losses """ # TODO: ideally these networks should be moved out the DECA class and into DecaModule, # but that would break backwards compatility with the original DECA and would not be able to load DECA's weights if 'mrfwr' not in self.config.keys() or self.config.mrfwr == 0: self.perceptual_loss = None else: if self.perceptual_loss is None: self.perceptual_loss = lossfunc.IDMRFLoss().eval() self.perceptual_loss.requires_grad_(False) # TODO, move this to the constructor if 'idw' not in self.config.keys() or self.config.idw == 0: self.id_loss = None else: if self.id_loss is None: id_metric = self.config.id_metric if 'id_metric' in self.config.keys() else None id_trainable = self.config.id_trainable if 'id_trainable' in self.config.keys() else False self.id_loss_start_step = self.config.id_loss_start_step if 'id_loss_start_step' in self.config.keys() else 0 self.id_loss = lossfunc.VGGFace2Loss(self.config.pretrained_vgg_face_path, id_metric, id_trainable) self.id_loss.freeze_nontrainable_layers() if 'vggw' not in self.config.keys() or self.config.vggw == 0: self.vgg_loss = None else: if self.vgg_loss is None: vgg_loss_batch_norm = 'vgg_loss_batch_norm' in self.config.keys() and self.config.vgg_loss_batch_norm self.vgg_loss = VGG19Loss(dict(zip(self.config.vgg_loss_layers, self.config.lambda_vgg_layers)), batch_norm=vgg_loss_batch_norm).eval() self.vgg_loss.requires_grad_(False) # TODO, move this to the constructor def _setup_renderer(self): self.render = SRenderY(self.config.image_size, obj_filename=self.config.topology_path, uv_size=self.config.uv_size) # .to(self.device) # face mask for rendering details mask = imread(self.config.face_mask_path).astype(np.float32) / 255. 
mask = torch.from_numpy(mask[:, :, 0])[None, None, :, :].contiguous() self.uv_face_mask = F.interpolate(mask, [self.config.uv_size, self.config.uv_size]) mask = imread(self.config.face_eye_mask_path).astype(np.float32) / 255. mask = torch.from_numpy(mask[:, :, 0])[None, None, :, :].contiguous() uv_face_eye_mask = F.interpolate(mask, [self.config.uv_size, self.config.uv_size]) self.register_buffer('uv_face_eye_mask', uv_face_eye_mask) # displacement mask is deprecated and not used by DECA or EMOCA if 'displacement_mask' in self.config.keys(): displacement_mask_ = 1-np.load(self.config.displacement_mask).astype(np.float32) # displacement_mask_ = np.load(self.config.displacement_mask).astype(np.float32) displacement_mask_ = torch.from_numpy(displacement_mask_)[None, None, ...].contiguous() displacement_mask_ = F.interpolate(displacement_mask_, [self.config.uv_size, self.config.uv_size]) self.register_buffer('displacement_mask', displacement_mask_) ## displacement correct if os.path.isfile(self.config.fixed_displacement_path): fixed_dis = np.load(self.config.fixed_displacement_path) fixed_uv_dis = torch.tensor(fixed_dis).float() else: fixed_uv_dis = torch.zeros([512, 512]).float() print("Warning: fixed_displacement_path not found, using zero displacement") self.register_buffer('fixed_uv_dis', fixed_uv_dis) def uses_texture(self): if 'use_texture' in self.config.keys(): return self.config.use_texture return True # true by default def _disable_texture(self, remove_from_model=False): self.config.use_texture = False if remove_from_model: self.flametex = None def _enable_texture(self): self.config.use_texture = True def _has_neural_rendering(self): return hasattr(self.config, "neural_renderer") and bool(self.config.neural_renderer) def _setup_neural_rendering(self): if self._has_neural_rendering(): if self.config.neural_renderer.class_ == "StarGAN": print("Creating StarGAN neural renderer") self.image_translator = StarGANWrapper(self.config.neural_renderer.cfg, self.config.neural_renderer.stargan_repo) else: raise ValueError(f"Unsupported neural renderer class '{self.config.neural_renderer.class_}'") if self.image_translator.background_mode == "input": if self.config.background_from_input not in [True, "input"]: raise NotImplementedError("The background mode of the neural renderer and deca is not synchronized. " "Background should be inpainted from the input") elif self.image_translator.background_mode == "black": if self.config.background_from_input not in [False, "black"]: raise NotImplementedError("The background mode of the neural renderer and deca is not synchronized. " "Background should be black.") elif self.image_translator.background_mode == "none": if self.config.background_from_input not in ["none"]: raise NotImplementedError("The background mode of the neural renderer and deca is not synchronized. 
" "The background should not be handled") else: raise NotImplementedError(f"Unsupported mode of the neural renderer backroungd: " f"'{self.image_translator.background_mode}'") def _create_detail_generator(self): #backwards compatibility hack: if hasattr(self, 'D_detail'): if (not "detail_conditioning_type" in self.config.keys() or self.config.detail_conditioning_type == "concat") \ and isinstance(self.D_detail, Generator): return if self.config.detail_conditioning_type == "adain" and isinstance(self.D_detail, GeneratorAdaIn): return print("[WARNING]: We are reinitializing the detail generator!") del self.D_detail # just to make sure we free the CUDA memory, probably not necessary if not "detail_conditioning_type" in self.config.keys() or str(self.config.detail_conditioning_type).lower() == "concat": # concatenates detail latent and conditioning (this one is used by DECA/EMOCA) print("Creating classic detail generator.") self.D_detail = Generator(latent_dim=self.n_detail + self.n_detail_emo + self.n_cond, out_channels=1, out_scale=0.01, sample_mode='bilinear') elif str(self.config.detail_conditioning_type).lower() == "adain": # conditioning passed in through adain layers (this one is experimental and not currently used) print("Creating AdaIn detail generator.") self.D_detail = GeneratorAdaIn(self.n_detail + self.n_detail_emo, self.n_cond, out_channels=1, out_scale=0.01, sample_mode='bilinear') else: raise NotImplementedError(f"Detail conditioning invalid: '{self.config.detail_conditioning_type}'") def _create_model(self): # 1) build coarse encoder e_flame_type = 'ResnetEncoder' if 'e_flame_type' in self.config.keys(): e_flame_type = self.config.e_flame_type if e_flame_type == 'ResnetEncoder': self.E_flame = ResnetEncoder(outsize=self.n_param) elif e_flame_type[:4] == 'swin': self.E_flame = SwinEncoder(outsize=self.n_param, img_size=self.config.image_size, swin_type=e_flame_type) else: raise ValueError(f"Invalid 'e_flame_type' = {e_flame_type}") flame_cfg = copy.deepcopy(self.config) flame_cfg.n_shape = self._get_num_shape_params() if 'flame_mediapipe_lmk_embedding_path' not in flame_cfg.keys(): self.flame = FLAME(flame_cfg) else: self.flame = FLAME_mediapipe(flame_cfg) if self.uses_texture(): self.flametex = FLAMETex(self.config) else: self.flametex = None # 2) build detail encoder e_detail_type = 'ResnetEncoder' if 'e_detail_type' in self.config.keys(): e_detail_type = self.config.e_detail_type if e_detail_type == 'ResnetEncoder': self.E_detail = ResnetEncoder(outsize=self.n_detail + self.n_detail_emo) elif e_flame_type[:4] == 'swin': self.E_detail = SwinEncoder(outsize=self.n_detail + self.n_detail_emo, img_size=self.config.image_size, swin_type=e_detail_type) else: raise ValueError(f"Invalid 'e_detail_type'={e_detail_type}") self._create_detail_generator() # self._load_old_checkpoint() def _get_coarse_trainable_parameters(self): print("Add E_flame.parameters() to the optimizer") return list(self.E_flame.parameters()) def _get_detail_trainable_parameters(self): trainable_params = [] if self.config.train_coarse: trainable_params += self._get_coarse_trainable_parameters() print("Add E_flame.parameters() to the optimizer") trainable_params += list(self.E_detail.parameters()) print("Add E_detail.parameters() to the optimizer") trainable_params += list(self.D_detail.parameters()) print("Add D_detail.parameters() to the optimizer") return trainable_params def train(self, mode: bool = True): super().train(mode) if mode: if self.mode == DecaMode.COARSE: self.E_flame.train() # print("Setting 
E_flame to train") self.E_detail.eval() # print("Setting E_detail to eval") self.D_detail.eval() # print("Setting D_detail to eval") elif self.mode == DecaMode.DETAIL: if self.config.train_coarse: # print("Setting E_flame to train") self.E_flame.train() else: # print("Setting E_flame to eval") self.E_flame.eval() self.E_detail.train() # print("Setting E_detail to train") self.D_detail.train() # print("Setting D_detail to train") else: raise ValueError(f"Invalid mode '{self.mode}'") else: self.E_flame.eval() # print("Setting E_flame to eval") self.E_detail.eval() # print("Setting E_detail to eval") self.D_detail.eval() # print("Setting D_detail to eval") # these are set to eval no matter what, they're never being trained (the FLAME shape and texture spaces are pretrained) self.flame.eval() if self.flametex is not None: self.flametex.eval() return self def _load_old_checkpoint(self): """ Loads the DECA model weights from the original DECA implementation: https://github.com/YadiraF/DECA """ if self.config.resume_training: model_path = self.config.pretrained_modelpath print(f"Loading model state from '{model_path}'") checkpoint = torch.load(model_path) # model util.copy_state_dict(self.E_flame.state_dict(), checkpoint['E_flame']) # util.copy_state_dict(self.opt.state_dict(), checkpoint['opt']) # deprecate # detail model if 'E_detail' in checkpoint.keys(): util.copy_state_dict(self.E_detail.state_dict(), checkpoint['E_detail']) util.copy_state_dict(self.D_detail.state_dict(), checkpoint['D_detail']) # training state self.start_epoch = 0 # checkpoint['epoch'] self.start_iter = 0 # checkpoint['iter'] else: print('Start training from scratch') self.start_epoch = 0 self.start_iter = 0 def _encode_flame(self, images, **kwargs): return self.E_flame(images) def decompose_code(self, code): ''' config.n_shape + config.n_tex + config.n_exp + config.n_pose + config.n_cam + config.n_light ''' code_list = [] # num_list = [self.config.n_shape, self.config.n_tex, self.config.n_exp, self.config.n_pose, self.config.n_cam, # self.config.n_light] num_list = [self._get_num_shape_params(), self.config.n_tex, self.config.n_exp, self.config.n_pose, self.config.n_cam, self.config.n_light] start = 0 for i in range(len(num_list)): code_list.append(code[:, start:start + num_list[i]]) start = start + num_list[i] # shapecode, texcode, expcode, posecode, cam, lightcode = code_list code_list[-1] = code_list[-1].reshape(code.shape[0], 9, 3) return code_list, None def displacement2normal(self, uv_z, coarse_verts, coarse_normals, detach=True): """ Converts the displacement uv map (uv_z) and coarse_verts to a normal map coarse_normals. 
""" batch_size = uv_z.shape[0] uv_coarse_vertices = self.render.world2uv(coarse_verts)#.detach() if detach: uv_coarse_vertices = uv_coarse_vertices.detach() uv_coarse_normals = self.render.world2uv(coarse_normals)#.detach() if detach: uv_coarse_normals = uv_coarse_normals.detach() uv_z = uv_z * self.uv_face_eye_mask # detail vertices = coarse vertice + predicted displacement*normals + fixed displacement*normals uv_detail_vertices = uv_coarse_vertices + \ uv_z * uv_coarse_normals + \ self.fixed_uv_dis[None, None, :,:] * uv_coarse_normals #.detach() dense_vertices = uv_detail_vertices.permute(0, 2, 3, 1).reshape([batch_size, -1, 3]) uv_detail_normals = util.vertex_normals(dense_vertices, self.render.dense_faces.expand(batch_size, -1, -1)) uv_detail_normals = uv_detail_normals.reshape( [batch_size, uv_coarse_vertices.shape[2], uv_coarse_vertices.shape[3], 3]).permute(0, 3, 1, 2) # uv_detail_normals = uv_detail_normals*self.uv_face_eye_mask + uv_coarse_normals*(1-self.uv_face_eye_mask) # uv_detail_normals = util.gaussian_blur(uv_detail_normals) return uv_detail_normals, uv_coarse_vertices def visualize(self, visdict, savepath, catdim=1): grids = {} for key in visdict: # print(key) if visdict[key] is None: continue grids[key] = torchvision.utils.make_grid( F.interpolate(visdict[key], [self.config.image_size, self.config.image_size])).detach().cpu() grid = torch.cat(list(grids.values()), catdim) grid_image = (grid.numpy().transpose(1, 2, 0).copy() * 255)[:, :, [2, 1, 0]] grid_image = np.minimum(np.maximum(grid_image, 0), 255).astype(np.uint8) if savepath is not None: cv2.imwrite(savepath, grid_image) return grid_image def create_mesh(self, opdict, dense_template): ''' vertices: [nv, 3], tensor texture: [3, h, w], tensor ''' i = 0 vertices = opdict['verts'][i].cpu().numpy() faces = self.render.faces[0].cpu().numpy() if 'uv_texture_gt' in opdict.keys(): texture = util.tensor2image(opdict['uv_texture_gt'][i]) else: texture = None uvcoords = self.render.raw_uvcoords[0].cpu().numpy() uvfaces = self.render.uvfaces[0].cpu().numpy() # save coarse mesh, with texture and normal map if 'uv_detail_normals' in opdict.keys(): normal_map = util.tensor2image(opdict['uv_detail_normals'][i]*0.5 + 0.5) # upsample mesh, save detailed mesh texture = texture[:, :, [2, 1, 0]] normals = opdict['normals'][i].cpu().numpy() displacement_map = opdict['displacement_map'][i].detach().cpu().numpy().squeeze() dense_vertices, dense_colors, dense_faces = util.upsample_mesh(vertices, normals, faces, displacement_map, texture, dense_template) else: normal_map = None dense_vertices = None dense_colors = None dense_faces = None return vertices, faces, texture, uvcoords, uvfaces, normal_map, dense_vertices, dense_faces, dense_colors def save_obj(self, filename, opdict, dense_template, mode ='detail'): if mode not in ['coarse', 'detail', 'both']: raise ValueError(f"Invalid mode '{mode}. 
Expected modes are: 'coarse', 'detail', 'both'") vertices, faces, texture, uvcoords, uvfaces, normal_map, dense_vertices, dense_faces, dense_colors \ = self.create_mesh(opdict, dense_template) if mode == 'both': if isinstance(filename, list): filename_coarse = filename[0] filename_detail = filename[1] else: filename_coarse = filename filename_detail = filename.replace('.obj', '_detail.obj') elif mode == 'coarse': filename_coarse = filename else: filename_detail = filename if mode in ['coarse', 'both']: util.write_obj(str(filename_coarse), vertices, faces, texture=texture, uvcoords=uvcoords, uvfaces=uvfaces, normal_map=normal_map) if mode in ['detail', 'both']: util.write_obj(str(filename_detail), dense_vertices, dense_faces, colors = dense_colors, inverse_face_order=True) class ExpDECAInterface(object): """ This serves as an interface for EMOCA-like classes that need to use a different sub class but retain the EMOCA functionality. See EMICA_v2 for an example. """ def _create_model(self): # E_flame should be fixed for expression EMOCA self.E_flame.requires_grad_(False) # 2) add expression decoder if self.config.expression_backbone == 'deca_parallel': ## a) Attach a parallel flow of FCs onto the original DECA coarse backbone. (Only the second FC head is trainable) self.E_expression = SecondHeadResnet(self.E_flame, self.n_exp_param, 'same') elif self.config.expression_backbone == 'deca_clone': ## b) Clones the original DECA coarse decoder (and the entire decoder will be trainable) - This is in final EMOCA. #TODO this will only work for Resnet. Make this work for the other backbones (Swin) as well. self.E_expression = ResnetEncoder(self.n_exp_param) # clone parameters of the ResNet self.E_expression.encoder.load_state_dict(self.E_flame.encoder.state_dict()) elif self.config.expression_backbone == 'emonet_trainable': # Trainable EmoNet instead of Resnet (deprecated)
self.E_expression = EmoNetRegressor(self.n_exp_param)
21
2023-11-07 20:13:32+00:00
24k
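A note on the `decompose_code` method quoted in the record above: it slices the flat parameter vector predicted by the coarse encoder into per-group codes (shape, texture, expression, pose, camera, lighting) using a list of group sizes, and reshapes the lighting code to (batch, 9, 3). A minimal sketch of the same pattern, written here with torch.split and made-up group sizes (the real values come from the DECA config), not the repo's actual code:

import torch

# Hypothetical group sizes standing in for
# (n_shape, n_tex, n_exp, n_pose, n_cam, n_light) from the config.
num_list = [100, 50, 50, 6, 3, 27]
code = torch.randn(4, sum(num_list))  # batch of flat parameter vectors

# Slice the flat code into consecutive groups, mirroring decompose_code.
code_list = list(torch.split(code, num_list, dim=1))
# The lighting code is interpreted as 9 spherical-harmonic bands x RGB.
code_list[-1] = code_list[-1].reshape(code.shape[0], 9, 3)

shapecode, texcode, expcode, posecode, cam, lightcode = code_list
assert lightcode.shape == (4, 9, 3)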
codefuse-ai/Collinear-Constrained-Attention
model/build_model.py
[ { "identifier": "get_model_params_num", "path": "utils/common_utils.py", "snippet": "def get_model_params_num(model):\n \"\"\"\n Get params number of the model\n Args:\n model: model(required)\n Returns:\n the number of parameters of model\n \"\"\"\n num = 0\n for _, p...
import os import torch import sys import peft import model.peft.modeling_peft # noqa import bitsandbytes as bnb # noqa import accelerate # noqa from utils.common_utils import get_model_params_num from transformers import ( # noqa: E402 CONFIG_MAPPING, AutoConfig, AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast ) from .gpt_neox.configuration_gpt_neox import GPTNeoXConfig from .gpt_neox.modeling_gpt_neox import GPTNeoXForCausalLM from .gpt_neox.tokenization_gpt_neox_fast import GPTNeoXTokenizerFast from .llama.configuration_llama import LlamaConfig from .llama.modeling_llama import LlamaForCausalLM from .llama.tokenization_llama import LlamaTokenizer from .llama.tokenization_llama_fast import LlamaTokenizerFast from torch.distributed.fsdp import ( FullyShardedDataParallel as FSDP, StateDictType, ) from utils.common_utils import print_rank_0, is_old_version from tokenizer import build_tokenizer from tokenizer.tokenizer import HFTokenizer from peft.tuners.lora import LoraLayer from model.peft.utils import prepare_model_for_kbit_training from peft import ( # noqa LoraConfig, PrefixTuningConfig, PromptEncoderConfig, PromptEncoderReparameterizationType, PromptTuningConfig, PromptTuningInit, TaskType, get_peft_model ) from model.peft.tuner import AdaLoraConfig from transformers import BitsAndBytesConfig from packaging import version from .glm.tokenization_glm_deprecated import GLMChineseTokenizer
17,455
# coding=utf-8 # Copyright (c) 2023 Ant Group. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. sys.path.append("..") # from .glm.modeling_glm import GLMForConditionalGeneration # from .glm.configuration_glm import GLMConfig # from .glm.tokenization_glm import GLMTokenizer try: except ImportError: BitsAndBytesConfig = None try: except ImportError: bnb = None def find_all_linear_names(args, model): cls = bnb.nn.Linear4bit if args.bits == 4 else (bnb.nn.Linear8bitLt if args.bits == 8 else torch.nn.Linear) lora_module_names = set() for name, module in model.named_modules(): if isinstance(module, cls): names = name.split('.') lora_module_names.add(names[0] if len(names) == 1 else names[-1]) if 'lm_head' in lora_module_names: # needed for 16-bit lora_module_names.remove('lm_head') return list(lora_module_names) def setup_model(args, logger, use_cache=False): # Load pretrained model and tokenizer if args.pretrained_model_path: # TODO: 实现from pretrained读tokenizer if args.model_type == 'gpt_neox': # if args.tokenizer_type: # tokenizer = build_tokenizer(args) # tokenizer.eod_token = "<|endoftext|>" # tokenizer.pad_token = "<|pad|>" # # tokenizer.sop_token = "<|endoftext|>" # 适配multi task dataset # # tokenizer.eop_token = "<|endoftext|>" # tokenizer.eod_id = tokenizer.tokenize(tokenizer.eod_token)[0] # tokenizer.pad_id = tokenizer.tokenize(tokenizer.pad_token)[0] # else: tokenizer = GPTNeoXTokenizerFast.from_pretrained(args.pretrained_model_path) # tokenizer = PreTrainedTokenizerFast(tokenizer_file=args.vocab_file) tokenizer.eod_token = "<|endoftext|>" tokenizer.pad_token = "<|pad|>" tokenizer.sop_token = "<|endoftext|>" # 适配multi task dataset tokenizer.eop_token = "<|endoftext|>" tokenizer.eod_id = tokenizer.convert_tokens_to_ids(tokenizer.eod_token) tokenizer.pad_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token) print_rank_0(f'tokenizer {tokenizer.eod_token} id: {tokenizer.eod_id}') print_rank_0(f'tokenizer {tokenizer.pad_token} id: {tokenizer.pad_id}') elif args.model_type == 'llama': tokenizer = LlamaTokenizerFast.from_pretrained(args.pretrained_model_path) # tokenizer = AutoTokenizer.from_pretrained( # args.pretrained_model_path, # trust_remote_code=True, # ) tokenizer.eod_token = "</s>" tokenizer.eos_token = "</s>" tokenizer.bos_token = "<s>" tokenizer.pad_token = "[PAD]" tokenizer.unk_token = "<unk>" tokenizer.sop_token = "</s>" # 适配multi task dataset tokenizer.eop_token = "</s>" tokenizer.eod_id = tokenizer.convert_tokens_to_ids(tokenizer.eod_token) tokenizer.eos_id = tokenizer.convert_tokens_to_ids(tokenizer.eos_token) tokenizer.bos_id = tokenizer.convert_tokens_to_ids(tokenizer.bos_token) tokenizer.pad_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token) tokenizer.unk_id = tokenizer.convert_tokens_to_ids(tokenizer.unk_token) print_rank_0(f'tokenizer {tokenizer.eod_token} id: {tokenizer.eod_id}') print_rank_0(f'tokenizer {tokenizer.eos_token} id: {tokenizer.eos_id}') print_rank_0(f'tokenizer {tokenizer.bos_token} id: {tokenizer.bos_id}') print_rank_0(f'tokenizer 
{tokenizer.pad_token} id: {tokenizer.pad_id}') print_rank_0(f'tokenizer {tokenizer.unk_token} id: {tokenizer.unk_id}') elif args.model_type == 'glm': if is_old_version(args.pretrained_model_path): tokenizer = GLMChineseTokenizer.from_pretrained(args.pretrained_model_path) else: tokenizer = GLMTokenizer.from_pretrained(args.pretrained_model_path) elif args.train_mode == 'sst': # tokenizer = build_tokenizer(args) tokenizer = PreTrainedTokenizerFast(tokenizer_file=args.vocab_file) tokenizer.eod_token = "<|endoftext|>" tokenizer.pad_token = "<|pad|>" tokenizer.sop_token = "<|endoftext|>" # 适配multi task dataset tokenizer.eop_token = "<|endoftext|>" tokenizer.eod_id = tokenizer.convert_tokens_to_ids(tokenizer.eod_token) tokenizer.pad_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token) print_rank_0(f'tokenizer {tokenizer.eod_token} id: {tokenizer.eod_id}') print_rank_0(f'tokenizer {tokenizer.pad_token} id: {tokenizer.pad_id}') else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_path." ) if args.model_type == 'gpt_neox': auto_config = GPTNeoXConfig auto_model_class = GPTNeoXForCausalLM elif args.model_type == 'llama': auto_config = LlamaConfig
# coding=utf-8 # Copyright (c) 2023 Ant Group. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. sys.path.append("..") # from .glm.modeling_glm import GLMForConditionalGeneration # from .glm.configuration_glm import GLMConfig # from .glm.tokenization_glm import GLMTokenizer try: except ImportError: BitsAndBytesConfig = None try: except ImportError: bnb = None def find_all_linear_names(args, model): cls = bnb.nn.Linear4bit if args.bits == 4 else (bnb.nn.Linear8bitLt if args.bits == 8 else torch.nn.Linear) lora_module_names = set() for name, module in model.named_modules(): if isinstance(module, cls): names = name.split('.') lora_module_names.add(names[0] if len(names) == 1 else names[-1]) if 'lm_head' in lora_module_names: # needed for 16-bit lora_module_names.remove('lm_head') return list(lora_module_names) def setup_model(args, logger, use_cache=False): # Load pretrained model and tokenizer if args.pretrained_model_path: # TODO: 实现from pretrained读tokenizer if args.model_type == 'gpt_neox': # if args.tokenizer_type: # tokenizer = build_tokenizer(args) # tokenizer.eod_token = "<|endoftext|>" # tokenizer.pad_token = "<|pad|>" # # tokenizer.sop_token = "<|endoftext|>" # 适配multi task dataset # # tokenizer.eop_token = "<|endoftext|>" # tokenizer.eod_id = tokenizer.tokenize(tokenizer.eod_token)[0] # tokenizer.pad_id = tokenizer.tokenize(tokenizer.pad_token)[0] # else: tokenizer = GPTNeoXTokenizerFast.from_pretrained(args.pretrained_model_path) # tokenizer = PreTrainedTokenizerFast(tokenizer_file=args.vocab_file) tokenizer.eod_token = "<|endoftext|>" tokenizer.pad_token = "<|pad|>" tokenizer.sop_token = "<|endoftext|>" # 适配multi task dataset tokenizer.eop_token = "<|endoftext|>" tokenizer.eod_id = tokenizer.convert_tokens_to_ids(tokenizer.eod_token) tokenizer.pad_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token) print_rank_0(f'tokenizer {tokenizer.eod_token} id: {tokenizer.eod_id}') print_rank_0(f'tokenizer {tokenizer.pad_token} id: {tokenizer.pad_id}') elif args.model_type == 'llama': tokenizer = LlamaTokenizerFast.from_pretrained(args.pretrained_model_path) # tokenizer = AutoTokenizer.from_pretrained( # args.pretrained_model_path, # trust_remote_code=True, # ) tokenizer.eod_token = "</s>" tokenizer.eos_token = "</s>" tokenizer.bos_token = "<s>" tokenizer.pad_token = "[PAD]" tokenizer.unk_token = "<unk>" tokenizer.sop_token = "</s>" # 适配multi task dataset tokenizer.eop_token = "</s>" tokenizer.eod_id = tokenizer.convert_tokens_to_ids(tokenizer.eod_token) tokenizer.eos_id = tokenizer.convert_tokens_to_ids(tokenizer.eos_token) tokenizer.bos_id = tokenizer.convert_tokens_to_ids(tokenizer.bos_token) tokenizer.pad_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token) tokenizer.unk_id = tokenizer.convert_tokens_to_ids(tokenizer.unk_token) print_rank_0(f'tokenizer {tokenizer.eod_token} id: {tokenizer.eod_id}') print_rank_0(f'tokenizer {tokenizer.eos_token} id: {tokenizer.eos_id}') print_rank_0(f'tokenizer {tokenizer.bos_token} id: {tokenizer.bos_id}') print_rank_0(f'tokenizer 
{tokenizer.pad_token} id: {tokenizer.pad_id}') print_rank_0(f'tokenizer {tokenizer.unk_token} id: {tokenizer.unk_id}') elif args.model_type == 'glm': if is_old_version(args.pretrained_model_path): tokenizer = GLMChineseTokenizer.from_pretrained(args.pretrained_model_path) else: tokenizer = GLMTokenizer.from_pretrained(args.pretrained_model_path) elif args.train_mode == 'sst': # tokenizer = build_tokenizer(args) tokenizer = PreTrainedTokenizerFast(tokenizer_file=args.vocab_file) tokenizer.eod_token = "<|endoftext|>" tokenizer.pad_token = "<|pad|>" tokenizer.sop_token = "<|endoftext|>" # 适配multi task dataset tokenizer.eop_token = "<|endoftext|>" tokenizer.eod_id = tokenizer.convert_tokens_to_ids(tokenizer.eod_token) tokenizer.pad_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token) print_rank_0(f'tokenizer {tokenizer.eod_token} id: {tokenizer.eod_id}') print_rank_0(f'tokenizer {tokenizer.pad_token} id: {tokenizer.pad_id}') else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_path." ) if args.model_type == 'gpt_neox': auto_config = GPTNeoXConfig auto_model_class = GPTNeoXForCausalLM elif args.model_type == 'llama': auto_config = LlamaConfig
auto_model_class = LlamaForCausalLM
5
2023-11-02 01:37:01+00:00
24k
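The `find_all_linear_names` helper in the record above collects the names of (possibly quantized) Linear modules so they can serve as LoRA target modules. The record imports LoraConfig, TaskType and get_peft_model from peft but the snippet is cut off before they are used; a hedged sketch of how such a list is typically consumed (the numeric arguments are placeholders, not the repo's actual settings):

from peft import LoraConfig, TaskType, get_peft_model

# `model` is an already-loaded causal LM and `args` carries the CLI options,
# both assumed to exist as in the record above.
target_modules = find_all_linear_names(args, model)

lora_config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    r=16,                      # placeholder rank
    lora_alpha=32,             # placeholder scaling
    lora_dropout=0.05,         # placeholder dropout
    target_modules=target_modules,
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()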
bytedance/cryostar
projects/star/train_atom.py
[ { "identifier": "SpatialGridTranslate", "path": "cryostar/utils/transforms.py", "snippet": "class SpatialGridTranslate(torch.nn.Module):\n\n def __init__(self, D, device=None) -> None:\n super().__init__()\n self.D = D\n # yapf: disable\n coords = torch.stack(torch.meshgri...
import os.path as osp import warnings import collections import einops import numpy as np import biotite.structure as struc import torch import lightning.pytorch as pl from pathlib import Path from copy import deepcopy from torch import nn from torch import optim from torch.utils.data import DataLoader from torchinfo import summary from lightning.fabric.utilities.warnings import PossibleUserWarning from lightning.pytorch.utilities import rank_zero_only from lightning.pytorch.strategies import DDPStrategy from mmengine import mkdir_or_exist from cryostar.utils.transforms import SpatialGridTranslate from cryostar.utils.dataio import StarfileDataSet, StarfileDatasetConfig, Mask from cryostar.utils.ctf_utils import CTFRelion, CTFCryoDRGN from cryostar.utils.losses import calc_cor_loss, calc_kl_loss from cryostar.utils.misc import log_to_current, \ pl_init_exp, pretty_dict, set_seed, warmup from cryostar.utils.pdb_tools import bt_save_pdb from cryostar.gmm.gmm import EMAN2Grid, batch_projection, Gaussian from cryostar.gmm.deformer import E3Deformer, NMADeformer from cryostar.utils.fft_utils import primal_to_fourier_2d, fourier_to_primal_2d from cryostar.utils.polymer import Polymer, NT_ATOMS, AA_ATOMS from cryostar.utils.dist_loss import (find_quaint_cutoff_pairs, find_range_cutoff_pairs, find_continuous_pairs, calc_dist_by_pair_indices, remove_duplicate_pairs, filter_same_chain_pairs, DistLoss) from cryostar.utils.latent_space_utils import get_nearest_point, cluster_kmeans, run_pca, get_pc_traj, run_umap from cryostar.utils.vis_utils import plot_z_dist, save_tensor_image from cryostar.utils.pl_utils import merge_step_outputs, squeeze_dict_outputs_1st_dim, \ filter_outputs_by_indices, get_1st_unique_indices from miscs import calc_pair_dist_loss, calc_clash_loss, low_pass_mask2d, VAE, infer_ctf_params_from_config
16,442
if cfg.loss.sse_weight != 0.0: log_to_current("use pseduo `sse` by building spatial/sequential edges") sse_pairs = find_quaint_cutoff_pairs(meta.coord, meta.chain_id, meta.res_id, cfg.loss.intra_chain_cutoff, 0, 20) cutoff_pairs = remove_duplicate_pairs(cutoff_pairs, sse_pairs) clash_pairs = find_range_cutoff_pairs(meta.coord, cfg.loss.clash_min_cutoff) clash_pairs = remove_duplicate_pairs(clash_pairs, connect_pairs) if len(connect_pairs) > 0: self.register_buffer("connect_pairs", torch.from_numpy(connect_pairs).long()) dists = calc_dist_by_pair_indices(meta.coord, connect_pairs) self.register_buffer("connect_dists", torch.from_numpy(dists).float()) log_to_current(f"found {len(connect_pairs)} connect_pairs") else: log_to_current("connect_pairs is empty") if cfg.loss.sse_weight != 0.0: self.register_buffer("sse_pairs", torch.from_numpy(sse_pairs).long()) dists = calc_dist_by_pair_indices(meta.coord, sse_pairs) self.register_buffer("sse_dists", torch.from_numpy(dists).float()) log_to_current(f"found {len(sse_pairs)} sse_pairs") if len(cutoff_pairs) > 0: dists = calc_dist_by_pair_indices(meta.coord, cutoff_pairs) log_to_current(f"found {len(cutoff_pairs)} cutoff_pairs") self.dist_loss_fn = DistLoss(cutoff_pairs, dists, reduction=None) # for chain-wise dropout cutoff_chain_mask = filter_same_chain_pairs(cutoff_pairs, meta.chain_id) self.register_buffer("cutoff_chain_mask", torch.from_numpy(cutoff_chain_mask)) else: log_to_current("cutoff_pairs is empty") if len(clash_pairs) > 0: self.register_buffer("clash_pairs", torch.from_numpy(clash_pairs).long()) log_to_current(f"found {len(clash_pairs)} clash_pairs") else: log_to_current("clash_pairs is empty") # low-pass filtering if hasattr(cfg.data_process, "low_pass_bandwidth"): log_to_current(f"Use low-pass filtering w/ {cfg.data_process.low_pass_bandwidth} A") lp_mask2d = low_pass_mask2d(cfg.data_process.down_side_shape, cfg.data_process.down_apix, cfg.data_process.low_pass_bandwidth) self.register_buffer("lp_mask2d", torch.from_numpy(lp_mask2d).float()) else: self.lp_mask2d = None # self.mask = Mask(cfg.data_process.down_side_shape, rad=cfg.loss.mask_rad_for_image_loss) # for projection grid = EMAN2Grid(side_shape=cfg.data_process.down_side_shape, voxel_size=cfg.data_process.down_apix) self.grid = grid ctf_params = infer_ctf_params_from_config(cfg) if cfg.model.ctf == "v1": self.ctf = CTFRelion(**ctf_params, num_particles=len(dataset)) log_to_current("We will deprecate `model.ctf=v1` in a future version, use `model.ctf=v2` instead.") elif cfg.model.ctf == "v2": self.ctf = CTFCryoDRGN(**ctf_params, num_particles=len(dataset)) else: raise NotImplementedError log_to_current(ctf_params) # translate image helper self.translator = SpatialGridTranslate(D=cfg.data_process.down_side_shape, device=self.device) self.apix = self.cfg.data_process.down_apix # cache self.validation_step_outputs = [] self.stored_metrics = {} self.history_saved_dirs = [] if getattr(self.cfg.extra_input_data_attr, "ckpt_path", None) is not None: log_to_current(f"load checkpoint from {self.cfg.extra_input_data_attr.ckpt_path}") self._load_ckpt(self.cfg.extra_input_data_attr.ckpt_path) def _save_ckpt(self, ckpt_path): torch.save( { "model": self.model.state_dict(), "gmm_sigmas": self.gmm_sigmas.data, "gmm_amps": self.gmm_amps.data }, ckpt_path) def _load_ckpt(self, ckpt_path): state_dict = torch.load(ckpt_path, map_location=self.device) self.model.load_state_dict(state_dict["model"]) if self.cfg.gmm.tunable: self.gmm_sigmas.data = state_dict["gmm_sigmas"] self.gmm_amps.data = 
state_dict["gmm_amps"] def _get_save_dir(self): save_dir = osp.join(self.cfg.work_dir, f"{self.current_epoch:04d}_{self.global_step:07d}") mkdir_or_exist(save_dir) return save_dir def low_pass_images(self, images): f_images = primal_to_fourier_2d(images) f_images = f_images * self.lp_mask2d images = fourier_to_primal_2d(f_images).real return images def get_batch_pose(self, batch): rot_mats = batch["rotmat"] # yx order trans_mats = torch.concat((batch["shiftY"].unsqueeze(1), batch["shiftX"].unsqueeze(1)), dim=1) trans_mats /= self.apix return rot_mats, trans_mats def _shared_forward(self, images, idxes, rots): # predict structure pred_deformation, mu, log_var = self.model(prepare_images(images, self.cfg.model.input_space), idxes, rots) return pred_deformation, mu, log_var def _shared_projection(self, pred_struc, rot_mats): pred_images = batch_projection(
# other # avoid num_workers set as cpu_count warning warnings.simplefilter("ignore", PossibleUserWarning) # only log to rank_zero, comment this for debugging log_to_current = rank_zero_only(log_to_current) TASK_NAME = "atom" def prepare_images(images: torch.FloatTensor, space: str): assert space in ("real", "fourier") if space == "real": model_input = einops.rearrange(images, "b 1 ny nx -> b (1 ny nx)") else: fimages = primal_to_fourier_2d(images) model_input = einops.rearrange(torch.view_as_real(fimages), "b 1 ny nx c2 -> b (1 ny nx c2)", c2=2) return model_input class InitTask(pl.LightningModule): def __init__(self, em_module): super().__init__() self.cfg = em_module.cfg self.em_module = em_module self.loss_deque = collections.deque([ 10, ], maxlen=20) def on_train_batch_end(self, outputs, batch, batch_idx): self.loss_deque.append(outputs['loss'].item()) if np.mean(self.loss_deque) < 1e-3: self.trainer.should_stop = True # update all process status self.trainer.should_stop = self.trainer.strategy.broadcast(self.trainer.should_stop) def training_step(self, batch, batch_idx): images = batch["proj"] idxes = batch["idx"] rot_mats, trans_mats = self.em_module.get_batch_pose(batch) pred_deformation, mu, log_var = self.em_module.model(prepare_images(images, self.cfg.model.input_space), idxes, rot_mats) shift_loss = torch.mean(torch.pow(pred_deformation.flatten(start_dim=-2), 2)) loss = shift_loss if self.global_step % self.cfg.runner.log_every_n_step == 0: log_to_current(f"loss {loss.item()}") return loss def configure_optimizers(self): return optim.AdamW(self.em_module.model.parameters(), lr=1e-4) def on_fit_end(self): log_to_current(f"Init finished with loss {np.mean(self.loss_deque)}") class CryoEMTask(pl.LightningModule): def __init__(self, cfg, dataset): super().__init__() cfg = deepcopy(cfg) self.cfg = cfg # Define GMM meta = Polymer.from_pdb(cfg.dataset_attr.ref_pdb_path) log_to_current(f"Load reference structure from {cfg.dataset_attr.ref_pdb_path}") # for save self.template_pdb = meta.to_atom_arr() log_to_current(f"Protein contains {len(meta)} atoms, " f"{meta.num_amino_acids} amino acids, " f"{meta.num_nucleotides} nucleotides, " f"{meta.num_chains} chains.") # ref ref_centers = torch.from_numpy(meta.coord).float() ref_amps = torch.from_numpy(meta.num_electron).float() ref_sigmas = torch.ones_like(ref_amps) ref_sigmas.fill_(2.) 
log_to_current(f"1st GMM blob amplitude {ref_amps[0].item()}, sigma {ref_sigmas[0].item()}") num_pts = len(meta) log_to_current(f"Reference structure has {num_pts} atom coordinates") # tunable params # gmm self.register_buffer("gmm_centers", ref_centers) if cfg.gmm.tunable: log_to_current("Set GMM sigmas, amplitudes tunable") self.register_parameter("gmm_sigmas", nn.Parameter(ref_sigmas)) self.register_parameter("gmm_amps", nn.Parameter(ref_amps)) else: self.register_buffer("gmm_sigmas", ref_sigmas) self.register_buffer("gmm_amps", ref_amps) nma_modes = None if (hasattr(self.cfg.extra_input_data_attr, "nma_path") and self.cfg.extra_input_data_attr.nma_path not in ["", None]): nma_modes = torch.tensor(np.load(self.cfg.extra_input_data_attr.nma_path), dtype=torch.float32) log_to_current(f"Load NMA coefficients from {self.cfg.extra_input_data_attr.nma_path}, " f"whose shape is {nma_modes.shape}") # model if cfg.model.input_space == "fourier": in_dim = 2 * cfg.data_process.down_side_shape ** 2 elif cfg.model.input_space == "real": in_dim = cfg.data_process.down_side_shape ** 2 else: raise NotImplementedError self.model = VAE(in_dim=in_dim, out_dim=num_pts * 3 if nma_modes is None else 6 + nma_modes.shape[1], **cfg.model.model_cfg) log_to_current('Model summary:\n' + str(summary(self.model, input_size=[(1, in_dim), (1,)], verbose=0))) if nma_modes is None: self.deformer = E3Deformer() else: self.deformer = NMADeformer(nma_modes) # loss or regularization's preparation # dist loss connect_pairs = find_continuous_pairs(meta.chain_id, meta.res_id, meta.atom_name) if cfg.extra_input_data_attr.use_domain: log_to_current("use domain instead of chain!") domain_id = np.load(cfg.extra_input_data_attr.domain_path) cutoff_pairs = find_quaint_cutoff_pairs(meta.coord, domain_id, meta.res_id, cfg.loss.intra_chain_cutoff, cfg.loss.inter_chain_cutoff, cfg.loss.intra_chain_res_bound) else: # deal with RNA/DNA if np.sum(np.isin(meta.atom_name, NT_ATOMS)): # aa tmp_mask = np.isin(meta.atom_name, AA_ATOMS) indices_in_pdb = np.nonzero(tmp_mask)[0] aa_cutoff_pairs = find_quaint_cutoff_pairs(meta.coord[tmp_mask], meta.chain_id[tmp_mask], meta.res_id[tmp_mask], cfg.loss.intra_chain_cutoff, cfg.loss.inter_chain_cutoff, cfg.loss.intra_chain_res_bound) aa_cutoff_pairs = indices_in_pdb[aa_cutoff_pairs] log_to_current(f"{len(aa_cutoff_pairs)} AA pairs") # nt tmp_mask = np.isin(meta.atom_name, NT_ATOMS) indices_in_pdb = np.nonzero(tmp_mask)[0] nt_cutoff_pairs = find_quaint_cutoff_pairs(meta.coord[tmp_mask], meta.chain_id[tmp_mask], meta.res_id[tmp_mask], cfg.loss.nt_intra_chain_cutoff, cfg.loss.nt_inter_chain_cutoff, cfg.loss.nt_intra_chain_res_bound) nt_cutoff_pairs = indices_in_pdb[nt_cutoff_pairs] log_to_current(f"{len(nt_cutoff_pairs)} NT pairs") cutoff_pairs = np.vstack((aa_cutoff_pairs, nt_cutoff_pairs)) else: cutoff_pairs = find_quaint_cutoff_pairs(meta.coord, meta.chain_id, meta.res_id, cfg.loss.intra_chain_cutoff, cfg.loss.inter_chain_cutoff, cfg.loss.intra_chain_res_bound) cutoff_pairs = remove_duplicate_pairs(cutoff_pairs, connect_pairs) if cfg.loss.sse_weight != 0.0: log_to_current("use pseduo `sse` by building spatial/sequential edges") sse_pairs = find_quaint_cutoff_pairs(meta.coord, meta.chain_id, meta.res_id, cfg.loss.intra_chain_cutoff, 0, 20) cutoff_pairs = remove_duplicate_pairs(cutoff_pairs, sse_pairs) clash_pairs = find_range_cutoff_pairs(meta.coord, cfg.loss.clash_min_cutoff) clash_pairs = remove_duplicate_pairs(clash_pairs, connect_pairs) if len(connect_pairs) > 0: self.register_buffer("connect_pairs", 
torch.from_numpy(connect_pairs).long()) dists = calc_dist_by_pair_indices(meta.coord, connect_pairs) self.register_buffer("connect_dists", torch.from_numpy(dists).float()) log_to_current(f"found {len(connect_pairs)} connect_pairs") else: log_to_current("connect_pairs is empty") if cfg.loss.sse_weight != 0.0: self.register_buffer("sse_pairs", torch.from_numpy(sse_pairs).long()) dists = calc_dist_by_pair_indices(meta.coord, sse_pairs) self.register_buffer("sse_dists", torch.from_numpy(dists).float()) log_to_current(f"found {len(sse_pairs)} sse_pairs") if len(cutoff_pairs) > 0: dists = calc_dist_by_pair_indices(meta.coord, cutoff_pairs) log_to_current(f"found {len(cutoff_pairs)} cutoff_pairs") self.dist_loss_fn = DistLoss(cutoff_pairs, dists, reduction=None) # for chain-wise dropout cutoff_chain_mask = filter_same_chain_pairs(cutoff_pairs, meta.chain_id) self.register_buffer("cutoff_chain_mask", torch.from_numpy(cutoff_chain_mask)) else: log_to_current("cutoff_pairs is empty") if len(clash_pairs) > 0: self.register_buffer("clash_pairs", torch.from_numpy(clash_pairs).long()) log_to_current(f"found {len(clash_pairs)} clash_pairs") else: log_to_current("clash_pairs is empty") # low-pass filtering if hasattr(cfg.data_process, "low_pass_bandwidth"): log_to_current(f"Use low-pass filtering w/ {cfg.data_process.low_pass_bandwidth} A") lp_mask2d = low_pass_mask2d(cfg.data_process.down_side_shape, cfg.data_process.down_apix, cfg.data_process.low_pass_bandwidth) self.register_buffer("lp_mask2d", torch.from_numpy(lp_mask2d).float()) else: self.lp_mask2d = None # self.mask = Mask(cfg.data_process.down_side_shape, rad=cfg.loss.mask_rad_for_image_loss) # for projection grid = EMAN2Grid(side_shape=cfg.data_process.down_side_shape, voxel_size=cfg.data_process.down_apix) self.grid = grid ctf_params = infer_ctf_params_from_config(cfg) if cfg.model.ctf == "v1": self.ctf = CTFRelion(**ctf_params, num_particles=len(dataset)) log_to_current("We will deprecate `model.ctf=v1` in a future version, use `model.ctf=v2` instead.") elif cfg.model.ctf == "v2": self.ctf = CTFCryoDRGN(**ctf_params, num_particles=len(dataset)) else: raise NotImplementedError log_to_current(ctf_params) # translate image helper self.translator = SpatialGridTranslate(D=cfg.data_process.down_side_shape, device=self.device) self.apix = self.cfg.data_process.down_apix # cache self.validation_step_outputs = [] self.stored_metrics = {} self.history_saved_dirs = [] if getattr(self.cfg.extra_input_data_attr, "ckpt_path", None) is not None: log_to_current(f"load checkpoint from {self.cfg.extra_input_data_attr.ckpt_path}") self._load_ckpt(self.cfg.extra_input_data_attr.ckpt_path) def _save_ckpt(self, ckpt_path): torch.save( { "model": self.model.state_dict(), "gmm_sigmas": self.gmm_sigmas.data, "gmm_amps": self.gmm_amps.data }, ckpt_path) def _load_ckpt(self, ckpt_path): state_dict = torch.load(ckpt_path, map_location=self.device) self.model.load_state_dict(state_dict["model"]) if self.cfg.gmm.tunable: self.gmm_sigmas.data = state_dict["gmm_sigmas"] self.gmm_amps.data = state_dict["gmm_amps"] def _get_save_dir(self): save_dir = osp.join(self.cfg.work_dir, f"{self.current_epoch:04d}_{self.global_step:07d}") mkdir_or_exist(save_dir) return save_dir def low_pass_images(self, images): f_images = primal_to_fourier_2d(images) f_images = f_images * self.lp_mask2d images = fourier_to_primal_2d(f_images).real return images def get_batch_pose(self, batch): rot_mats = batch["rotmat"] # yx order trans_mats = torch.concat((batch["shiftY"].unsqueeze(1), 
batch["shiftX"].unsqueeze(1)), dim=1) trans_mats /= self.apix return rot_mats, trans_mats def _shared_forward(self, images, idxes, rots): # predict structure pred_deformation, mu, log_var = self.model(prepare_images(images, self.cfg.model.input_space), idxes, rots) return pred_deformation, mu, log_var def _shared_projection(self, pred_struc, rot_mats): pred_images = batch_projection(
gauss=Gaussian(
12
2023-11-06 07:15:26+00:00
24k
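The cryostar record above repeatedly builds index-pair lists (connect, cutoff and clash pairs) and converts them into reference distances via `calc_dist_by_pair_indices` before registering them as buffers. Assuming that helper simply gathers the two endpoints of each pair and takes the Euclidean norm, a self-contained sketch of the computation would be:

import numpy as np

def pair_distances(coord: np.ndarray, pair_indices: np.ndarray) -> np.ndarray:
    """coord: (N, 3) atom coordinates; pair_indices: (P, 2) integer indices.

    Returns the (P,) Euclidean distances between the paired atoms
    (a sketch of the assumed behaviour of calc_dist_by_pair_indices).
    """
    a = coord[pair_indices[:, 0]]
    b = coord[pair_indices[:, 1]]
    return np.linalg.norm(a - b, axis=-1)

coord = np.random.rand(10, 3).astype(np.float32)
pairs = np.array([[0, 1], [2, 5], [3, 9]])
print(pair_distances(coord, pairs))  # three reference distances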
KAIST-AILab/palr
train.py
[ { "identifier": "BC", "path": "imitation/bc.py", "snippet": "class BC(nn.Module):\n def __init__(self, policy, env, best_policy=None,\n replay_buffer=None, replay_buffer_valid=None, seed=0, \n device='cpu', lr=3e-4, envname=None, wandb=None, save_policy_path=None, \n ...
import os import wandb import envs import d4rl import gym import torch from imitation.bc import BC from imitation.rap import RAP from imitation.fca import FCA from imitation.mine import MINE_BC from imitation.palr import PALR from argparse import ArgumentParser from itertools import product from core.policy import TanhGaussianPolicyWithEmbedding, TanhGaussianRAPPolicy from core.replay_buffer import EnvReplayBuffer from core.preprocess import preprocess_dataset_with_prev_actions, data_select_num_transitions from rlkit.envs.wrappers import NormalizedBoxEnv
19,053
wandb_dir = '.' os.environ['WANDB_DIR'] = wandb_dir os.environ['D4RL_DATASET_DIR'] = './dataset/' def train(configs): env = NormalizedBoxEnv(gym.make(configs['envname'])) obs_dim = env.observation_space.low.size action_dim = env.action_space.low.size d4rl_env = gym.make(configs['d4rl_env_name']) stacksize = configs['stacksize'] if stacksize == 0: stacksize = 1 device = 'cuda' if torch.cuda.is_available() else 'cpu' envname, envtype = configs['envname'], configs['envtype'] traj_load_path = configs['traj_load_path'] print(f'-- Loading dataset from {traj_load_path}...') dataset = d4rl_env.get_dataset() print(f'-- Done!') print(f'-- Preprocessing dataset... ({envtype}, {stacksize})')
wandb_dir = '.' os.environ['WANDB_DIR'] = wandb_dir os.environ['D4RL_DATASET_DIR'] = './dataset/' def train(configs): env = NormalizedBoxEnv(gym.make(configs['envname'])) obs_dim = env.observation_space.low.size action_dim = env.action_space.low.size d4rl_env = gym.make(configs['d4rl_env_name']) stacksize = configs['stacksize'] if stacksize == 0: stacksize = 1 device = 'cuda' if torch.cuda.is_available() else 'cpu' envname, envtype = configs['envname'], configs['envtype'] traj_load_path = configs['traj_load_path'] print(f'-- Loading dataset from {traj_load_path}...') dataset = d4rl_env.get_dataset() print(f'-- Done!') print(f'-- Preprocessing dataset... ({envtype}, {stacksize})')
path = preprocess_dataset_with_prev_actions(dataset, envtype, stacksize, configs['partially_observable'], action_history_len=2)
8
2023-11-06 08:35:34+00:00
24k
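The palr record ends just before `preprocess_dataset_with_prev_actions(dataset, envtype, stacksize, ...)` is called. The name suggests that observations are augmented with a short history of preceding actions for the partially observable setting; a simplified, hypothetical illustration of that kind of stacking (not the repo's actual implementation):

import numpy as np

def stack_prev_actions(observations, actions, history_len=2):
    """observations: (T, obs_dim), actions: (T, act_dim).

    Concatenates each observation with the `history_len` preceding actions,
    zero-padded at the start of the trajectory.
    """
    T, act_dim = actions.shape
    padded = np.concatenate([np.zeros((history_len, act_dim)), actions], axis=0)
    # For step t, padded[t:t + history_len] holds actions t-history_len .. t-1.
    history = np.stack([padded[t:t + history_len].reshape(-1) for t in range(T)])
    return np.concatenate([observations, history], axis=1)

obs = np.zeros((5, 3))
acts = np.arange(10, dtype=float).reshape(5, 2)
print(stack_prev_actions(obs, acts).shape)  # (5, 3 + 2 * 2)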
tylerlight071/Project-Cipher
main.py
[ { "identifier": "clear_terminal", "path": "components/common_functions.py", "snippet": "def clear_terminal():\n os.system('cls' if os.name == 'nt' else 'clear')" }, { "identifier": "print_slow", "path": "components/common_functions.py", "snippet": "def print_slow(text, delay=0.00): #...
import msvcrt import os import pickle import sys import time import colorama import pygame from colorama import Fore, Style from components.common_functions import clear_terminal, print_slow, shop_help, help_user, connect_help, mail_help, \ system_help from conversations.calls import intro_call, first_call, second_call, third_call, fourth_call, fifth_call, sixth_call, \ markus_seen_call from conversations.minigame_calls import code_shatter_call from minigames.code_shatter_minigame import code_shatter_minigame from minigames.eye_spy_minigame import port_scanning from systems.level_1.amy.amy_system import AmySystem from systems.level_1.billy.billy_system import BillySystem from systems.level_1.cameras.camera_1 import camera_first from systems.level_1.markus.markus_system import MarkusSystem
15,253
def save_game(): global inventory, balance, emails, has_read_email, evidence, player_level, has_intro_call, has_started_game, seen_markus with open('savegame.pkl', 'wb') as f: pickle.dump( (inventory, balance, emails, has_read_email, evidence, player_level, has_intro_call, has_started_game, seen_markus), f) # Load the game state from a file def load_game(): global inventory, balance, emails, has_read_email, evidence, player_level, has_intro_call, has_started_game, seen_markus if os.path.exists('savegame.pkl'): with open('savegame.pkl', 'rb') as f: inventory, balance, emails, has_read_email, evidence, player_level, has_intro_call, has_started_game, seen_markus = pickle.load( f) else: # If the savegame file doesn't exist, set the default values inventory = [] player_level = 1 evidence = [] has_intro_call = False has_started_game = False seen_markus = False balance = 30000 emails = [ { "sender": "Hacker's Digest", "subject": "Weekly Hacker's Digest", "body": ( "Issue #143\n\n" "Cipher,\n\n" "Welcome to the latest edition of Hacker's Digest! In this issue: \n\n" "- Unveiling the Latest Exploits\n" "- Spotlight on Cryptocurrency Security\n" "- Interview with a Grey Hat Hacker\n" "- Tool of the Week: EnigmaLink\n\n" "Don't miss out on the latest in the world of hacking and cybersecurity. Stay informed and stay secure!\n\n" "Best regards,\n" "Hacker's Digest Team" ) }, { "sender": "The Cyber Mythbuster", "subject": "Busting Cybersecurity Myths", "body": ( "Cipher,\n\n" "Heard any wild cybersecurity myths lately? This week, we're busting the craziest ones, including:\n\n" "- Using 'Password123' for Maximum Security\n" "- Cyber Ninjas and Their Stealthy VPNs\n" "- USB Drives: The Fountain of Eternal Data\n\n" "Stay myth-free and keep on hacking (responsibly)!\n\n" "Mythbustingly,\n" "The Cyber Mythbuster" ) }, { "sender": "CyberSilliness", "subject": "Where Cyber Meets Comedy", "body": ( "Welcome to the CyberSilliness Gazette\n" "Where we believe that a good laugh is the ultimate antivirus! In this week's hilarity-packed issue:\n\n" "- Cyber Jokes to Crack You Up (Without Cracking Your Passwords)\n" "- Tech Support Horror Stories: A Comedy of Errors\n" "- Chuckle Challenge: Share Your Funniest Cybersecurity Anecdote\n" "- Meet the Cyber Clowns: Our Team's Silly Security Habits Revealed\n\n" "Laughter is contagious, and so is good cybersecurity. Dive into the giggles and stay safe!\n\n" "Silly Regards,\n" "The CyberSilliness Team" ) }, { "sender": "Security Insight Weekly", "subject": "Navigating the Cybersecurity Landscape", "body": ( "Hello Cipher,\n\n" "Welcome to Security Insight Weekly, your reliable source for navigating the ever-evolving cybersecurity landscape. In this week's issue:\n\n" "- Threat Analysis: Understanding Recent Cybersecurity Incidents\n" "- Best Practices for Endpoint Security\n" "- Industry Spotlight: Healthcare Cybersecurity Challenges\n" "- Security Compliance Update: Staying Aligned with Regulations\n\n" "Stay informed and empowered as we delve into the serious aspects of cybersecurity. 
Your security is our priority.\n\n" "Best regards,\n" "The Security Insight Team" ) }, ] # New function for game settings def game_settings(): global bg_music_enabled print_slow(Fore.GREEN + "░██████╗███████╗████████╗████████╗██╗███╗░░██╗░██████╗░░██████╗") print_slow(Fore.GREEN + "██╔════╝██╔════╝╚══██╔══╝╚══██╔══╝██║████╗░██║██╔════╝░██╔════╝") print_slow(Fore.GREEN + "╚█████╗░█████╗░░░░░██║░░░░░░██║░░░██║██╔██╗██║██║░░██╗░╚█████╗░") print_slow(Fore.GREEN + "░╚═══██╗██╔══╝░░░░░██║░░░░░░██║░░░██║██║╚████║██║░░╚██╗░╚═══██╗") print_slow(Fore.GREEN + "██████╔╝███████╗░░░██║░░░░░░██║░░░██║██║░╚███║╚██████╔╝██████╔╝") print_slow(Fore.GREEN + "╚═════╝░╚══════╝░░░╚═╝░░░░░░╚═╝░░░╚═╝╚═╝░░╚══╝░╚═════╝░╚═════╝░" + Style.RESET_ALL) print_slow("") print_slow("") print_slow("") print_slow(Fore.GREEN + " --------------------------------------------" + Style.RESET_ALL) print_slow( Fore.GREEN + f"| [Background Music] {'Enabled |' if bg_music_enabled else 'Disabled |'}" + Style.RESET_ALL) print_slow(Fore.GREEN + "| |" + Style.RESET_ALL) print_slow(Fore.GREEN + "| [Delete Savegame] |" + Style.RESET_ALL) print_slow(Fore.GREEN + "| |" + Style.RESET_ALL) print_slow(Fore.GREEN + "| [Back to Main Menu] |" + Style.RESET_ALL) print_slow(Fore.GREEN + " --------------------------------------------" + Style.RESET_ALL) choice = input(Fore.GREEN + "\n> " + Style.RESET_ALL) if choice.lower() == "background music": # Toggle background music bg_music_enabled = not bg_music_enabled if bg_music_enabled: pygame.mixer.music.play(-1) print_slow(Fore.GREEN + "\nBackground Music Enabled" + Style.RESET_ALL) time.sleep(1)
# Set the PYGAME_HIDE_SUPPORT_PROMPT environment variable os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "1" # Initialize pygame mixer pygame.mixer.init() # Load the bg music file and loop it pygame.mixer.music.load('bg_music.mp3') pygame.mixer.music.play(-1) # sets the volume to 20% (change value to adjust) pygame.mixer.music.set_volume(0.2) # Define the global variables at the module level inventory = [] balance = 300 emails = [] has_read_email = False has_read_file = False has_intro_call = False seen_markus = False evidence = [] amy_system = AmySystem() billy_system = BillySystem() markus_system = MarkusSystem() bg_music_enabled = True player_level = 1 has_started_game = False # Save the game state to a file def save_game(): global inventory, balance, emails, has_read_email, evidence, player_level, has_intro_call, has_started_game, seen_markus with open('savegame.pkl', 'wb') as f: pickle.dump( (inventory, balance, emails, has_read_email, evidence, player_level, has_intro_call, has_started_game, seen_markus), f) # Load the game state from a file def load_game(): global inventory, balance, emails, has_read_email, evidence, player_level, has_intro_call, has_started_game, seen_markus if os.path.exists('savegame.pkl'): with open('savegame.pkl', 'rb') as f: inventory, balance, emails, has_read_email, evidence, player_level, has_intro_call, has_started_game, seen_markus = pickle.load( f) else: # If the savegame file doesn't exist, set the default values inventory = [] player_level = 1 evidence = [] has_intro_call = False has_started_game = False seen_markus = False balance = 30000 emails = [ { "sender": "Hacker's Digest", "subject": "Weekly Hacker's Digest", "body": ( "Issue #143\n\n" "Cipher,\n\n" "Welcome to the latest edition of Hacker's Digest! In this issue: \n\n" "- Unveiling the Latest Exploits\n" "- Spotlight on Cryptocurrency Security\n" "- Interview with a Grey Hat Hacker\n" "- Tool of the Week: EnigmaLink\n\n" "Don't miss out on the latest in the world of hacking and cybersecurity. Stay informed and stay secure!\n\n" "Best regards,\n" "Hacker's Digest Team" ) }, { "sender": "The Cyber Mythbuster", "subject": "Busting Cybersecurity Myths", "body": ( "Cipher,\n\n" "Heard any wild cybersecurity myths lately? This week, we're busting the craziest ones, including:\n\n" "- Using 'Password123' for Maximum Security\n" "- Cyber Ninjas and Their Stealthy VPNs\n" "- USB Drives: The Fountain of Eternal Data\n\n" "Stay myth-free and keep on hacking (responsibly)!\n\n" "Mythbustingly,\n" "The Cyber Mythbuster" ) }, { "sender": "CyberSilliness", "subject": "Where Cyber Meets Comedy", "body": ( "Welcome to the CyberSilliness Gazette\n" "Where we believe that a good laugh is the ultimate antivirus! In this week's hilarity-packed issue:\n\n" "- Cyber Jokes to Crack You Up (Without Cracking Your Passwords)\n" "- Tech Support Horror Stories: A Comedy of Errors\n" "- Chuckle Challenge: Share Your Funniest Cybersecurity Anecdote\n" "- Meet the Cyber Clowns: Our Team's Silly Security Habits Revealed\n\n" "Laughter is contagious, and so is good cybersecurity. Dive into the giggles and stay safe!\n\n" "Silly Regards,\n" "The CyberSilliness Team" ) }, { "sender": "Security Insight Weekly", "subject": "Navigating the Cybersecurity Landscape", "body": ( "Hello Cipher,\n\n" "Welcome to Security Insight Weekly, your reliable source for navigating the ever-evolving cybersecurity landscape. 
In this week's issue:\n\n" "- Threat Analysis: Understanding Recent Cybersecurity Incidents\n" "- Best Practices for Endpoint Security\n" "- Industry Spotlight: Healthcare Cybersecurity Challenges\n" "- Security Compliance Update: Staying Aligned with Regulations\n\n" "Stay informed and empowered as we delve into the serious aspects of cybersecurity. Your security is our priority.\n\n" "Best regards,\n" "The Security Insight Team" ) }, ] # New function for game settings def game_settings(): global bg_music_enabled print_slow(Fore.GREEN + "░██████╗███████╗████████╗████████╗██╗███╗░░██╗░██████╗░░██████╗") print_slow(Fore.GREEN + "██╔════╝██╔════╝╚══██╔══╝╚══██╔══╝██║████╗░██║██╔════╝░██╔════╝") print_slow(Fore.GREEN + "╚█████╗░█████╗░░░░░██║░░░░░░██║░░░██║██╔██╗██║██║░░██╗░╚█████╗░") print_slow(Fore.GREEN + "░╚═══██╗██╔══╝░░░░░██║░░░░░░██║░░░██║██║╚████║██║░░╚██╗░╚═══██╗") print_slow(Fore.GREEN + "██████╔╝███████╗░░░██║░░░░░░██║░░░██║██║░╚███║╚██████╔╝██████╔╝") print_slow(Fore.GREEN + "╚═════╝░╚══════╝░░░╚═╝░░░░░░╚═╝░░░╚═╝╚═╝░░╚══╝░╚═════╝░╚═════╝░" + Style.RESET_ALL) print_slow("") print_slow("") print_slow("") print_slow(Fore.GREEN + " --------------------------------------------" + Style.RESET_ALL) print_slow( Fore.GREEN + f"| [Background Music] {'Enabled |' if bg_music_enabled else 'Disabled |'}" + Style.RESET_ALL) print_slow(Fore.GREEN + "| |" + Style.RESET_ALL) print_slow(Fore.GREEN + "| [Delete Savegame] |" + Style.RESET_ALL) print_slow(Fore.GREEN + "| |" + Style.RESET_ALL) print_slow(Fore.GREEN + "| [Back to Main Menu] |" + Style.RESET_ALL) print_slow(Fore.GREEN + " --------------------------------------------" + Style.RESET_ALL) choice = input(Fore.GREEN + "\n> " + Style.RESET_ALL) if choice.lower() == "background music": # Toggle background music bg_music_enabled = not bg_music_enabled if bg_music_enabled: pygame.mixer.music.play(-1) print_slow(Fore.GREEN + "\nBackground Music Enabled" + Style.RESET_ALL) time.sleep(1)
clear_terminal()
0
2023-11-06 09:52:13+00:00
24k
ziqi-zhang/TAOISM
python/test/test_conv.py
[ { "identifier": "register_layer", "path": "python/common_net.py", "snippet": "def register_layer(layer, name):\n layer.register_forward_hook(hooking_layer(name))\n layer.register_backward_hook(hooking_layer_backward(name))\n layer_names.append(name)" }, { "identifier": "register_weight_...
import os import sys import numpy as np import torch import torch.distributed as dist import sys import pdb from pdb import set_trace as st from torch import optim, nn from python.common_net import register_layer, register_weight_layer, get_layer_weight, get_layer_input, \ get_layer_weight_grad, get_layer_output, get_layer_output_grad, get_layer_input_grad from python.enclave_interfaces import GlobalTensor from python.layers.batch_norm_2d import SecretBatchNorm2dLayer from python.layers.flatten import SecretFlattenLayer from python.layers.input import SecretInputLayer from python.layers.maxpool2d import SecretMaxpool2dLayer from python.layers.output import SecretOutputLayer from python.layers.relu import SecretReLULayer from python.sgx_net import init_communicate, warming_up_cuda, SecretNeuralNetwork, SgdOptimizer from python.layers.sgx_linear_base import SGXLinearBase from python.layers.sgx_conv_base import SGXConvBase from python.utils.basic_utils import ExecutionModeOptions from python.utils.logger_utils import Logger from python.quantize_net import NetQ from python.test_sgx_net import argparser_distributed, marshal_process, load_cifar10, seed_torch from python.utils.timer_utils import NamedTimerInstance, VerboseLevel, NamedTimer from python.utils.torch_utils import compare_expected_actual from pdb import set_trace as st
21,043
device_cuda = torch.device("cuda:0") torch.set_printoptions(precision=10) def compare_layer_member(layer: SGXLinearBase, layer_name: str, extract_func , member_name: str, save_path=None) -> None: print(member_name) layer.make_sure_cpu_is_latest(member_name) compare_expected_actual(extract_func(layer_name), layer.get_cpu(member_name), get_relative=True, verbose=True) if save_path is not None: if not os.path.exists(save_path): os.makedirs(save_path) print("Directory ", save_path, " Created ") else: print("Directory ", save_path, " already exists") torch.save(extract_func(layer_name), os.path.join(save_path, member_name + "_expected")) torch.save(layer.get_cpu(member_name), os.path.join(save_path, member_name + "_actual")) def compare_layer(layer: SGXLinearBase, layer_name: str, save_path=None) -> None: print("comparing with layer in expected NN :", layer_name)
device_cuda = torch.device("cuda:0") torch.set_printoptions(precision=10) def compare_layer_member(layer: SGXLinearBase, layer_name: str, extract_func , member_name: str, save_path=None) -> None: print(member_name) layer.make_sure_cpu_is_latest(member_name) compare_expected_actual(extract_func(layer_name), layer.get_cpu(member_name), get_relative=True, verbose=True) if save_path is not None: if not os.path.exists(save_path): os.makedirs(save_path) print("Directory ", save_path, " Created ") else: print("Directory ", save_path, " already exists") torch.save(extract_func(layer_name), os.path.join(save_path, member_name + "_expected")) torch.save(layer.get_cpu(member_name), os.path.join(save_path, member_name + "_actual")) def compare_layer(layer: SGXLinearBase, layer_name: str, save_path=None) -> None: print("comparing with layer in expected NN :", layer_name)
compare_name_function = [("input", get_layer_input), ("output", get_layer_output),
3
2023-11-01 10:37:37+00:00
24k
Codra-Ingenierie-Informatique/DataLab
cdl/core/gui/panel/signal.py
[ { "identifier": "_", "path": "cdl/config.py", "snippet": "CONF_VERSION = \"1.0.0\"\nAPP_NAME = \"DataLab\"\nMOD_NAME = \"cdl\"\nAPP_DESC = _(\"\"\"DataLab is a generic signal and image processing platform\"\"\")\nAPP_PATH = osp.dirname(__file__)\nDEBUG = os.environ.get(\"DEBUG\", \"\").lower() in (\"1\"...
from typing import TYPE_CHECKING from plotpy.tools import ( HCursorTool, HRangeTool, LabelTool, RectangleTool, SegmentTool, VCursorTool, XCursorTool, ) from cdl.config import _ from cdl.core.gui import roieditor from cdl.core.gui.actionhandler import SignalActionHandler from cdl.core.gui.panel.base import BaseDataPanel from cdl.core.gui.plothandler import SignalPlotHandler from cdl.core.gui.processor.signal import SignalProcessor from cdl.core.io.signal import SignalIORegistry from cdl.core.model.signal import SignalObj, create_signal_from_param, new_signal_param from plotpy.plot import PlotWidget from qtpy import QtWidgets as QW from cdl.core.model.signal import NewSignalParam import guidata.dataset as gds
19,399
# -*- coding: utf-8 -*- # # Licensed under the terms of the BSD 3-Clause # (see cdl/LICENSE for details) """DataLab Signal Panel""" # pylint: disable=invalid-name # Allows short reference names like x, y, ... from __future__ import annotations if TYPE_CHECKING: # pragma: no cover class SignalPanel(BaseDataPanel): """Object handling the item list, the selected item properties and plot, specialized for Signal objects""" PANEL_STR = _("Signal panel") PARAMCLASS = SignalObj ANNOTATION_TOOLS = ( LabelTool, VCursorTool, HCursorTool, XCursorTool, SegmentTool, RectangleTool, HRangeTool, )
# -*- coding: utf-8 -*- # # Licensed under the terms of the BSD 3-Clause # (see cdl/LICENSE for details) """DataLab Signal Panel""" # pylint: disable=invalid-name # Allows short reference names like x, y, ... from __future__ import annotations if TYPE_CHECKING: # pragma: no cover class SignalPanel(BaseDataPanel): """Object handling the item list, the selected item properties and plot, specialized for Signal objects""" PANEL_STR = _("Signal panel") PARAMCLASS = SignalObj ANNOTATION_TOOLS = ( LabelTool, VCursorTool, HCursorTool, XCursorTool, SegmentTool, RectangleTool, HRangeTool, )
IO_REGISTRY = SignalIORegistry
6
2023-11-09 16:56:03+00:00
24k
ingra14m/Tensor4D-DNeRF
exp_runner.py
[ { "identifier": "Dataset", "path": "models/dataset.py", "snippet": "class Dataset:\n def __init__(self, conf):\n super(Dataset, self).__init__()\n print('Load data: Begin')\n self.device = torch.device('cuda')\n self.conf = conf\n\n self.data_dir = conf.get_string('...
import os import time import logging import argparse import numpy as np import cv2 as cv import torch import torch.nn.functional as F from torch.utils.tensorboard import SummaryWriter from shutil import copyfile from tqdm import tqdm from pyhocon import ConfigFactory from models.dataset import Dataset, BlenderDataset from models.fields import RenderingNetwork, FieldNetwork, SingleVarianceNetwork from models.tensor4d import Tensor4D from models.renderer import NeuSRenderer from models.mask import Mask3D from metrics import *
16,196
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True' class Runner: def __init__(self, conf_path, mode='train', case='CASE_NAME', is_continue=False): self.device = torch.device('cuda') # Configuration self.conf_path = conf_path f = open(self.conf_path) conf_text = f.read() conf_text = conf_text.replace('CASE_NAME', case) f.close() self.conf = ConfigFactory.parse_string(conf_text) self.conf['dataset.data_dir'] = self.conf['dataset.data_dir'].replace('CASE_NAME', case) self.base_exp_dir = self.conf['general.base_exp_dir'] os.makedirs(self.base_exp_dir, exist_ok=True) self.is_blender = self.conf['dataset'].get_bool('is_blender', default=False) self.dataset = BlenderDataset(self.conf['dataset']) if self.is_blender else Dataset(self.conf['dataset']) self.g_nums = self.conf['dataset']['g_nums'] self.iter_step = 0 self.flow = self.conf.get_bool('model.flow', default=False) # Training parameters self.end_iter = self.conf.get_int('train.end_iter') self.save_freq = self.conf.get_int('train.save_freq') self.report_freq = self.conf.get_int('train.report_freq') self.val_freq = self.conf.get_int('train.val_freq') self.batch_size = self.conf.get_int('train.batch_size') self.fine_level_iter = self.conf.get_int('train.fine_level_iter') self.downsample_iter = self.conf.get_int('train.downsample_iter') self.validate_resolution_level = self.conf.get_int('train.validate_resolution_level') self.learning_rate = self.conf.get_float('train.learning_rate') self.learning_rate_alpha = self.conf.get_float('train.learning_rate_alpha') self.use_white_bkgd = self.conf.get_bool('train.use_white_bkgd') self.warm_up_end = self.conf.get_float('train.warm_up_end', default=0.0) self.warm_up_imgs = self.conf.get_int('train.warm_up_imgs', default=50) self.anneal_end = self.conf.get_float('train.anneal_end', default=0.0) self.mask_color_loss = self.conf.get_bool('train.mask_color_loss') self.weighted_sample = self.conf.get_bool('train.weighted_sample') # Weights self.igr_weight = self.conf.get_float('train.igr_weight') self.tgr_weight = self.conf.get_float('train.tgr_weight') self.mask_weight = self.conf.get_float('train.mask_weight') self.tv_weight = self.conf.get_float('train.tv_weight') if self.tv_weight > 0: self.reg_l2 = True else: self.reg_l2 = False self.is_continue = is_continue self.mode = mode self.model_list = [] self.writer = None # Masks self.mask3d = Mask3D(**self.conf['model.mask3d'], num_frames=self.dataset.n_images // self.g_nums, device=self.device) # Networks self.tensor4d = Tensor4D(**self.conf['model.tensor4d']).to(self.device)
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True' class Runner: def __init__(self, conf_path, mode='train', case='CASE_NAME', is_continue=False): self.device = torch.device('cuda') # Configuration self.conf_path = conf_path f = open(self.conf_path) conf_text = f.read() conf_text = conf_text.replace('CASE_NAME', case) f.close() self.conf = ConfigFactory.parse_string(conf_text) self.conf['dataset.data_dir'] = self.conf['dataset.data_dir'].replace('CASE_NAME', case) self.base_exp_dir = self.conf['general.base_exp_dir'] os.makedirs(self.base_exp_dir, exist_ok=True) self.is_blender = self.conf['dataset'].get_bool('is_blender', default=False) self.dataset = BlenderDataset(self.conf['dataset']) if self.is_blender else Dataset(self.conf['dataset']) self.g_nums = self.conf['dataset']['g_nums'] self.iter_step = 0 self.flow = self.conf.get_bool('model.flow', default=False) # Training parameters self.end_iter = self.conf.get_int('train.end_iter') self.save_freq = self.conf.get_int('train.save_freq') self.report_freq = self.conf.get_int('train.report_freq') self.val_freq = self.conf.get_int('train.val_freq') self.batch_size = self.conf.get_int('train.batch_size') self.fine_level_iter = self.conf.get_int('train.fine_level_iter') self.downsample_iter = self.conf.get_int('train.downsample_iter') self.validate_resolution_level = self.conf.get_int('train.validate_resolution_level') self.learning_rate = self.conf.get_float('train.learning_rate') self.learning_rate_alpha = self.conf.get_float('train.learning_rate_alpha') self.use_white_bkgd = self.conf.get_bool('train.use_white_bkgd') self.warm_up_end = self.conf.get_float('train.warm_up_end', default=0.0) self.warm_up_imgs = self.conf.get_int('train.warm_up_imgs', default=50) self.anneal_end = self.conf.get_float('train.anneal_end', default=0.0) self.mask_color_loss = self.conf.get_bool('train.mask_color_loss') self.weighted_sample = self.conf.get_bool('train.weighted_sample') # Weights self.igr_weight = self.conf.get_float('train.igr_weight') self.tgr_weight = self.conf.get_float('train.tgr_weight') self.mask_weight = self.conf.get_float('train.mask_weight') self.tv_weight = self.conf.get_float('train.tv_weight') if self.tv_weight > 0: self.reg_l2 = True else: self.reg_l2 = False self.is_continue = is_continue self.mode = mode self.model_list = [] self.writer = None # Masks self.mask3d = Mask3D(**self.conf['model.mask3d'], num_frames=self.dataset.n_images // self.g_nums, device=self.device) # Networks self.tensor4d = Tensor4D(**self.conf['model.tensor4d']).to(self.device)
self.sdf_network = FieldNetwork(d_t4d=self.tensor4d.dims, **self.conf['model.sdf_network']).to(self.device)
3
2023-11-07 10:16:33+00:00
24k
Kushalhk/AutoFilter
utils.py
[ { "identifier": "AUTH_CHANNEL", "path": "info.py", "snippet": "AUTH_CHANNEL = int(auth_channel) if auth_channel and id_pattern.search(auth_channel) else None" }, { "identifier": "LONG_IMDB_DESCRIPTION", "path": "info.py", "snippet": "LONG_IMDB_DESCRIPTION = is_enabled(environ.get(\"LONG_...
import logging import asyncio import pytz import random import re import os import string import requests import aiohttp import http.client import json from pyrogram.errors import InputUserDeactivated, UserNotParticipant, FloodWait, UserIsBlocked, PeerIdInvalid from info import AUTH_CHANNEL, LONG_IMDB_DESCRIPTION, MAX_LIST_ELM, SHORTLINK_URL, SHORTLINK_API, IS_SHORTLINK, LOG_CHANNEL, TUTORIAL, GRP_LNK, CHNL_LNK, CUSTOM_FILE_CAPTION, SECOND_SHORTLINK_URL, SECOND_SHORTLINK_API from imdb import Cinemagoer from pyrogram.types import Message, InlineKeyboardButton, InlineKeyboardMarkup from pyrogram.errors import FloodWait, UserIsBlocked, MessageNotModified, PeerIdInvalid from pyrogram import enums from typing import Union from Script import script from datetime import datetime, date from typing import List from database.users_chats_db import db from bs4 import BeautifulSoup from shortzy import Shortzy
19,308
logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) BTN_URL_REGEX = re.compile( r"(\[([^\[]+?)\]\((buttonurl|buttonalert):(?:/{0,2})(.+?)(:same)?\))" ) imdb = Cinemagoer() TOKENS = {} VERIFIED = {} BANNED = {} SECOND_SHORTENER = {} SMART_OPEN = '“' SMART_CLOSE = '”' START_CHAR = ('\'', '"', SMART_OPEN) # temp db for banned class temp(object): BANNED_USERS = [] BANNED_CHATS = [] ME = None CURRENT=int(os.environ.get("SKIP", 2)) CANCEL = False MELCOW = {} U_NAME = None B_NAME = None GETALL = {} SHORT = {} SETTINGS = {} async def is_subscribed(bot, query): try:
logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) BTN_URL_REGEX = re.compile( r"(\[([^\[]+?)\]\((buttonurl|buttonalert):(?:/{0,2})(.+?)(:same)?\))" ) imdb = Cinemagoer() TOKENS = {} VERIFIED = {} BANNED = {} SECOND_SHORTENER = {} SMART_OPEN = '“' SMART_CLOSE = '”' START_CHAR = ('\'', '"', SMART_OPEN) # temp db for banned class temp(object): BANNED_USERS = [] BANNED_CHATS = [] ME = None CURRENT=int(os.environ.get("SKIP", 2)) CANCEL = False MELCOW = {} U_NAME = None B_NAME = None GETALL = {} SHORT = {} SETTINGS = {} async def is_subscribed(bot, query): try:
user = await bot.get_chat_member(AUTH_CHANNEL, query.from_user.id)
0
2023-11-03 12:21:26+00:00
24k
apple/ml-reed
reed/algorithms/pebble.py
[ { "identifier": "utils", "path": "BPref/utils.py", "snippet": "def make_env(cfg):\ndef ppo_make_env(env_id, seed):\ndef tie_weights(src, trg):\ndef make_metaworld_env(cfg):\ndef ppo_make_metaworld_env(env_id, seed):\n def __init__(self, *models):\n def __enter__(self):\n def __exit__(self, *arg...
import typing as t import time import numpy as np import torch import hydra from pathlib import Path from omegaconf import dictconfig, OmegaConf from BPref import utils from BPref.logger import Logger from BPref.replay_buffer import TrajectoryReplayBuffer from collections import deque from reed.models.reward_model import StateActionRewardModel from reed.data.preference_dataset import PreferenceDataset from reed.data.preference_data_loader import PreferenceTripletEnsembleDataLoader from reed.data.preprocess_images import PreProcessInference
21,223
Returns: the dimensionality of reward's observation space """ if self.experiment_config.reward_from_image_observations: # get a sample image rendering of the environment and get its shape self.env.reset() if "metaworld" in self.experiment_config.env: start_time = time.time() img_obs = self.env.render(camera_name=self.experiment_config.camera_name, resolution=( self.experiment_config.image_height, self.experiment_config.image_width)) end_time = time.time() print(f"Sample render time for metaworld is {end_time - start_time} seconds") else: start_time = time.time() img_obs = self.env.render(mode="rgb_array", height=self.experiment_config.image_height, width=self.experiment_config.image_width) end_time = time.time() print(f"Sample render time for DMC is {end_time - start_time} seconds") observation_space = img_obs.shape print("--------------------------") print("--------------------------") print("--------------------------") print("image observation shape", observation_space) print("--------------------------") print("--------------------------") print("--------------------------") else: observation_space = self.env.observation_space.shape[0] return observation_space def _render_image_observation(self) -> np.ndarray: """ Render the current image observation """ if "metaworld" in self.experiment_config.env: img_obs = self.env.render(camera_name=self.experiment_config.camera_name, resolution=( self.experiment_config.image_height, self.experiment_config.image_width)) else: img_obs = self.env.render(mode="rgb_array", height=self.experiment_config.image_height, width=self.experiment_config.image_width) return img_obs def construct_reward_ensemble(self) -> StateActionRewardModel: """ Create the reward ensemble as specified in the experiment config. """ return StateActionRewardModel( in_dim=self.reward_in_dim, ensemble_size=self.experiment_config.ensemble_size, hidden_dim=self.experiment_config.reward_hidden_embed_dim, hidden_layers=self.experiment_config.reward_num_hidden_layers, final_activation=self.experiment_config.activation, lr=self.experiment_config.reward_lr, reward_train_batch=self.experiment_config.reward_train_batch, size_segment=self.experiment_config.segment_size, device=self.device, multi_gpu=self.multi_gpu, image_observations=self.experiment_config.reward_from_image_observations, image_encoder_architecture=self.experiment_config.image_encoder_architecture, image_hidden_num_channels=self.experiment_config.image_hidden_num_channels, grayscale_images=self.experiment_config.grayscale_images ) def evaluate(self): average_episode_reward = 0 average_true_episode_reward = 0 success_rate = 0 for episode in range(self.experiment_config.num_eval_episodes): obs = self.env.reset() self.agent.reset() done = False episode_reward = 0 true_episode_reward = 0 if self.log_success: episode_success = 0 while not done: with utils.eval_mode(self.agent): action = self.agent.act(obs, sample=False) obs, reward, done, extra = self.env.step(action) episode_reward += reward true_episode_reward += reward if self.log_success: episode_success = max(episode_success, extra['success']) average_episode_reward += episode_reward average_true_episode_reward += true_episode_reward if self.log_success: success_rate += episode_success average_episode_reward /= self.experiment_config.num_eval_episodes average_true_episode_reward /= self.experiment_config.num_eval_episodes if self.log_success: success_rate /= self.experiment_config.num_eval_episodes success_rate *= 100.0 self.logger.log('eval/episode_reward', 
average_episode_reward, self.step) self.logger.log('eval/true_episode_reward', average_true_episode_reward, self.step) if self.log_success: self.logger.log('eval/success_rate', success_rate, self.step) self.logger.log('train/true_episode_success', success_rate, self.step) self.logger.dump(self.step) def train_reward_on_preferences(self) -> t.Optional[float]: """ Update the reward model on the current preference dataset Returns: train accuracy on the current reward model update round, if the preference dataset contains samples """ # create the data loader that will be used to train the reward model
# # For licensing see accompanying LICENSE file. # Copyright (C) 2023 Apple Inc. All Rights Reserved. # class PEBBLE: """ Train a reward model in conjunction with policy training following the PEBBLE algorithm from (Lee et al. 2021) """ def __init__(self, experiment_config: dictconfig.DictConfig): """ Args: experiment_config: contains the configuration for the experiment to be run. Access like a dictionry """ # track the experimental configuration self.experiment_config = experiment_config # create the logger to track policy learning progress self.logger = Logger( self.experiment_config.out_dir, save_tb=self.experiment_config.log_save_tb, log_frequency=self.experiment_config.log_frequency, agent=self.experiment_config.agent.name) # used to track where we are in training # total amount of feedback the reward model has solicited self.total_feedback = 0 # total amount of feedback given to the reward model self.labeled_feedback = 0 # policy train step self.step = 0 # we need to set the random seed for replication purposes utils.set_seed_everywhere(self.experiment_config.seed) # the device on which models will be trained self.device = torch.device(self.experiment_config.device) # flag to make sure we are handling multi-gpu training where we need to self.multi_gpu = torch.cuda.device_count() > 1 print("----------------------------------------") print("----------------------------------------") print("----------------------------------------") print("----------------------------------------") print(f"There is {torch.cuda.device_count()} GPU, so models will be trained with torch.nn.DataParallel.") print("----------------------------------------") print("----------------------------------------") print("----------------------------------------") print("----------------------------------------") # make the environment if 'metaworld' in self.experiment_config.env: self.env = utils.make_metaworld_env(self.experiment_config) # we are not evaluating a domain where we need to log whether an agent has reached a goal state self.log_success = True else: self.env = utils.make_env(self.experiment_config) # we are not evaluating a domain where we need to log whether an agent has reached a goal state self.log_success = False print('----------------------') print('----------------------') print('----------------------') print('----------------------') print("observation space ", self.env.observation_space.shape[0]) print("action space ", self.env.action_space.shape[0]) print('----------------------') print('----------------------') print('----------------------') print('----------------------') # we need to set the policy's observation and action space self.experiment_config.agent.params.obs_dim = self.env.observation_space.shape[0] self.experiment_config.agent.params.action_dim = self.env.action_space.shape[0] self.experiment_config.agent.params.action_range = [ float(self.env.action_space.low.min()), float(self.env.action_space.high.max()) ] # create the agent specified in the configuration self.agent = hydra.utils.instantiate(self.experiment_config.agent) # the class that will format the observations and observation action pairs for consumption by the reward model self._reward_input_preprocessor = PreProcessInference( image_observations=self.experiment_config.reward_from_image_observations, grayscale_images=self.experiment_config.grayscale_images, normalize_images=self.experiment_config.normalized_images) # determine the reward's observation space # if the reward is trained on images then the reward's 
observation space differs from the policy's, which is # trained on the state space self._observation_dimensionality = self._determine_observation_dimensions() self._reward_observation_dimensionality = self._determine_reward_observation_dimensions() # create the agent's replay buffer setting if image observations will need to be tracked self.replay_buffer = TrajectoryReplayBuffer( int(self.experiment_config.replay_buffer_capacity), self.device, image_observations=(self._observation_dimensionality if (self.experiment_config.reward_from_image_observations or self.experiment_config.save_image_observations) else None) ) # determine the dimensionality of the input to the reward function self.reward_in_dim = self._determine_reward_input_dimensions( observation_dim=self._reward_observation_dimensionality, action_dim=self.env.action_space.shape[0]) # instantiating the reward model self.reward_model = self.construct_reward_ensemble() # create the preference dataset that will solicit and hold labelled preference triplets self.preference_dataset = PreferenceDataset( observation_dim=self._reward_observation_dimensionality, action_dim=self.env.action_space.shape[0], capacity=self.experiment_config.preference_dataset_capacity, size_segment=self.experiment_config.segment_size, out_path=Path("/tmp/preference_dataset/"), image_observations=self.experiment_config.reward_from_image_observations, state_action_formatter=self._reward_input_preprocessor, grayscale_images=self.experiment_config.grayscale_images, collect_image_pref_dataset=self.experiment_config.save_image_observations, teacher_beta=self.experiment_config.teacher_beta, teacher_gamma=self.experiment_config.teacher_gamma, teacher_eps_mistake=self.experiment_config.teacher_eps_mistake, teacher_eps_skip=self.experiment_config.teacher_eps_skip, teacher_eps_equal=self.experiment_config.teacher_eps_equal ) # save the experimental configuration with open(Path(self.experiment_config.out_dir) / "experiment_config.yaml", "w+") as f: OmegaConf.save(config=self.experiment_config, f=f) def _determine_reward_input_dimensions(self, observation_dim: t.Union[int, np.ndarray], action_dim: int) -> t.Union[int, t.Sequence]: """ Determine the dimensionality of the inputs to the reward model Args: observation_dim: the dimensionality of agent observations. If the observation is an image, the dimensionality should have the following order: (num_channels, height, width) action_dim: the dimensionality of agent actions Returns: the dimensionality of the reward model's inputs """ # compute the dimensions of the input to the reward function if not self.experiment_config.reward_from_image_observations: return observation_dim + action_dim else: # we need to concatenate the actions to last dimension of the image # the input to the reward net also needs to have the channels first # the image dimensions are given to us a (height, width, channels) sample_shape = list(observation_dim) if self.experiment_config.grayscale_images: num_channels = action_dim + 1 else: num_channels = sample_shape[0] + action_dim # update the number of channels sample_shape[0] = num_channels # the dimensions of the input to the reward model return sample_shape def _determine_reward_observation_dimensions(self) -> t.Union[int, np.ndarray]: """ Check if the reward will use the image observations. 
If so the reward input shape needs to be set accordingly Returns: the dimensionality of reward's observation space """ if self.experiment_config.reward_from_image_observations: # get a sample image rendering of the environment and get its shape self.env.reset() if "metaworld" in self.experiment_config.env: start_time = time.time() img_obs = self.env.render(camera_name=self.experiment_config.camera_name, resolution=( self.experiment_config.image_height, self.experiment_config.image_width)) end_time = time.time() print(f"Sample render time for metaworld is {end_time - start_time} seconds") else: start_time = time.time() img_obs = self.env.render(mode="rgb_array", height=self.experiment_config.image_height, width=self.experiment_config.image_width) end_time = time.time() print(f"Sample render time for DMC is {end_time - start_time} seconds") formatted_image_observation = self._reward_input_preprocessor.format_state(img_obs).squeeze(axis=0) observation_space = formatted_image_observation.shape print("--------------------------") print("--------------------------") print("--------------------------") print("image observation shape", observation_space) print("--------------------------") print("--------------------------") print("--------------------------") else: observation_space = self.env.observation_space.shape[0] return observation_space def _determine_observation_dimensions(self) -> t.Union[int, np.ndarray]: """ Check if the reward will use the image observations. If so the replay buffer needs to be set up to accumulate the image observations Returns: the dimensionality of reward's observation space """ if self.experiment_config.reward_from_image_observations: # get a sample image rendering of the environment and get its shape self.env.reset() if "metaworld" in self.experiment_config.env: start_time = time.time() img_obs = self.env.render(camera_name=self.experiment_config.camera_name, resolution=( self.experiment_config.image_height, self.experiment_config.image_width)) end_time = time.time() print(f"Sample render time for metaworld is {end_time - start_time} seconds") else: start_time = time.time() img_obs = self.env.render(mode="rgb_array", height=self.experiment_config.image_height, width=self.experiment_config.image_width) end_time = time.time() print(f"Sample render time for DMC is {end_time - start_time} seconds") observation_space = img_obs.shape print("--------------------------") print("--------------------------") print("--------------------------") print("image observation shape", observation_space) print("--------------------------") print("--------------------------") print("--------------------------") else: observation_space = self.env.observation_space.shape[0] return observation_space def _render_image_observation(self) -> np.ndarray: """ Render the current image observation """ if "metaworld" in self.experiment_config.env: img_obs = self.env.render(camera_name=self.experiment_config.camera_name, resolution=( self.experiment_config.image_height, self.experiment_config.image_width)) else: img_obs = self.env.render(mode="rgb_array", height=self.experiment_config.image_height, width=self.experiment_config.image_width) return img_obs def construct_reward_ensemble(self) -> StateActionRewardModel: """ Create the reward ensemble as specified in the experiment config. 
""" return StateActionRewardModel( in_dim=self.reward_in_dim, ensemble_size=self.experiment_config.ensemble_size, hidden_dim=self.experiment_config.reward_hidden_embed_dim, hidden_layers=self.experiment_config.reward_num_hidden_layers, final_activation=self.experiment_config.activation, lr=self.experiment_config.reward_lr, reward_train_batch=self.experiment_config.reward_train_batch, size_segment=self.experiment_config.segment_size, device=self.device, multi_gpu=self.multi_gpu, image_observations=self.experiment_config.reward_from_image_observations, image_encoder_architecture=self.experiment_config.image_encoder_architecture, image_hidden_num_channels=self.experiment_config.image_hidden_num_channels, grayscale_images=self.experiment_config.grayscale_images ) def evaluate(self): average_episode_reward = 0 average_true_episode_reward = 0 success_rate = 0 for episode in range(self.experiment_config.num_eval_episodes): obs = self.env.reset() self.agent.reset() done = False episode_reward = 0 true_episode_reward = 0 if self.log_success: episode_success = 0 while not done: with utils.eval_mode(self.agent): action = self.agent.act(obs, sample=False) obs, reward, done, extra = self.env.step(action) episode_reward += reward true_episode_reward += reward if self.log_success: episode_success = max(episode_success, extra['success']) average_episode_reward += episode_reward average_true_episode_reward += true_episode_reward if self.log_success: success_rate += episode_success average_episode_reward /= self.experiment_config.num_eval_episodes average_true_episode_reward /= self.experiment_config.num_eval_episodes if self.log_success: success_rate /= self.experiment_config.num_eval_episodes success_rate *= 100.0 self.logger.log('eval/episode_reward', average_episode_reward, self.step) self.logger.log('eval/true_episode_reward', average_true_episode_reward, self.step) if self.log_success: self.logger.log('eval/success_rate', success_rate, self.step) self.logger.log('train/true_episode_success', success_rate, self.step) self.logger.dump(self.step) def train_reward_on_preferences(self) -> t.Optional[float]: """ Update the reward model on the current preference dataset Returns: train accuracy on the current reward model update round, if the preference dataset contains samples """ # create the data loader that will be used to train the reward model
preference_data_loader = PreferenceTripletEnsembleDataLoader(
5
2023-11-06 23:14:20+00:00
24k
alibaba/animate-anything
train.py
[ { "identifier": "VideoJsonDataset", "path": "utils/dataset.py", "snippet": "class VideoJsonDataset(Dataset):\n def __init__(\n self,\n tokenizer=None,\n width: int = 256,\n height: int = 256,\n n_sample_frames: int = 16,\n fps: int = 8,\n video_dir: st...
import argparse import datetime import logging import inspect import math import os import json import gc import copy import random import cv2 import torch import torch.nn.functional as F import torch.utils.checkpoint import torchvision.transforms as T import diffusers import transformers import numpy as np import imageio import itertools import bitsandbytes as bnb from typing import Dict, Optional, Tuple from omegaconf import OmegaConf from tqdm.auto import tqdm from PIL import Image from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from diffusers.models import AutoencoderKL from diffusers import DPMSolverMultistepScheduler, DDPMScheduler from diffusers.image_processor import VaeImageProcessor from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version, export_to_video from diffusers.utils.import_utils import is_xformers_available from diffusers.models.attention_processor import AttnProcessor2_0, Attention from diffusers.models.attention import BasicTransformerBlock from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth import tensor2vid from transformers import CLIPTextModel, CLIPTokenizer from transformers.models.clip.modeling_clip import CLIPEncoder from utils.dataset import VideoJsonDataset, SingleVideoDataset, \ ImageDataset, VideoFolderDataset, CachedDataset, VideoBLIPDataset from einops import rearrange, repeat from models.unet_3d_condition_mask import UNet3DConditionModel from models.pipeline import LatentToVideoPipeline from utils.lora_handler import LoraHandler, LORA_VERSIONS from utils.common import read_mask, generate_random_mask, slerp, calculate_motion_score, \ read_video, calculate_motion_precision, calculate_latent_motion_score, \ DDPM_forward, DDPM_forward_timesteps, DDPM_forward_mask, motion_mask_loss, \ generate_center_mask, tensor_to_vae_latent from xformers.ops import MemoryEfficientAttentionFlashAttentionOp
19,661
return { "model": model, "condition": condition, 'extra_params': extra_params, 'is_lora': is_lora, "negation": negation } def create_optim_params(name='param', params=None, lr=5e-6, extra_params=None): params = { "name": name, "params": params, "lr": lr } if extra_params is not None: for k, v in extra_params.items(): params[k] = v return params def negate_params(name, negation): # We have to do this if we are co-training with LoRA. # This ensures that parameter groups aren't duplicated. if negation is None: return False for n in negation: if n in name and 'temp' not in name: return True return False def create_optimizer_params(model_list, lr): optimizer_params = [] for optim in model_list: model, condition, extra_params, is_lora, negation = optim.values() # Check if we are doing LoRA training. if is_lora and condition and isinstance(model, list): params = create_optim_params( params=itertools.chain(*model), extra_params=extra_params ) optimizer_params.append(params) continue if is_lora and condition and not isinstance(model, list): for n, p in model.named_parameters(): if 'lora' in n: params = create_optim_params(n, p, lr, extra_params) optimizer_params.append(params) continue # If this is true, we can train it. if condition: for n, p in model.named_parameters(): should_negate = 'lora' in n and not is_lora if should_negate: continue params = create_optim_params(n, p, lr, extra_params) optimizer_params.append(params) return optimizer_params def get_optimizer(use_8bit_adam): if use_8bit_adam: try: except ImportError: raise ImportError( "Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`" ) return bnb.optim.AdamW8bit else: return torch.optim.AdamW def is_mixed_precision(accelerator): weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 return weight_dtype def cast_to_gpu_and_type(model_list, device, weight_dtype): for model in model_list: if model is not None: model.to(device, dtype=weight_dtype) def handle_cache_latents( should_cache, output_dir, train_dataloader, train_batch_size, vae, cached_latent_dir=None, shuffle=False ): # Cache latents by storing them in VRAM. # Speeds up training and saves memory by not encoding during the train loop. if not should_cache: return None vae.to('cuda', dtype=torch.float16) vae.enable_slicing() cached_latent_dir = ( os.path.abspath(cached_latent_dir) if cached_latent_dir is not None else None ) if cached_latent_dir is None: cache_save_dir = f"{output_dir}/cached_latents" os.makedirs(cache_save_dir, exist_ok=True) for i, batch in enumerate(tqdm(train_dataloader, desc="Caching Latents.")): save_name = f"cached_{i}" full_out_path = f"{cache_save_dir}/{save_name}.pt" pixel_values = batch['pixel_values'].to('cuda', dtype=torch.float16)
already_printed_trainables = False logger = get_logger(__name__, log_level="INFO") def create_logging(logging, logger, accelerator): logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) def accelerate_set_verbose(accelerator): if accelerator.is_local_main_process: transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() def get_train_dataset(dataset_types, train_data, tokenizer): train_datasets = [] dataset_cls = [VideoJsonDataset, SingleVideoDataset, ImageDataset, VideoFolderDataset, VideoBLIPDataset] dataset_map = {d.__getname__(): d for d in dataset_cls} # Loop through all available datasets, get the name, then add to list of data to process. for dataset in dataset_types: if dataset in dataset_map: train_datasets.append(dataset_map[dataset](**train_data, tokenizer=tokenizer)) else: raise ValueError(f"Dataset type not found: {dataset} not in {dataset_map.keys()}") return train_datasets def extend_datasets(datasets, dataset_items, extend=False): biggest_data_len = max(x.__len__() for x in datasets) extended = [] for dataset in datasets: if dataset.__len__() == 0: del dataset continue if dataset.__len__() < biggest_data_len: for item in dataset_items: if extend and item not in extended and hasattr(dataset, item): print(f"Extending {item}") value = getattr(dataset, item) value *= biggest_data_len value = value[:biggest_data_len] setattr(dataset, item, value) print(f"New {item} dataset length: {dataset.__len__()}") extended.append(item) def export_to_video(video_frames, output_video_path, fps): fourcc = cv2.VideoWriter_fourcc(*"mp4v") h, w, _ = video_frames[0].shape video_writer = cv2.VideoWriter(output_video_path, fourcc, fps=fps, frameSize=(w, h)) for i in range(len(video_frames)): img = cv2.cvtColor(video_frames[i], cv2.COLOR_RGB2BGR) video_writer.write(img) def create_output_folders(output_dir, config): now = datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S") out_dir = os.path.join(output_dir, f"train_{now}") os.makedirs(out_dir, exist_ok=True) os.makedirs(f"{out_dir}/samples", exist_ok=True) OmegaConf.save(config, os.path.join(out_dir, 'config.yaml')) return out_dir def load_primary_models(pretrained_model_path, motion_mask, motion_strength): noise_scheduler = DDPMScheduler.from_pretrained(pretrained_model_path, subfolder="scheduler") tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_path, subfolder="tokenizer") text_encoder = CLIPTextModel.from_pretrained(pretrained_model_path, subfolder="text_encoder") vae = AutoencoderKL.from_pretrained(pretrained_model_path, subfolder="vae") unet = UNet3DConditionModel.from_pretrained(pretrained_model_path, subfolder="unet", low_cpu_mem_usage=False, device_map=None, motion_mask=motion_mask, motion_strength=motion_strength) if pretrained_model_path.endswith('zeroscope_v2_576w'): #first time init, modify unet conv in2 unet.conv_in2.bias.data = copy.deepcopy(unet.conv_in.bias) torch.nn.init.zeros_(unet.conv_in2.weight) unet.conv_in2.weight.data[:,1:]= copy.deepcopy(unet.conv_in.weight) return noise_scheduler, tokenizer, text_encoder, vae, unet def unet_and_text_g_c(unet, text_encoder, unet_enable, text_enable): unet._set_gradient_checkpointing(value=unet_enable) if text_enable: text_encoder.gradient_checkpointing_enable() else: 
text_encoder.gradient_checkpointing_disable() def freeze_models(models_to_freeze): for model in models_to_freeze: if model is not None: model.requires_grad_(False) def is_attn(name): return ('attn1' or 'attn2' == name.split('.')[-1]) def set_processors(attentions): for attn in attentions: attn.set_processor(AttnProcessor2_0()) def set_torch_2_attn(unet): optim_count = 0 for name, module in unet.named_modules(): if is_attn(name): if isinstance(module, torch.nn.ModuleList): for m in module: if isinstance(m, BasicTransformerBlock): set_processors([m.attn1, m.attn2]) optim_count += 1 if optim_count > 0: print(f"{optim_count} Attention layers using Scaled Dot Product Attention.") def handle_memory_attention(enable_xformers_memory_efficient_attention, enable_torch_2_attn, unet): try: is_torch_2 = hasattr(F, 'scaled_dot_product_attention') enable_torch_2 = is_torch_2 and enable_torch_2_attn if enable_xformers_memory_efficient_attention and not enable_torch_2: if is_xformers_available(): unet.enable_xformers_memory_efficient_attention(attention_op=MemoryEfficientAttentionFlashAttentionOp) else: raise ValueError("xformers is not available. Make sure it is installed correctly") if enable_torch_2: set_torch_2_attn(unet) except: print("Could not enable memory efficient attention for xformers or Torch 2.0.") def param_optim(model, condition, extra_params=None, is_lora=False, negation=None): extra_params = extra_params if len(extra_params.keys()) > 0 else None return { "model": model, "condition": condition, 'extra_params': extra_params, 'is_lora': is_lora, "negation": negation } def create_optim_params(name='param', params=None, lr=5e-6, extra_params=None): params = { "name": name, "params": params, "lr": lr } if extra_params is not None: for k, v in extra_params.items(): params[k] = v return params def negate_params(name, negation): # We have to do this if we are co-training with LoRA. # This ensures that parameter groups aren't duplicated. if negation is None: return False for n in negation: if n in name and 'temp' not in name: return True return False def create_optimizer_params(model_list, lr): optimizer_params = [] for optim in model_list: model, condition, extra_params, is_lora, negation = optim.values() # Check if we are doing LoRA training. if is_lora and condition and isinstance(model, list): params = create_optim_params( params=itertools.chain(*model), extra_params=extra_params ) optimizer_params.append(params) continue if is_lora and condition and not isinstance(model, list): for n, p in model.named_parameters(): if 'lora' in n: params = create_optim_params(n, p, lr, extra_params) optimizer_params.append(params) continue # If this is true, we can train it. if condition: for n, p in model.named_parameters(): should_negate = 'lora' in n and not is_lora if should_negate: continue params = create_optim_params(n, p, lr, extra_params) optimizer_params.append(params) return optimizer_params def get_optimizer(use_8bit_adam): if use_8bit_adam: try: except ImportError: raise ImportError( "Please install bitsandbytes to use 8-bit Adam. 
You can do so by running `pip install bitsandbytes`" ) return bnb.optim.AdamW8bit else: return torch.optim.AdamW def is_mixed_precision(accelerator): weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 return weight_dtype def cast_to_gpu_and_type(model_list, device, weight_dtype): for model in model_list: if model is not None: model.to(device, dtype=weight_dtype) def handle_cache_latents( should_cache, output_dir, train_dataloader, train_batch_size, vae, cached_latent_dir=None, shuffle=False ): # Cache latents by storing them in VRAM. # Speeds up training and saves memory by not encoding during the train loop. if not should_cache: return None vae.to('cuda', dtype=torch.float16) vae.enable_slicing() cached_latent_dir = ( os.path.abspath(cached_latent_dir) if cached_latent_dir is not None else None ) if cached_latent_dir is None: cache_save_dir = f"{output_dir}/cached_latents" os.makedirs(cache_save_dir, exist_ok=True) for i, batch in enumerate(tqdm(train_dataloader, desc="Caching Latents.")): save_name = f"cached_{i}" full_out_path = f"{cache_save_dir}/{save_name}.pt" pixel_values = batch['pixel_values'].to('cuda', dtype=torch.float16)
batch['pixel_values'] = tensor_to_vae_latent(pixel_values, vae)
22
2023-12-07 08:26:29+00:00
24k
modelscope/richdreamer
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Flo...
import numpy as np import os import torch import torch.nn as nn import torch.nn.functional as F import threestudio import trimesh from dataclasses import dataclass, field from threestudio.models.geometry.base import (BaseExplicitGeometry, BaseGeometry, contract_to_unisphere,) from threestudio.models.geometry.implicit_sdf import ImplicitSDF from threestudio.models.geometry.implicit_volume import ImplicitVolume from threestudio.models.isosurface import MarchingTetrahedraHelper from threestudio.models.mesh import Mesh from threestudio.models.networks import get_encoding, get_mlp from threestudio.utils.misc import broadcast, get_rank from threestudio.utils.ops import scale_tensor from threestudio.utils.typing import * from pysdf import SDF
15,175
if self.cfg.shape_init == "ellipsoid": assert ( isinstance(self.cfg.shape_init_params, Sized) and len(self.cfg.shape_init_params) == 3 ) size = torch.as_tensor(self.cfg.shape_init_params).to(self.device) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return ((points_rand / size) ** 2).sum( dim=-1, keepdim=True ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): raise ValueError(f"Mesh file {mesh_path} does not exist.") mesh = trimesh.load(mesh_path) # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 nerf_scale: float = 1.0 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None shape_init_mesh_up: str = "+z" shape_init_mesh_front: str = "+x" force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False # sdf_bias: Union[float, str] = 0.0 # sdf_bias_params: Optional[Any] = None cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: if self.cfg.shape_init is None and not self.cfg.force_shape_init: return # do not initialize shape if weights are provided if self.cfg.weights is not None and not self.cfg.force_shape_init: return get_gt_sdf: Callable[[Float[Tensor, "N 3"]], Float[Tensor, "N 1"]] assert isinstance(self.cfg.shape_init, str) if self.cfg.shape_init == "ellipsoid": assert ( isinstance(self.cfg.shape_init_params, Sized) and len(self.cfg.shape_init_params) == 3 ) size = torch.as_tensor(self.cfg.shape_init_params).to(self.device) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return ((points_rand / size) ** 2).sum( dim=-1, keepdim=True ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert 
isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): raise ValueError(f"Mesh file {mesh_path} does not exist.") mesh = trimesh.load(mesh_path) # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale
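The mesh: branch of the same initializer builds a rotation that takes the mesh from its configured up/front axes into the canonical up-z / front-x frame. A numpy-only sketch of that construction (the axis strings and the example vertex are illustrative):

import numpy as np

DIR2VEC = {
    "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]),
    "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]),
}

def mesh_to_std_rotation(up: str, front: str) -> np.ndarray:
    # z_ is the mesh's up axis, x_ its front axis; y_ completes a right-handed frame.
    z_, x_ = DIR2VEC[up], DIR2VEC[front]
    if up[1] == front[1]:
        raise ValueError("up and front axes must be orthogonal")
    y_ = np.cross(z_, x_)
    std2mesh = np.stack([x_, y_, z_], axis=0).T  # columns are the mesh-frame axes
    return np.linalg.inv(std2mesh)               # mesh2std: maps mesh coords into the canonical frame

if __name__ == "__main__":
    mesh2std = mesh_to_std_rotation("+y", "+x")  # e.g. a Y-up, X-front mesh
    vertex = np.array([[0.0, 1.0, 0.0]])         # a point on the mesh's up axis
    print(np.dot(mesh2std, vertex.T).T)          # -> [[0. 0. 1.]], now on +z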
points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1)
2
2023-12-06 07:53:11+00:00
24k
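The isosurface method in the record above maps tetrahedral grid vertices from the helper's canonical points_range into the geometry's bounding box via scale_tensor. The threestudio implementation is not reproduced here; this is only a small torch sketch of the affine remapping it is used for (ranges and vertices are made up):

import torch

def scale_tensor(x: torch.Tensor, src: tuple, dst: torch.Tensor) -> torch.Tensor:
    # Remap x from the scalar range [src[0], src[1]] into the per-axis box dst = [mins, maxs].
    x01 = (x - src[0]) / (src[1] - src[0])
    return x01 * (dst[1] - dst[0]) + dst[0]

if __name__ == "__main__":
    verts = torch.tensor([[0.0, 0.5, 1.0]])                    # grid vertices in [0, 1]
    bbox = torch.tensor([[-1.0, -1.0, -1.0], [1.0, 1.0, 1.0]])
    print(scale_tensor(verts, (0.0, 1.0), bbox))               # -> [[-1., 0., 1.]]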
rehg-lab/RAVE
annotator/oneformer/oneformer/demo/visualizer.py
[ { "identifier": "MetadataCatalog", "path": "annotator/oneformer/detectron2/data/catalog.py", "snippet": "class _DatasetCatalog(UserDict):\r\nclass Metadata(types.SimpleNamespace):\r\nclass _MetadataCatalog(UserDict):\r\n def register(self, name, func):\r\n def get(self, name):\r\n def list(self...
import colorsys
import logging
import math
import numpy as np
import cv2
import matplotlib as mpl
import matplotlib.colors as mplc
import matplotlib.figure as mplfigure
import annotator.oneformer.pycocotools.mask as mask_util
import torch
import random
from enum import Enum, unique
from matplotlib.backends.backend_agg import FigureCanvasAgg
from PIL import Image
from annotator.oneformer.detectron2.data import MetadataCatalog
from annotator.oneformer.detectron2.structures import BitMasks, Boxes, BoxMode, Keypoints, PolygonMasks, RotatedBoxes
from annotator.oneformer.detectron2.utils.file_io import PathManager
from .colormap import random_color, _COLORS
17,322
self._draw_text_in_mask(binary_mask, text, lighter_color) return self.output def draw_soft_mask(self, soft_mask, color=None, *, text=None, alpha=0.5): """ Args: soft_mask (ndarray): float array of shape (H, W), each value in [0, 1]. color: color of the mask. Refer to `matplotlib.colors` for a full list of formats that are accepted. If None, will pick a random color. text (str): if None, will be drawn on the object alpha (float): blending efficient. Smaller values lead to more transparent masks. Returns: output (VisImage): image object with mask drawn. """ if color is None: color = random_color(rgb=True, maximum=1) color = mplc.to_rgb(color) shape2d = (soft_mask.shape[0], soft_mask.shape[1]) rgba = np.zeros(shape2d + (4,), dtype="float32") rgba[:, :, :3] = color rgba[:, :, 3] = soft_mask * alpha self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0)) if text is not None: lighter_color = self._change_color_brightness(color, brightness_factor=0.7) binary_mask = (soft_mask > 0.5).astype("uint8") # self._draw_text_in_mask(binary_mask, text, lighter_color) return self.output def draw_polygon(self, segment, color, edge_color=None, alpha=0.5): """ Args: segment: numpy array of shape Nx2, containing all the points in the polygon. color: color of the polygon. Refer to `matplotlib.colors` for a full list of formats that are accepted. edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a full list of formats that are accepted. If not provided, a darker shade of the polygon color will be used instead. alpha (float): blending efficient. Smaller values lead to more transparent masks. Returns: output (VisImage): image object with polygon drawn. """ if edge_color is None: # make edge color darker than the polygon color if alpha > 0.8: edge_color = self._change_color_brightness(color, brightness_factor=-0.7) else: edge_color = color edge_color = mplc.to_rgb(edge_color) + (1,) polygon = mpl.patches.Polygon( segment, fill=True, facecolor=mplc.to_rgb(color) + (alpha,), edgecolor=edge_color, linewidth=max(self._default_font_size // 15 * self.output.scale, 1), ) self.output.ax.add_patch(polygon) return self.output """ Internal methods: """ def _jitter(self, color): """ Randomly modifies given color to produce a slightly different color than the color given. Args: color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color picked. The values in the list are in the [0.0, 1.0] range. Returns: jittered_color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color after being jittered. The values in the list are in the [0.0, 1.0] range. """ color = mplc.to_rgb(color) vec = np.random.rand(3) # better to do it in another color space vec = vec / np.linalg.norm(vec) * 0.5 res = np.clip(vec + color, 0, 1) return tuple(res) def _create_grayscale_image(self, mask=None): """ Create a grayscale version of the original image. The colors in masked area, if given, will be kept. """ img_bw = self.img.astype("f4").mean(axis=2) img_bw = np.stack([img_bw] * 3, axis=2) if mask is not None: img_bw[mask] = self.img[mask] return img_bw def _change_color_brightness(self, color, brightness_factor): """ Depending on the brightness_factor, gives a lighter or darker color i.e. a color with less or more saturation than the original color. Args: color: color of the polygon. Refer to `matplotlib.colors` for a full list of formats that are accepted. brightness_factor (float): a value in [-1.0, 1.0] range. 
A lightness factor of 0 will correspond to no change, a factor in [-1.0, 0) range will result in a darker color and a factor in (0, 1.0] range will result in a lighter color. Returns: modified_color (tuple[double]): a tuple containing the RGB values of the modified color. Each value in the tuple is in the [0.0, 1.0] range. """ assert brightness_factor >= -1.0 and brightness_factor <= 1.0 color = mplc.to_rgb(color) polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color)) modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1]) modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness, polygon_color[2]) return modified_color def _convert_boxes(self, boxes): """ Convert different format of boxes to an NxB array, where B = 4 or 5 is the box dimension. """
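The _change_color_brightness helper shown above scales a color's lightness in HLS space to derive lighter label colors and darker polygon edges. A standalone sketch of the same transformation, using only colorsys and matplotlib's color parsing (the example colors are arbitrary):

import colorsys
import matplotlib.colors as mplc

def change_color_brightness(color, brightness_factor: float):
    # brightness_factor in [-1, 1]: negative darkens, positive lightens, 0 is a no-op.
    assert -1.0 <= brightness_factor <= 1.0
    r, g, b = mplc.to_rgb(color)
    h, l, s = colorsys.rgb_to_hls(r, g, b)
    l = min(max(l + brightness_factor * l, 0.0), 1.0)
    return colorsys.hls_to_rgb(h, l, s)

if __name__ == "__main__":
    print(change_color_brightness("g", 0.7))               # a lighter green, as used for label text
    print(change_color_brightness((1.0, 0.0, 0.0), -0.7))  # a much darker red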
# Copyright (c) Facebook, Inc. and its affiliates. random.seed(0) logger = logging.getLogger(__name__) __all__ = ["ColorMode", "VisImage", "Visualizer"] _SMALL_OBJECT_AREA_THRESH = 1000 _LARGE_MASK_AREA_THRESH = 120000 _OFF_WHITE = (1.0, 1.0, 1.0) _BLACK = (0, 0, 0) _RED = (1.0, 0, 0) _KEYPOINT_THRESHOLD = 0.05 def instance_color(rgb=False, idx=1, maximum=255): """ Args: rgb (bool): whether to return RGB colors or BGR colors. maximum (int): either 255 or 1 Returns: ndarray: a vector of 3 numbers """ ret = _COLORS[idx] * maximum if not rgb: ret = ret[::-1] return ret @unique class ColorMode(Enum): """ Enum of different color modes to use for instance visualizations. """ IMAGE = 0 """ Picks a random color for every instance and overlay segmentations with low opacity. """ SEGMENTATION = 1 """ Let instances of the same category have similar colors (from metadata.thing_colors), and overlay them with high opacity. This provides more attention on the quality of segmentation. """ IMAGE_BW = 2 """ Same as IMAGE, but convert all areas without masks to gray-scale. Only available for drawing per-instance mask predictions. """ class GenericMask: """ Attribute: polygons (list[ndarray]): list[ndarray]: polygons for this mask. Each ndarray has format [x, y, x, y, ...] mask (ndarray): a binary mask """ def __init__(self, mask_or_polygons, height, width): self._mask = self._polygons = self._has_holes = None self.height = height self.width = width m = mask_or_polygons if isinstance(m, dict): # RLEs assert "counts" in m and "size" in m if isinstance(m["counts"], list): # uncompressed RLEs h, w = m["size"] assert h == height and w == width m = mask_util.frPyObjects(m, h, w) self._mask = mask_util.decode(m)[:, :] return if isinstance(m, list): # list[ndarray] self._polygons = [np.asarray(x).reshape(-1) for x in m] return if isinstance(m, np.ndarray): # assumed to be a binary mask assert m.shape[1] != 2, m.shape assert m.shape == ( height, width, ), f"mask shape: {m.shape}, target dims: {height}, {width}" self._mask = m.astype("uint8") return raise ValueError("GenericMask cannot handle object {} of type '{}'".format(m, type(m))) @property def mask(self): if self._mask is None: self._mask = self.polygons_to_mask(self._polygons) return self._mask @property def polygons(self): if self._polygons is None: self._polygons, self._has_holes = self.mask_to_polygons(self._mask) return self._polygons @property def has_holes(self): if self._has_holes is None: if self._mask is not None: self._polygons, self._has_holes = self.mask_to_polygons(self._mask) else: self._has_holes = False # if original format is polygon, does not have holes return self._has_holes def mask_to_polygons(self, mask): # cv2.RETR_CCOMP flag retrieves all the contours and arranges them to a 2-level # hierarchy. External contours (boundary) of the object are placed in hierarchy-1. # Internal contours (holes) are placed in hierarchy-2. # cv2.CHAIN_APPROX_NONE flag gets vertices of polygons from contours. mask = np.ascontiguousarray(mask) # some versions of cv2 does not support incontiguous arr res = cv2.findContours(mask.astype("uint8"), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE) hierarchy = res[-1] if hierarchy is None: # empty mask return [], False has_holes = (hierarchy.reshape(-1, 4)[:, 3] >= 0).sum() > 0 res = res[-2] res = [x.flatten() for x in res] # These coordinates from OpenCV are integers in range [0, W-1 or H-1]. # We add 0.5 to turn them into real-value coordinate space. 
A better solution # would be to first +0.5 and then dilate the returned polygon by 0.5. res = [x + 0.5 for x in res if len(x) >= 6] return res, has_holes def polygons_to_mask(self, polygons): rle = mask_util.frPyObjects(polygons, self.height, self.width) rle = mask_util.merge(rle) return mask_util.decode(rle)[:, :] def area(self): return self.mask.sum() def bbox(self): p = mask_util.frPyObjects(self.polygons, self.height, self.width) p = mask_util.merge(p) bbox = mask_util.toBbox(p) bbox[2] += bbox[0] bbox[3] += bbox[1] return bbox class _PanopticPrediction: """ Unify different panoptic annotation/prediction formats """ def __init__(self, panoptic_seg, segments_info, metadata=None): if segments_info is None: assert metadata is not None # If "segments_info" is None, we assume "panoptic_img" is a # H*W int32 image storing the panoptic_id in the format of # category_id * label_divisor + instance_id. We reserve -1 for # VOID label. label_divisor = metadata.label_divisor segments_info = [] for panoptic_label in np.unique(panoptic_seg.numpy()): if panoptic_label == -1: # VOID region. continue pred_class = panoptic_label // label_divisor isthing = pred_class in metadata.thing_dataset_id_to_contiguous_id.values() segments_info.append( { "id": int(panoptic_label), "category_id": int(pred_class), "isthing": bool(isthing), } ) del metadata self._seg = panoptic_seg self._sinfo = {s["id"]: s for s in segments_info} # seg id -> seg info segment_ids, areas = torch.unique(panoptic_seg, sorted=True, return_counts=True) areas = areas.numpy() sorted_idxs = np.argsort(-areas) self._seg_ids, self._seg_areas = segment_ids[sorted_idxs], areas[sorted_idxs] self._seg_ids = self._seg_ids.tolist() for sid, area in zip(self._seg_ids, self._seg_areas): if sid in self._sinfo: self._sinfo[sid]["area"] = float(area) def non_empty_mask(self): """ Returns: (H, W) array, a mask for all pixels that have a prediction """ empty_ids = [] for id in self._seg_ids: if id not in self._sinfo: empty_ids.append(id) if len(empty_ids) == 0: return np.zeros(self._seg.shape, dtype=np.uint8) assert ( len(empty_ids) == 1 ), ">1 ids corresponds to no labels. This is currently not supported" return (self._seg != empty_ids[0]).numpy().astype(np.bool) def semantic_masks(self): for sid in self._seg_ids: sinfo = self._sinfo.get(sid) if sinfo is None or sinfo["isthing"]: # Some pixels (e.g. id 0 in PanopticFPN) have no instance or semantic predictions. 
continue yield (self._seg == sid).numpy().astype(np.bool), sinfo def instance_masks(self): for sid in self._seg_ids: sinfo = self._sinfo.get(sid) if sinfo is None or not sinfo["isthing"]: continue mask = (self._seg == sid).numpy().astype(np.bool) if mask.sum() > 0: yield mask, sinfo def _create_text_labels(classes, scores, class_names, is_crowd=None): """ Args: classes (list[int] or None): scores (list[float] or None): class_names (list[str] or None): is_crowd (list[bool] or None): Returns: list[str] or None """ labels = None if classes is not None: if class_names is not None and len(class_names) > 0: labels = [class_names[i] for i in classes] else: labels = [str(i) for i in classes] if scores is not None: if labels is None: labels = ["{:.0f}%".format(s * 100) for s in scores] else: labels = ["{} {:.0f}%".format(l, s * 100) for l, s in zip(labels, scores)] if labels is not None and is_crowd is not None: labels = [l + ("|crowd" if crowd else "") for l, crowd in zip(labels, is_crowd)] return labels class VisImage: def __init__(self, img, scale=1.0): """ Args: img (ndarray): an RGB image of shape (H, W, 3) in range [0, 255]. scale (float): scale the input image """ self.img = img self.scale = scale self.width, self.height = img.shape[1], img.shape[0] self._setup_figure(img) def _setup_figure(self, img): """ Args: Same as in :meth:`__init__()`. Returns: fig (matplotlib.pyplot.figure): top level container for all the image plot elements. ax (matplotlib.pyplot.Axes): contains figure elements and sets the coordinate system. """ fig = mplfigure.Figure(frameon=False) self.dpi = fig.get_dpi() # add a small 1e-2 to avoid precision lost due to matplotlib's truncation # (https://github.com/matplotlib/matplotlib/issues/15363) fig.set_size_inches( (self.width * self.scale + 1e-2) / self.dpi, (self.height * self.scale + 1e-2) / self.dpi, ) self.canvas = FigureCanvasAgg(fig) # self.canvas = mpl.backends.backend_cairo.FigureCanvasCairo(fig) ax = fig.add_axes([0.0, 0.0, 1.0, 1.0]) ax.axis("off") self.fig = fig self.ax = ax self.reset_image(img) def reset_image(self, img): """ Args: img: same as in __init__ """ img = img.astype("uint8") self.ax.imshow(img, extent=(0, self.width, self.height, 0), interpolation="nearest") def save(self, filepath): """ Args: filepath (str): a string that contains the absolute path, including the file name, where the visualized image will be saved. """ self.fig.savefig(filepath) def get_image(self): """ Returns: ndarray: the visualized image of shape (H, W, 3) (RGB) in uint8 type. The shape is scaled w.r.t the input image using the given `scale` argument. """ canvas = self.canvas s, (width, height) = canvas.print_to_buffer() # buf = io.BytesIO() # works for cairo backend # canvas.print_rgba(buf) # width, height = self.width, self.height # s = buf.getvalue() buffer = np.frombuffer(s, dtype="uint8") img_rgba = buffer.reshape(height, width, 4) rgb, alpha = np.split(img_rgba, [3], axis=2) return rgb.astype("uint8") class Visualizer: """ Visualizer that draws data about detection/segmentation on images. It contains methods like `draw_{text,box,circle,line,binary_mask,polygon}` that draw primitive objects to images, as well as high-level wrappers like `draw_{instance_predictions,sem_seg,panoptic_seg_predictions,dataset_dict}` that draw composite data in some pre-defined style. Note that the exact visualization style for the high-level wrappers are subject to change. Style such as color, opacity, label contents, visibility of labels, or even the visibility of objects themselves (e.g. 
when the object is too small) may change according to different heuristics, as long as the results still look visually reasonable. To obtain a consistent style, you can implement custom drawing functions with the abovementioned primitive methods instead. If you need more customized visualization styles, you can process the data yourself following their format documented in tutorials (:doc:`/tutorials/models`, :doc:`/tutorials/datasets`). This class does not intend to satisfy everyone's preference on drawing styles. This visualizer focuses on high rendering quality rather than performance. It is not designed to be used for real-time applications. """ # TODO implement a fast, rasterized version using OpenCV def __init__(self, img_rgb, is_img=True, metadata=None, scale=1.0, instance_mode=ColorMode.IMAGE): """ Args: img_rgb: a numpy array of shape (H, W, C), where H and W correspond to the height and width of the image respectively. C is the number of color channels. The image is required to be in RGB format since that is a requirement of the Matplotlib library. The image is also expected to be in the range [0, 255]. metadata (Metadata): dataset metadata (e.g. class names and colors) instance_mode (ColorMode): defines one of the pre-defined style for drawing instances on an image. """ if is_img: self.img = np.asarray(img_rgb).clip(0, 255).astype(np.uint8) else: self.img = np.zeros_like(img_rgb).clip(0, 255).astype(np.uint8) + 255 if metadata is None: metadata = MetadataCatalog.get("__nonexist__") self.metadata = metadata self.output = VisImage(self.img, scale=scale) self.cpu_device = torch.device("cpu") # too small texts are useless, therefore clamp to 9 self._default_font_size = max( np.sqrt(self.output.height * self.output.width) // 90, 10 // scale ) self._instance_mode = instance_mode self.keypoint_threshold = _KEYPOINT_THRESHOLD def get_image(self, img): img = np.asarray(img).clip(0, 255).astype(np.uint8) return VisImage(img, scale=1.0) def draw_box_predictions( self, boxes=None, labels=None, scores=None, assigned_colors=None ): """ Args: boxes (Boxes, RotatedBoxes or ndarray): either a :class:`Boxes`, or an Nx4 numpy array of XYXY_ABS format for the N objects in a single image, or a :class:`RotatedBoxes`, or an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format for the N objects in a single image, labels (list[str]): the text to be displayed for each instance. assigned_colors (list[matplotlib.colors]): a list of colors, where each color corresponds to each mask or box in the image. Refer to 'matplotlib.colors' for full list of formats that the colors are accepted in. Returns: output (VisImage): image object with visualizations. """ num_instances = 0 boxes = self._convert_boxes(boxes) classes = labels.tolist() scores = scores.tolist() labels = _create_text_labels(classes, scores, self.metadata.get("stuff_classes", None)) num_instances = len(boxes) assert len(labels) == num_instances if assigned_colors is None: # assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)] assigned_colors = [instance_color(rgb=True, idx=i, maximum=1) for i in range(num_instances)] if num_instances == 0: return self.output # Display in largest to smallest order to reduce occlusion. areas = None areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1) if areas is not None: sorted_idxs = np.argsort(-areas).tolist() # Re-order overlapped instances in descending order. 
boxes = boxes[sorted_idxs] if boxes is not None else None labels = [labels[k] for k in sorted_idxs] if labels is not None else None assigned_colors = [assigned_colors[idx] for idx in sorted_idxs] for i in range(num_instances): color = assigned_colors[i] if boxes is not None: self.draw_box(boxes[i], edge_color=color) if labels is not None: # first get a box if boxes is not None: x0, y0, x1, y1 = boxes[i] text_pos = (x0, y0) # if drawing boxes, put text on the box corner. horiz_align = "left" else: continue # drawing the box confidence for keypoints isn't very useful. # for small objects, draw text at the side to avoid occlusion instance_area = (y1 - y0) * (x1 - x0) if ( instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale or y1 - y0 < 40 * self.output.scale ): if y1 >= self.output.height - 5: text_pos = (x1, y0) else: text_pos = (x0, y1) height_ratio = (y1 - y0) / np.sqrt(self.output.height * self.output.width) lighter_color = self._change_color_brightness(color, brightness_factor=0.7) font_size = ( np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) * 0.5 * self._default_font_size ) self.draw_text( labels[i], text_pos, color=lighter_color, horizontal_alignment=horiz_align, font_size=font_size, ) return self.output def draw_instance_predictions(self, predictions, alpha=0.8, is_text=True): """ Draw instance-level prediction results on an image. Args: predictions (Instances): the output of an instance detection/segmentation model. Following fields will be used to draw: "pred_boxes", "pred_classes", "scores", "pred_masks" (or "pred_masks_rle"). Returns: output (VisImage): image object with visualizations. """ boxes = predictions.pred_boxes if predictions.has("pred_boxes") else None scores = predictions.scores if predictions.has("scores") else None classes = predictions.pred_classes.tolist() if predictions.has("pred_classes") else None labels = _create_text_labels(classes, scores, self.metadata.get("stuff_classes", None)) keypoints = predictions.pred_keypoints if predictions.has("pred_keypoints") else None if predictions.has("pred_masks"): masks = np.asarray(predictions.pred_masks) masks = [GenericMask(x, self.output.height, self.output.width) for x in masks] else: masks = None if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("stuff_colors"): # colors = [ # self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in classes # ] colors = [ instance_color(rgb=True, idx=c, maximum=1) for c in classes ] else: colors = None if self._instance_mode == ColorMode.IMAGE_BW: self.output.reset_image( self._create_grayscale_image( (predictions.pred_masks.any(dim=0) > 0).numpy() if predictions.has("pred_masks") else None ) ) self.overlay_instances( masks=masks, boxes=boxes, labels=labels, keypoints=keypoints, assigned_colors=colors, alpha=alpha, is_text=is_text, ) return self.output def draw_sem_seg(self, sem_seg, area_threshold=None, alpha=0.8, is_text=True, edge_color=_OFF_WHITE): """ Draw semantic segmentation predictions/labels. Args: sem_seg (Tensor or ndarray): the segmentation of shape (H, W). Each value is the integer label of the pixel. area_threshold (int): segments with less than `area_threshold` are not drawn. alpha (float): the larger it is, the more opaque the segmentations are. Returns: output (VisImage): image object with visualizations. 
""" if isinstance(sem_seg, torch.Tensor): sem_seg = sem_seg.numpy() labels, areas = np.unique(sem_seg, return_counts=True) sorted_idxs = np.argsort(-areas).tolist() labels = labels[sorted_idxs] for label in filter(lambda l: l < len(self.metadata.stuff_classes), labels): try: mask_color = [x / 255 for x in self.metadata.stuff_colors[label]] except (AttributeError, IndexError): mask_color = None binary_mask = (sem_seg == label).astype(np.uint8) text = self.metadata.stuff_classes[label] self.draw_binary_mask( binary_mask, color=mask_color, edge_color=edge_color, text=text, alpha=alpha, area_threshold=area_threshold, is_text=is_text, ) return self.output def draw_panoptic_seg(self, panoptic_seg, segments_info, area_threshold=None, alpha=0.7, is_text=True,): """ Draw panoptic prediction annotations or results. Args: panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment. segments_info (list[dict] or None): Describe each segment in `panoptic_seg`. If it is a ``list[dict]``, each dict contains keys "id", "category_id". If None, category id of each pixel is computed by ``pixel // metadata.label_divisor``. area_threshold (int): stuff segments with less than `area_threshold` are not drawn. Returns: output (VisImage): image object with visualizations. """ pred = _PanopticPrediction(panoptic_seg, segments_info, self.metadata) if self._instance_mode == ColorMode.IMAGE_BW: self.output.reset_image(self._create_grayscale_image(pred.non_empty_mask())) # draw mask for all semantic segments first i.e. "stuff" for mask, sinfo in pred.semantic_masks(): category_idx = sinfo["category_id"] try: mask_color = [x / 255 for x in self.metadata.stuff_colors[category_idx]] except AttributeError: mask_color = None text = self.metadata.stuff_classes[category_idx] self.draw_binary_mask( mask, color=mask_color, edge_color=_OFF_WHITE, text=text, alpha=alpha, area_threshold=area_threshold, is_text=is_text, ) # draw mask for all instances second all_instances = list(pred.instance_masks()) if len(all_instances) == 0: return self.output masks, sinfo = list(zip(*all_instances)) category_ids = [x["category_id"] for x in sinfo] try: scores = [x["score"] for x in sinfo] except KeyError: scores = None labels = _create_text_labels( category_ids, scores, self.metadata.stuff_classes, [x.get("iscrowd", 0) for x in sinfo] ) try: colors = [ self._jitter([x / 255 for x in self.metadata.stuff_colors[c]]) for c in category_ids ] except AttributeError: colors = None self.overlay_instances(masks=masks, labels=labels, assigned_colors=colors, alpha=alpha, is_text=is_text) return self.output draw_panoptic_seg_predictions = draw_panoptic_seg # backward compatibility def draw_dataset_dict(self, dic): """ Draw annotations/segmentaions in Detectron2 Dataset format. Args: dic (dict): annotation/segmentation data of one image, in Detectron2 Dataset format. Returns: output (VisImage): image object with visualizations. 
""" annos = dic.get("annotations", None) if annos: if "segmentation" in annos[0]: masks = [x["segmentation"] for x in annos] else: masks = None if "keypoints" in annos[0]: keypts = [x["keypoints"] for x in annos] keypts = np.array(keypts).reshape(len(annos), -1, 3) else: keypts = None boxes = [ BoxMode.convert(x["bbox"], x["bbox_mode"], BoxMode.XYXY_ABS) if len(x["bbox"]) == 4 else x["bbox"] for x in annos ] colors = None category_ids = [x["category_id"] for x in annos] if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("stuff_colors"): colors = [ self._jitter([x / 255 for x in self.metadata.stuff_colors[c]]) for c in category_ids ] names = self.metadata.get("stuff_classes", None) labels = _create_text_labels( category_ids, scores=None, class_names=names, is_crowd=[x.get("iscrowd", 0) for x in annos], ) self.overlay_instances( labels=labels, boxes=boxes, masks=masks, keypoints=keypts, assigned_colors=colors ) sem_seg = dic.get("sem_seg", None) if sem_seg is None and "sem_seg_file_name" in dic: with PathManager.open(dic["sem_seg_file_name"], "rb") as f: sem_seg = Image.open(f) sem_seg = np.asarray(sem_seg, dtype="uint8") if sem_seg is not None: self.draw_sem_seg(sem_seg, area_threshold=0, alpha=0.5) pan_seg = dic.get("pan_seg", None) # if pan_seg is None and "pan_seg_file_name" in dic: # with PathManager.open(dic["pan_seg_file_name"], "rb") as f: # pan_seg = Image.open(f) # pan_seg = np.asarray(pan_seg) # from panopticapi.utils import rgb2id # # pan_seg = rgb2id(pan_seg) if pan_seg is not None: segments_info = dic["segments_info"] pan_seg = torch.tensor(pan_seg) self.draw_panoptic_seg(pan_seg, segments_info, area_threshold=0, alpha=0.5) return self.output def overlay_instances( self, *, boxes=None, labels=None, masks=None, keypoints=None, assigned_colors=None, alpha=0.5, is_text=True, ): """ Args: boxes (Boxes, RotatedBoxes or ndarray): either a :class:`Boxes`, or an Nx4 numpy array of XYXY_ABS format for the N objects in a single image, or a :class:`RotatedBoxes`, or an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format for the N objects in a single image, labels (list[str]): the text to be displayed for each instance. masks (masks-like object): Supported types are: * :class:`detectron2.structures.PolygonMasks`, :class:`detectron2.structures.BitMasks`. * list[list[ndarray]]: contains the segmentation masks for all objects in one image. The first level of the list corresponds to individual instances. The second level to all the polygon that compose the instance, and the third level to the polygon coordinates. The third level should have the format of [x0, y0, x1, y1, ..., xn, yn] (n >= 3). * list[ndarray]: each ndarray is a binary mask of shape (H, W). * list[dict]: each dict is a COCO-style RLE. keypoints (Keypoint or array like): an array-like object of shape (N, K, 3), where the N is the number of instances and K is the number of keypoints. The last dimension corresponds to (x, y, visibility or score). assigned_colors (list[matplotlib.colors]): a list of colors, where each color corresponds to each mask or box in the image. Refer to 'matplotlib.colors' for full list of formats that the colors are accepted in. Returns: output (VisImage): image object with visualizations. 
""" num_instances = 0 if boxes is not None: boxes = self._convert_boxes(boxes) num_instances = len(boxes) if masks is not None: masks = self._convert_masks(masks) if num_instances: assert len(masks) == num_instances else: num_instances = len(masks) if keypoints is not None: if num_instances: assert len(keypoints) == num_instances else: num_instances = len(keypoints) keypoints = self._convert_keypoints(keypoints) if labels is not None: assert len(labels) == num_instances if assigned_colors is None: # assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)] assigned_colors = [instance_color(rgb=True, idx=i, maximum=1) for i in range(num_instances)] if num_instances == 0: return self.output if boxes is not None and boxes.shape[1] == 5: return self.overlay_rotated_instances( boxes=boxes, labels=labels, assigned_colors=assigned_colors ) # Display in largest to smallest order to reduce occlusion. areas = None if boxes is not None: areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1) elif masks is not None: areas = np.asarray([x.area() for x in masks]) if areas is not None: sorted_idxs = np.argsort(-areas).tolist() # Re-order overlapped instances in descending order. boxes = boxes[sorted_idxs] if boxes is not None else None labels = [labels[k] for k in sorted_idxs] if labels is not None else None masks = [masks[idx] for idx in sorted_idxs] if masks is not None else None assigned_colors = [assigned_colors[idx] for idx in sorted_idxs] keypoints = keypoints[sorted_idxs] if keypoints is not None else None for i in range(num_instances): color = assigned_colors[i] if boxes is not None: self.draw_box(boxes[i], edge_color=color) if masks is not None: for segment in masks[i].polygons: self.draw_polygon(segment.reshape(-1, 2), color, alpha=alpha) if labels is not None: # first get a box if boxes is not None: x0, y0, x1, y1 = boxes[i] text_pos = (x0, y0) # if drawing boxes, put text on the box corner. horiz_align = "left" elif masks is not None: # skip small mask without polygon if len(masks[i].polygons) == 0: continue x0, y0, x1, y1 = masks[i].bbox() # draw text in the center (defined by median) when box is not drawn # median is less sensitive to outliers. text_pos = np.median(masks[i].mask.nonzero(), axis=1)[::-1] horiz_align = "center" else: continue # drawing the box confidence for keypoints isn't very useful. # for small objects, draw text at the side to avoid occlusion instance_area = (y1 - y0) * (x1 - x0) if ( instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale or y1 - y0 < 40 * self.output.scale ): if y1 >= self.output.height - 5: text_pos = (x1, y0) else: text_pos = (x0, y1) height_ratio = (y1 - y0) / np.sqrt(self.output.height * self.output.width) lighter_color = self._change_color_brightness(color, brightness_factor=0.7) font_size = ( np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) * 0.5 * self._default_font_size ) if is_text: self.draw_text( labels[i], text_pos, color=lighter_color, horizontal_alignment=horiz_align, font_size=font_size, ) # draw keypoints if keypoints is not None: for keypoints_per_instance in keypoints: self.draw_and_connect_keypoints(keypoints_per_instance) return self.output def overlay_rotated_instances(self, boxes=None, labels=None, assigned_colors=None): """ Args: boxes (ndarray): an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format for the N objects in a single image. labels (list[str]): the text to be displayed for each instance. 
assigned_colors (list[matplotlib.colors]): a list of colors, where each color corresponds to each mask or box in the image. Refer to 'matplotlib.colors' for full list of formats that the colors are accepted in. Returns: output (VisImage): image object with visualizations. """ num_instances = len(boxes) if assigned_colors is None: # assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)] assigned_colors = [instance_color(rgb=True, idx=i, maximum=1) for i in range(num_instances)] if num_instances == 0: return self.output # Display in largest to smallest order to reduce occlusion. if boxes is not None: areas = boxes[:, 2] * boxes[:, 3] sorted_idxs = np.argsort(-areas).tolist() # Re-order overlapped instances in descending order. boxes = boxes[sorted_idxs] labels = [labels[k] for k in sorted_idxs] if labels is not None else None colors = [assigned_colors[idx] for idx in sorted_idxs] for i in range(num_instances): self.draw_rotated_box_with_label( boxes[i], edge_color=colors[i], label=labels[i] if labels is not None else None ) return self.output def draw_and_connect_keypoints(self, keypoints): """ Draws keypoints of an instance and follows the rules for keypoint connections to draw lines between appropriate keypoints. This follows color heuristics for line color. Args: keypoints (Tensor): a tensor of shape (K, 3), where K is the number of keypoints and the last dimension corresponds to (x, y, probability). Returns: output (VisImage): image object with visualizations. """ visible = {} keypoint_names = self.metadata.get("keypoint_names") for idx, keypoint in enumerate(keypoints): # draw keypoint x, y, prob = keypoint if prob > self.keypoint_threshold: self.draw_circle((x, y), color=_RED) if keypoint_names: keypoint_name = keypoint_names[idx] visible[keypoint_name] = (x, y) if self.metadata.get("keypoint_connection_rules"): for kp0, kp1, color in self.metadata.keypoint_connection_rules: if kp0 in visible and kp1 in visible: x0, y0 = visible[kp0] x1, y1 = visible[kp1] color = tuple(x / 255.0 for x in color) self.draw_line([x0, x1], [y0, y1], color=color) # draw lines from nose to mid-shoulder and mid-shoulder to mid-hip # Note that this strategy is specific to person keypoints. # For other keypoints, it should just do nothing try: ls_x, ls_y = visible["left_shoulder"] rs_x, rs_y = visible["right_shoulder"] mid_shoulder_x, mid_shoulder_y = (ls_x + rs_x) / 2, (ls_y + rs_y) / 2 except KeyError: pass else: # draw line from nose to mid-shoulder nose_x, nose_y = visible.get("nose", (None, None)) if nose_x is not None: self.draw_line([nose_x, mid_shoulder_x], [nose_y, mid_shoulder_y], color=_RED) try: # draw line from mid-shoulder to mid-hip lh_x, lh_y = visible["left_hip"] rh_x, rh_y = visible["right_hip"] except KeyError: pass else: mid_hip_x, mid_hip_y = (lh_x + rh_x) / 2, (lh_y + rh_y) / 2 self.draw_line([mid_hip_x, mid_shoulder_x], [mid_hip_y, mid_shoulder_y], color=_RED) return self.output """ Primitive drawing functions: """ def draw_text( self, text, position, *, font_size=None, color="g", horizontal_alignment="center", rotation=0, ): """ Args: text (str): class label position (tuple): a tuple of the x and y coordinates to place text on image. font_size (int, optional): font of the text. If not provided, a font size proportional to the image width is calculated and used. color: color of the text. Refer to `matplotlib.colors` for full list of formats that are accepted. 
horizontal_alignment (str): see `matplotlib.text.Text` rotation: rotation angle in degrees CCW Returns: output (VisImage): image object with text drawn. """ if not font_size: font_size = self._default_font_size # since the text background is dark, we don't want the text to be dark color = np.maximum(list(mplc.to_rgb(color)), 0.2) color[np.argmax(color)] = max(0.8, np.max(color)) x, y = position self.output.ax.text( x, y, text, size=font_size * self.output.scale, family="sans-serif", bbox={"facecolor": "black", "alpha": 0.8, "pad": 0.7, "edgecolor": "none"}, verticalalignment="top", horizontalalignment=horizontal_alignment, color=color, zorder=10, rotation=rotation, ) return self.output def draw_box(self, box_coord, alpha=1.0, edge_color="g", line_style="-"): """ Args: box_coord (tuple): a tuple containing x0, y0, x1, y1 coordinates, where x0 and y0 are the coordinates of the image's top left corner. x1 and y1 are the coordinates of the image's bottom right corner. alpha (float): blending efficient. Smaller values lead to more transparent masks. edge_color: color of the outline of the box. Refer to `matplotlib.colors` for full list of formats that are accepted. line_style (string): the string to use to create the outline of the boxes. Returns: output (VisImage): image object with box drawn. """ x0, y0, x1, y1 = box_coord width = x1 - x0 height = y1 - y0 linewidth = 2 self.output.ax.add_patch( mpl.patches.Rectangle( (x0, y0), width, height, fill=False, edgecolor=edge_color, linewidth=linewidth * self.output.scale, alpha=alpha, linestyle=line_style, ) ) return self.output def draw_rotated_box_with_label( self, rotated_box, alpha=0.5, edge_color="g", line_style="-", label=None ): """ Draw a rotated box with label on its top-left corner. Args: rotated_box (tuple): a tuple containing (cnt_x, cnt_y, w, h, angle), where cnt_x and cnt_y are the center coordinates of the box. w and h are the width and height of the box. angle represents how many degrees the box is rotated CCW with regard to the 0-degree box. alpha (float): blending efficient. Smaller values lead to more transparent masks. edge_color: color of the outline of the box. Refer to `matplotlib.colors` for full list of formats that are accepted. line_style (string): the string to use to create the outline of the boxes. label (string): label for rotated box. It will not be rendered when set to None. Returns: output (VisImage): image object with box drawn. 
""" cnt_x, cnt_y, w, h, angle = rotated_box area = w * h # use thinner lines when the box is small linewidth = self._default_font_size / ( 6 if area < _SMALL_OBJECT_AREA_THRESH * self.output.scale else 3 ) theta = angle * math.pi / 180.0 c = math.cos(theta) s = math.sin(theta) rect = [(-w / 2, h / 2), (-w / 2, -h / 2), (w / 2, -h / 2), (w / 2, h / 2)] # x: left->right ; y: top->down rotated_rect = [(s * yy + c * xx + cnt_x, c * yy - s * xx + cnt_y) for (xx, yy) in rect] for k in range(4): j = (k + 1) % 4 self.draw_line( [rotated_rect[k][0], rotated_rect[j][0]], [rotated_rect[k][1], rotated_rect[j][1]], color=edge_color, linestyle="--" if k == 1 else line_style, linewidth=linewidth, ) if label is not None: text_pos = rotated_rect[1] # topleft corner height_ratio = h / np.sqrt(self.output.height * self.output.width) label_color = self._change_color_brightness(edge_color, brightness_factor=0.7) font_size = ( np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) * 0.5 * self._default_font_size ) self.draw_text(label, text_pos, color=label_color, font_size=font_size, rotation=angle) return self.output def draw_circle(self, circle_coord, color, radius=3): """ Args: circle_coord (list(int) or tuple(int)): contains the x and y coordinates of the center of the circle. color: color of the polygon. Refer to `matplotlib.colors` for a full list of formats that are accepted. radius (int): radius of the circle. Returns: output (VisImage): image object with box drawn. """ x, y = circle_coord self.output.ax.add_patch( mpl.patches.Circle(circle_coord, radius=radius, fill=True, color=color) ) return self.output def draw_line(self, x_data, y_data, color, linestyle="-", linewidth=None): """ Args: x_data (list[int]): a list containing x values of all the points being drawn. Length of list should match the length of y_data. y_data (list[int]): a list containing y values of all the points being drawn. Length of list should match the length of x_data. color: color of the line. Refer to `matplotlib.colors` for a full list of formats that are accepted. linestyle: style of the line. Refer to `matplotlib.lines.Line2D` for a full list of formats that are accepted. linewidth (float or None): width of the line. When it's None, a default value will be computed and used. Returns: output (VisImage): image object with line drawn. """ if linewidth is None: linewidth = self._default_font_size / 3 linewidth = max(linewidth, 1) self.output.ax.add_line( mpl.lines.Line2D( x_data, y_data, linewidth=linewidth * self.output.scale, color=color, linestyle=linestyle, ) ) return self.output def draw_binary_mask( self, binary_mask, color=None, *, edge_color=None, text=None, alpha=0.5, area_threshold=10, is_text=True, ): """ Args: binary_mask (ndarray): numpy array of shape (H, W), where H is the image height and W is the image width. Each value in the array is either a 0 or 1 value of uint8 type. color: color of the mask. Refer to `matplotlib.colors` for a full list of formats that are accepted. If None, will pick a random color. edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a full list of formats that are accepted. text (str): if None, will be drawn on the object alpha (float): blending efficient. Smaller values lead to more transparent masks. area_threshold (float): a connected component smaller than this area will not be shown. Returns: output (VisImage): image object with mask drawn. 
""" if color is None: color = random_color(rgb=True, maximum=1) color = mplc.to_rgb(color) has_valid_segment = False binary_mask = binary_mask.astype("uint8") # opencv needs uint8 mask = GenericMask(binary_mask, self.output.height, self.output.width) shape2d = (binary_mask.shape[0], binary_mask.shape[1]) if not mask.has_holes: # draw polygons for regular masks for segment in mask.polygons: # area = mask_util.area(mask_util.frPyObjects([segment], shape2d[0], shape2d[1])) # if area < (area_threshold or 0): # continue has_valid_segment = True segment = segment.reshape(-1, 2) self.draw_polygon(segment, color=color, edge_color=edge_color, alpha=alpha) else: # TODO: Use Path/PathPatch to draw vector graphics: # https://stackoverflow.com/questions/8919719/how-to-plot-a-complex-polygon rgba = np.zeros(shape2d + (4,), dtype="float32") rgba[:, :, :3] = color rgba[:, :, 3] = (mask.mask == 1).astype("float32") * alpha has_valid_segment = True self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0)) if is_text: if text is not None and has_valid_segment: lighter_color = self._change_color_brightness(color, brightness_factor=0.7) self._draw_text_in_mask(binary_mask, text, lighter_color) return self.output def draw_soft_mask(self, soft_mask, color=None, *, text=None, alpha=0.5): """ Args: soft_mask (ndarray): float array of shape (H, W), each value in [0, 1]. color: color of the mask. Refer to `matplotlib.colors` for a full list of formats that are accepted. If None, will pick a random color. text (str): if None, will be drawn on the object alpha (float): blending efficient. Smaller values lead to more transparent masks. Returns: output (VisImage): image object with mask drawn. """ if color is None: color = random_color(rgb=True, maximum=1) color = mplc.to_rgb(color) shape2d = (soft_mask.shape[0], soft_mask.shape[1]) rgba = np.zeros(shape2d + (4,), dtype="float32") rgba[:, :, :3] = color rgba[:, :, 3] = soft_mask * alpha self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0)) if text is not None: lighter_color = self._change_color_brightness(color, brightness_factor=0.7) binary_mask = (soft_mask > 0.5).astype("uint8") # self._draw_text_in_mask(binary_mask, text, lighter_color) return self.output def draw_polygon(self, segment, color, edge_color=None, alpha=0.5): """ Args: segment: numpy array of shape Nx2, containing all the points in the polygon. color: color of the polygon. Refer to `matplotlib.colors` for a full list of formats that are accepted. edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a full list of formats that are accepted. If not provided, a darker shade of the polygon color will be used instead. alpha (float): blending efficient. Smaller values lead to more transparent masks. Returns: output (VisImage): image object with polygon drawn. """ if edge_color is None: # make edge color darker than the polygon color if alpha > 0.8: edge_color = self._change_color_brightness(color, brightness_factor=-0.7) else: edge_color = color edge_color = mplc.to_rgb(edge_color) + (1,) polygon = mpl.patches.Polygon( segment, fill=True, facecolor=mplc.to_rgb(color) + (alpha,), edgecolor=edge_color, linewidth=max(self._default_font_size // 15 * self.output.scale, 1), ) self.output.ax.add_patch(polygon) return self.output """ Internal methods: """ def _jitter(self, color): """ Randomly modifies given color to produce a slightly different color than the color given. 
Args: color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color picked. The values in the list are in the [0.0, 1.0] range. Returns: jittered_color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color after being jittered. The values in the list are in the [0.0, 1.0] range. """ color = mplc.to_rgb(color) vec = np.random.rand(3) # better to do it in another color space vec = vec / np.linalg.norm(vec) * 0.5 res = np.clip(vec + color, 0, 1) return tuple(res) def _create_grayscale_image(self, mask=None): """ Create a grayscale version of the original image. The colors in masked area, if given, will be kept. """ img_bw = self.img.astype("f4").mean(axis=2) img_bw = np.stack([img_bw] * 3, axis=2) if mask is not None: img_bw[mask] = self.img[mask] return img_bw def _change_color_brightness(self, color, brightness_factor): """ Depending on the brightness_factor, gives a lighter or darker color i.e. a color with less or more saturation than the original color. Args: color: color of the polygon. Refer to `matplotlib.colors` for a full list of formats that are accepted. brightness_factor (float): a value in [-1.0, 1.0] range. A lightness factor of 0 will correspond to no change, a factor in [-1.0, 0) range will result in a darker color and a factor in (0, 1.0] range will result in a lighter color. Returns: modified_color (tuple[double]): a tuple containing the RGB values of the modified color. Each value in the tuple is in the [0.0, 1.0] range. """ assert brightness_factor >= -1.0 and brightness_factor <= 1.0 color = mplc.to_rgb(color) polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color)) modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1]) modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness, polygon_color[2]) return modified_color def _convert_boxes(self, boxes): """ Convert different format of boxes to an NxB array, where B = 4 or 5 is the box dimension. """
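_PanopticPrediction in the code above decodes a panoptic map whose pixels store category_id * label_divisor + instance_id, with -1 reserved for VOID. A compact numpy sketch of that decoding, ignoring the thing/stuff split and metadata handling (the label_divisor value and the toy map are made up):

import numpy as np

def decode_panoptic(panoptic_seg: np.ndarray, label_divisor: int):
    # Returns one (category_id, binary_mask) pair per segment; -1 pixels are VOID and skipped.
    segments = []
    for panoptic_label in np.unique(panoptic_seg):
        if panoptic_label == -1:
            continue
        category_id = panoptic_label // label_divisor
        segments.append((int(category_id), panoptic_seg == panoptic_label))
    return segments

if __name__ == "__main__":
    label_divisor = 1000
    seg = np.array([[17 * label_divisor + 0, 17 * label_divisor + 1],
                    [-1, 54 * label_divisor + 0]])
    for cat, mask in decode_panoptic(seg, label_divisor):
        print(cat, int(mask.sum()))  # two one-pixel instances of category 17, one of category 54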
if isinstance(boxes, Boxes) or isinstance(boxes, RotatedBoxes):
6
2023-12-05 02:51:53+00:00
24k
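_create_text_labels in the visualizer record above turns class indices, confidence scores, and a crowd flag into the strings drawn next to each instance. A self-contained sketch of the same formatting logic (the example classes and names are invented):

def create_text_labels(classes, scores, class_names, is_crowd=None):
    # classes: list[int] category indices; scores: list[float] in [0, 1] or None.
    labels = None
    if classes is not None:
        labels = [class_names[i] if class_names else str(i) for i in classes]
    if scores is not None:
        if labels is None:
            labels = ["{:.0f}%".format(s * 100) for s in scores]
        else:
            labels = ["{} {:.0f}%".format(l, s * 100) for l, s in zip(labels, scores)]
    if labels is not None and is_crowd is not None:
        labels = [l + ("|crowd" if crowd else "") for l, crowd in zip(labels, is_crowd)]
    return labels

if __name__ == "__main__":
    print(create_text_labels([0, 2], [0.91, 0.47], ["cat", "dog", "bird"], [False, True]))
    # -> ['cat 91%', 'bird 47%|crowd']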
DiffusionLight/DiffusionLight
relighting/inpainter.py
[ { "identifier": "CustomStableDiffusionControlNetInpaintPipeline", "path": "relighting/pipeline.py", "snippet": "class CustomStableDiffusionControlNetInpaintPipeline(StableDiffusionControlNetInpaintPipeline):\n @torch.no_grad()\n def __call__(\n self,\n prompt: Union[str, List[str]] =...
import torch
import numpy as np
import os
import pickle
from diffusers import ControlNetModel, AutoencoderKL
from PIL import Image
from tqdm.auto import tqdm
from transformers import pipeline as transformers_pipeline
from relighting.pipeline import CustomStableDiffusionControlNetInpaintPipeline
from relighting.pipeline_inpaintonly import CustomStableDiffusionInpaintPipeline, CustomStableDiffusionXLInpaintPipeline
from relighting.argument import SAMPLERS, VAE_MODELS, DEPTH_ESTIMATOR, get_control_signal_type
from relighting.image_processor import (
    estimate_scene_depth,
    estimate_scene_normal,
    merge_normal_map,
    fill_depth_circular
)
from relighting.ball_processor import get_ideal_normal_ball, crop_ball
from relighting.pipeline_xl import CustomStableDiffusionXLControlNetInpaintPipeline
18,155
class NoWaterMark: def apply_watermark(self, *args, **kwargs): return args[0] class ControlSignalGenerator(): def __init__(self, sd_arch, control_signal_type, device): self.sd_arch = sd_arch self.control_signal_type = control_signal_type self.device = device def process_sd_depth(self, input_image, normal_ball=None, mask_ball=None, x=None, y=None, r=None): if getattr(self, 'depth_estimator', None) is None: self.depth_estimator = transformers_pipeline("depth-estimation", device=self.device.index) control_image = self.depth_estimator(input_image)['depth'] control_image = np.array(control_image) control_image = control_image[:, :, None] control_image = np.concatenate([control_image, control_image, control_image], axis=2) control_image = Image.fromarray(control_image) control_image = fill_depth_circular(control_image, x, y, r) return control_image def process_sdxl_depth(self, input_image, normal_ball=None, mask_ball=None, x=None, y=None, r=None): if getattr(self, 'depth_estimator', None) is None: self.depth_estimator = transformers_pipeline("depth-estimation", model=DEPTH_ESTIMATOR, device=self.device.index) control_image = estimate_scene_depth(input_image, depth_estimator=self.depth_estimator) xs = [x] if not isinstance(x, list) else x ys = [y] if not isinstance(y, list) else y rs = [r] if not isinstance(r, list) else r for x, y, r in zip(xs, ys, rs): #print(f"depth at {x}, {y}, {r}") control_image = fill_depth_circular(control_image, x, y, r) return control_image def process_sd_normal(self, input_image, normal_ball, mask_ball, x, y, r=None, normal_ball_path=None): if getattr(self, 'depth_estimator', None) is None: self.depth_estimator = transformers_pipeline("depth-estimation", model=DEPTH_ESTIMATOR, device=self.device.index) normal_scene = estimate_scene_normal(input_image, depth_estimator=self.depth_estimator) normal_image = merge_normal_map(normal_scene, normal_ball, mask_ball, x, y) normal_image = (normal_image * 127.5 + 127.5).clip(0, 255).astype(np.uint8) control_image = Image.fromarray(normal_image) return control_image def __call__(self, *args, **kwargs): process_fn = getattr(self, f"process_{self.sd_arch}_{self.control_signal_type}", None) if process_fn is None: raise ValueError else: return process_fn(*args, **kwargs) class BallInpainter(): def __init__(self, pipeline, sd_arch, control_generator, disable_water_mask=True): self.pipeline = pipeline self.sd_arch = sd_arch self.control_generator = control_generator self.median = {} if disable_water_mask: self._disable_water_mask() def _disable_water_mask(self): if hasattr(self.pipeline, "watermark"): self.pipeline.watermark = NoWaterMark() print("Disabled watermasking") @classmethod def from_sd(cls, model, controlnet=None, device=0, sampler="unipc", torch_dtype=torch.float16, disable_water_mask=True, offload=False ): if controlnet is not None: control_signal_type = get_control_signal_type(controlnet) controlnet = ControlNetModel.from_pretrained(controlnet, torch_dtype=torch.float16) pipe = CustomStableDiffusionControlNetInpaintPipeline.from_pretrained( model, controlnet=controlnet, torch_dtype=torch_dtype, ).to(device) control_generator = ControlSignalGenerator("sd", control_signal_type, device=device) else:
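process_sd_depth above turns a monocular depth estimate into a 3-channel ControlNet conditioning image. A hedged sketch of that conversion, assuming (as the code above does) that the transformers depth-estimation pipeline returns a dict whose 'depth' entry is a single-channel PIL image; no specific checkpoint is pinned here:

import numpy as np
from PIL import Image
from transformers import pipeline

def depth_control_image(input_image: Image.Image) -> Image.Image:
    depth_estimator = pipeline("depth-estimation")  # default model; a checkpoint could be passed via model=
    depth = np.array(depth_estimator(input_image)["depth"])[:, :, None]
    control = np.concatenate([depth, depth, depth], axis=2)  # replicate the depth map into 3 channels
    return Image.fromarray(control)

if __name__ == "__main__":
    img = Image.new("RGB", (64, 64), (128, 128, 128))  # dummy input just to exercise the shapes
    print(depth_control_image(img).size)               # expected (64, 64)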
class NoWaterMark: def apply_watermark(self, *args, **kwargs): return args[0] class ControlSignalGenerator(): def __init__(self, sd_arch, control_signal_type, device): self.sd_arch = sd_arch self.control_signal_type = control_signal_type self.device = device def process_sd_depth(self, input_image, normal_ball=None, mask_ball=None, x=None, y=None, r=None): if getattr(self, 'depth_estimator', None) is None: self.depth_estimator = transformers_pipeline("depth-estimation", device=self.device.index) control_image = self.depth_estimator(input_image)['depth'] control_image = np.array(control_image) control_image = control_image[:, :, None] control_image = np.concatenate([control_image, control_image, control_image], axis=2) control_image = Image.fromarray(control_image) control_image = fill_depth_circular(control_image, x, y, r) return control_image def process_sdxl_depth(self, input_image, normal_ball=None, mask_ball=None, x=None, y=None, r=None): if getattr(self, 'depth_estimator', None) is None: self.depth_estimator = transformers_pipeline("depth-estimation", model=DEPTH_ESTIMATOR, device=self.device.index) control_image = estimate_scene_depth(input_image, depth_estimator=self.depth_estimator) xs = [x] if not isinstance(x, list) else x ys = [y] if not isinstance(y, list) else y rs = [r] if not isinstance(r, list) else r for x, y, r in zip(xs, ys, rs): #print(f"depth at {x}, {y}, {r}") control_image = fill_depth_circular(control_image, x, y, r) return control_image def process_sd_normal(self, input_image, normal_ball, mask_ball, x, y, r=None, normal_ball_path=None): if getattr(self, 'depth_estimator', None) is None: self.depth_estimator = transformers_pipeline("depth-estimation", model=DEPTH_ESTIMATOR, device=self.device.index) normal_scene = estimate_scene_normal(input_image, depth_estimator=self.depth_estimator) normal_image = merge_normal_map(normal_scene, normal_ball, mask_ball, x, y) normal_image = (normal_image * 127.5 + 127.5).clip(0, 255).astype(np.uint8) control_image = Image.fromarray(normal_image) return control_image def __call__(self, *args, **kwargs): process_fn = getattr(self, f"process_{self.sd_arch}_{self.control_signal_type}", None) if process_fn is None: raise ValueError else: return process_fn(*args, **kwargs) class BallInpainter(): def __init__(self, pipeline, sd_arch, control_generator, disable_water_mask=True): self.pipeline = pipeline self.sd_arch = sd_arch self.control_generator = control_generator self.median = {} if disable_water_mask: self._disable_water_mask() def _disable_water_mask(self): if hasattr(self.pipeline, "watermark"): self.pipeline.watermark = NoWaterMark() print("Disabled watermasking") @classmethod def from_sd(cls, model, controlnet=None, device=0, sampler="unipc", torch_dtype=torch.float16, disable_water_mask=True, offload=False ): if controlnet is not None: control_signal_type = get_control_signal_type(controlnet) controlnet = ControlNetModel.from_pretrained(controlnet, torch_dtype=torch.float16) pipe = CustomStableDiffusionControlNetInpaintPipeline.from_pretrained( model, controlnet=controlnet, torch_dtype=torch_dtype, ).to(device) control_generator = ControlSignalGenerator("sd", control_signal_type, device=device) else:
pipe = CustomStableDiffusionInpaintPipeline.from_pretrained(
1
2023-12-07 14:03:31+00:00
24k
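As a reading aid for the ControlSignalGenerator.process_sd_depth method in the record above: the depth estimator returns a single-channel depth map, which is repeated into three channels so it can be consumed as an RGB-like ControlNet conditioning image. Below is a minimal, self-contained sketch of just that conversion step; the function name and the assumption that values are already in the 0..255 range are illustrative, not taken from the repository, and the repo-specific fill_depth_circular patching of the ball region is omitted.

import numpy as np
from PIL import Image

def depth_to_control_image(depth: np.ndarray) -> Image.Image:
    # Assumes `depth` is an (H, W) array already scaled to 0..255.
    depth = depth.astype(np.uint8)
    # Repeat the single channel three times -> (H, W, 3), mirroring the
    # record's np.concatenate along the channel axis.
    control = np.stack([depth, depth, depth], axis=2)
    return Image.fromarray(control)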
modelscope/normal-depth-diffusion
ldm/models/diffusion/wovae_ddpm.py
[ { "identifier": "AutoencoderKL", "path": "ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\...
import pdb
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
from contextlib import contextmanager
from functools import partial
from einops import rearrange, repeat
from ldm.models.autoencoder import (AutoencoderKL, IdentityFirstStage,
                                    VQModelInterface)
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.models.diffusion.dpm_solver import DPMSolverSampler
from ldm.models.diffusion.plms import PLMSSampler
from ldm.modules.attention import CrossAttention
from ldm.modules.diffusionmodules.util import (extract_into_tensor,
                                               make_beta_schedule, noise_like)
from ldm.modules.distributions.distributions import (
    DiagonalGaussianDistribution, normal_kl)
from ldm.modules.ema import LitEma
from ldm.util import (count_params, default, exists, filter_nan_loss,
                      instantiate_from_config, isimage, ismap, log_txt_as_img,
                      mean_flat)
from torch.optim.lr_scheduler import LambdaLR
from torchvision.utils import make_grid
from tqdm import tqdm
from pytorch_lightning.utilities.distributed import rank_zero_only
from pytorch_lightning.utilities.rank_zero import rank_zero_only
16,919
'image', 'LR_image', 'segmentation', 'bbox_img', 'ic' ] and self.model.conditioning_key: # todo check for completeness c_key = next(iter(cond.keys())) # get key c = next(iter(cond.values())) # get value assert (len(c) == 1 ) # todo extend to list with more than one elem c = c[0] # get element c = unfold(c) c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L ) cond_list = [{ c_key: [c[:, :, :, :, i]] } for i in range(c.shape[-1])] elif self.cond_stage_key == 'coordinates_bbox': assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size' # assuming padding of unfold is always 0 and its dilation is always 1 n_patches_per_row = int((w - ks[0]) / stride[0] + 1) full_img_h, full_img_w = self.split_input_params[ 'original_image_size'] # as we are operating on latents, we need the factor from the original image size to the # spatial latent size to properly rescale the crops for regenerating the bbox annotations num_downs = self.first_stage_model.encoder.num_resolutions - 1 rescale_latent = 2**(num_downs) # get top left postions of patches as conforming for the bbbox tokenizer, therefore we # need to rescale the tl patch coordinates to be in between (0,1) tl_patch_coordinates = [ (rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h) for patch_nr in range(z.shape[-1]) ] # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) patch_limits = [ (x_tl, y_tl, rescale_latent * ks[0] / full_img_w, rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates ] # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] # tokenize crop coordinates for the bounding boxes of the respective patches patch_limits_tknzd = [ torch.LongTensor( self.bbox_tokenizer._crop_encoder(bbox))[None].to( self.device) for bbox in patch_limits ] # list of length l with tensors of shape (1, 2) print(patch_limits_tknzd[0].shape) # cut tknzd crop position from conditioning assert isinstance( cond, dict), 'cond must be dict to be fed into model' cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device) print(cut_cond.shape) adapted_cond = torch.stack([ torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd ]) adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n') print(adapted_cond.shape) adapted_cond = self.get_learned_conditioning(adapted_cond) print(adapted_cond.shape) adapted_cond = rearrange( adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1]) print(adapted_cond.shape) cond_list = [{'c_crossattn': [e]} for e in adapted_cond] else: cond_list = [cond for i in range(z.shape[-1]) ] # Todo make this more efficient # apply model by loop over crops output_list = [ self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1]) ] assert not isinstance( output_list[0], tuple ) # todo cant deal with multiple model outputs check this never happens o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view( (o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together x_recon = fold(o) / normalization else: x_recon = self.model(x_noisy, t, **cond) if isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / 
\ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor( [self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl( mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers this is without vae ddpm -- merci """ try: except: __conditioning_keys__ = { 'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y' } def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class anneal_identity(): def __call__(self, x, global_step): return x def upper_bound(arr, key): left = 0 right = len(arr) while left < right: mid = (left + right) >> 1 if arr[mid] < key: left = mid + 1 else: right = mid return left class anneal_warmup(): def __init__(self, anneal_ratio, anneal_global_step, num_steps): self.anneal_ratio = anneal_ratio self.anneal_global_step = anneal_global_step self.steps = num_steps // (len(anneal_global_step) + 1) self.start_steps = self.steps def __call__(self, x, global_step): if (torch.rand(1) > self.anneal_ratio).item(): return x else: return int(self.start_steps + self.start_steps * upper_bound(self.anneal_global_step, global_step)) class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__( self, unet_config, timesteps=1000, beta_schedule='linear', loss_type='l2', ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor='val/loss', use_ema=True, first_stage_key='image', image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization='eps', # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., anneal_t=False, # we find at the begining, smaller t, larger denoise mse loss. anneal_global_step=[], anneal_ratio=0.9, prior_model=None, prior_normal=None, input_keys=['rgb'], ): super().__init__() assert parameterization in [ 'eps', 'x0' ], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print( f'{self.__class__.__name__}: Running in {self.parameterization}-prediction mode' ) self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? 
self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f'Keeping EMAs of {len(list(self.model_ema.buffers()))}.') self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight self.input_keys = input_keys if monitor is not None: self.monitor = monitor if ckpt_path is not None: self.init_from_ckpt( ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) self.register_schedule( given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full( fill_value=logvar_init, size=(self.num_timesteps, )) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) ### anneal t function if not anneal_t: self.anneal_func = anneal_identity() else: self.anneal_func = anneal_warmup(anneal_ratio, anneal_global_step, self.num_timesteps) if prior_model is not None: self.prior_model = instantiate_from_config(prior_model) else: self.prior_model = None if prior_normal is not None: self.prior_normal = instantiate_from_config(prior_normal) else: self.prior_normal = None def register_schedule(self, given_betas=None, beta_schedule='linear', timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule( beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[ 0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * ( 1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. 
- alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer( 'posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer( 'posterior_mean_coef1', to_torch(betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer( 'posterior_mean_coef2', to_torch((1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == 'eps': lvlb_weights = self.betas**2 / (2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == 'x0': lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / ( 2. * 1 - torch.Tensor(alphas_cumprod)) else: raise NotImplementedError('mu not supported') # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f'{context}: Switched to EMA weights') try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f'{context}: Restored training weights') def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location='cpu') if 'state_dict' in list(sd.keys()): sd = sd['state_dict'] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print('Deleting key {} from state_dict.'.format(k)) del sd[k] missing, unexpected = self.load_state_dict( sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print( f'Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys' ) if len(missing) > 0: print(f'Missing Keys: {missing}') if len(unexpected) > 0: print(f'Unexpected Keys: {unexpected}') if self.use_ema: if len(missing) > 0: model_ema_str = sorted(missing)[-1] # missing model_ema if 'model_ema' in model_ema_str: print(f'Reinitialize model_ema') self.model_ema = LitEma(self.model) print( f'Keeping EMAs of {len(list(self.model_ema.buffers()))}.' ) else: if self.ema_copy == True: print(f'Reinitialize model_ema') self.model_ema = LitEma(self.model) print( f'Keeping EMAs of {len(list(self.model_ema.buffers()))}.' ) def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
""" mean = ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor( self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == 'eps': x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == 'x0': x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) model_mean, posterior_variance, posterior_log_variance = self.q_posterior( x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance( x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape( b, *((1, ) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm( reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample( img, torch.full((b, ), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop( (batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss( target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == 'eps': target = noise elif 
self.parameterization == 'x0': target = x_start else: raise NotImplementedError( f'Paramterization {self.parameterization} not yet supported') loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint( 0, self.num_timesteps, (x.shape[0], ), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict # property of model for (to, cuda, cpu, float, half, ...) def to(self, *args, **kwargs): # type: ignore[valid-type] """See :meth:`torch.nn.Module.to`.""" # this converts `str` device to `torch.device` if self.prior_model is not None: self.prior_model.to(*args, **kwargs) if self.prior_normal is not None: self.prior_normal.to(*args, **kwargs) return super().to(*args, **kwargs) def cuda(self, device=None): # type: ignore[valid-type] """Moves all model parameters and buffers to the GPU. This also makes associated parameters and buffers different objects. So it should be called before constructing optimizer if the module will live on GPU while being optimized. Arguments: device: If specified, all parameters will be copied to that device. If `None`, the current CUDA device index will be used. 
Returns: Module: self """ if device is None: device = torch.device('cuda', torch.cuda.current_device()) elif isinstance(device, int): device = torch.device('cuda', index=device) if self.prior_model is not None: self.prior_model.cuda(device) if self.prior_normal is not None: self.prior_normal.cuda(device) return super().cuda(device=device) def cpu(self): # type: ignore[valid-type] """See :meth:`torch.nn.Module.cpu`.""" if self.prior_model is not None: self.prior_model.cpu() if self.prior_normal is not None: self.prior_normal.cpu() return super().cpu() def float(self): # type: ignore[valid-type] """See :meth:`torch.nn.Module.float`.""" if self.prior_model is not None: self.prior_model.float() if self.prior_normal is not None: self.prior_normal.float() return super().float() def double(self): # type: ignore[valid-type] """See :meth:`torch.nn.Module.double`.""" if self.prior_model is not None: self.prior_model.double() if self.prior_normal is not None: self.prior_normal.double() return super().double() def half(self): # type: ignore[valid-type] """See :meth:`torch.nn.Module.half`.""" if self.prior_model is not None: self.prior_model.half() if self.prior_normal is not None: self.prior_normal.half() return super().half() def prior_to_eval(self): if self.prior_model is not None: self.prior_model.eval() if self.prior_normal is not None: self.prior_normal.eval() @torch.no_grad() def prior_inference(self, inputs, prior_inputs): # depth prior model # midas or zoe is 384 model inputs = inputs.permute(0, 3, 1, 2) prior_results = {} self.prior_to_eval() # using depth prior if self.prior_model is not None: model_prior_results = self.prior_model(prior_inputs) prior_results.update(model_prior_results) # using normal map if self.prior_normal is not None: normal_prior_results = self.prior_normal(prior_inputs) prior_results.update(normal_prior_results) resize_prior_results = {} _, __, h, w = inputs.shape for key in prior_results.keys(): resize_prior_results[key] = F.interpolate( prior_results[key], (w, h), mode='bilinear') # add a rgb input resize_prior_results.update({'rgb': inputs}) input_container = [] for key in self.input_keys: input_container.append(resize_prior_results[key]) return torch.cat(input_container, dim=1).permute(0, 2, 3, 1) @torch.no_grad() def collect_inputs(self, batch): input_container = [] for key in self.input_keys: # [B H W C] input_container.append(batch[key]) return torch.cat(input_container, dim=-1) def training_step(self, batch, batch_idx): if self.prior_model is not None: batch['image'] = self.prior_inference(batch['image'], batch['prior']) # image_condition batch['ic'] = batch['image'][..., :3] else: batch['image'] = self.collect_inputs(batch) # image_condition batch['ic'] = batch['image'][..., :3] loss, loss_dict = self.shared_step(batch) self.log_dict( loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log( 'global_step', self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log( 'lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): if self.prior_model is not None: batch['image'] = self.prior_inference(batch['image'], batch['prior']) # image_condition batch['ic'] = batch['image'][..., :3] else: batch['image'] = self.collect_inputs(batch) # image_condition batch['ic'] = batch['image'][..., :3] _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, 
loss_dict_ema = self.shared_step(batch) loss_dict_ema = { key + '_ema': loss_dict_ema[key] for key in loss_dict_ema } self.log_dict( loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) self.log_dict( loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) @torch.no_grad() def test_step(self, batch, batch_idx): if self.prior_model is not None: batch['image'] = self.prior_inference(batch['image'], batch['prior']) # image_condition batch['ic'] = batch['image'][..., :3] else: batch['image'] = self.collect_inputs(batch) # image_condition batch['ic'] = batch['image'][..., :3] with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = { key + '_ema': loss_dict_ema[key] for key in loss_dict_ema } self.log_dict( loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) def on_train_batch_end(self, *args, **kwargs): # args: outputs, batch, batch_idx if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log['inputs'] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log['diffusion_row'] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope('Plotting'): samples, denoise_row = self.sample( batch_size=N, return_intermediates=True) log['samples'] = samples log['denoise_row'] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key='image', cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, first_stage_ckpts=None, without_crossattn=False, ema_copy=False, *args, **kwargs): self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config == '__is_unconditional__': conditioning_key = None ckpt_path = kwargs.pop('ckpt_path', None) ignore_keys = kwargs.pop('ignore_keys', []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = 
len( first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.first_stage_ckpts = first_stage_ckpts # VAE Load self.instantiate_first_stage(first_stage_config) # CLIP load self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False self.ema_copy = ema_copy if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True if self.first_stage_ckpts is not None: first_stage_ckpts = torch.load( self.first_stage_ckpts, map_location='cpu') no_match = self.first_stage_model.load_state_dict( first_stage_ckpts['state_dict'], strict=False) print('encode-decode, no match keys:\n {}'.format(no_match)) for param in self.first_stage_model.parameters(): param.requires_grad = False # lambda-stage-1 without crossattn if without_crossattn: for m in self.modules(): if isinstance(m, CrossAttention): for para in m.parameters(): para.requires_grad = False # RuntimeError: One of the differentiated Tensors does not require grad def make_cond_schedule(self, ): self.cond_ids = torch.full( size=(self.num_timesteps, ), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round( torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx): # only for very first batch if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' # set rescale weight to 1./std of encodings print('### USING STD-RESCALING ###') x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer('scale_factor', 1. / z.flatten().std()) print(f'setting self.scale_factor to {self.scale_factor}') print('### USING STD-RESCALING ###') def register_schedule(self, given_betas=None, beta_schedule='linear', timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == '__is_first_stage__': print('Using first stage also as cond stage.') self.cond_stage_model = self.first_stage_model elif config == '__is_unconditional__': print( f'Training {self.__class__.__name__} as an unconditional model.' 
) self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != '__is_first_stage__' assert config != '__is_unconditional__' model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append( self.decode_first_stage( zd.to(self.device), force_not_quantize=force_no_decoder_quantization)) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError( f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented" ) return self.scale_factor * z def get_learned_conditioning(self, c): ''' # CLIP embedding ''' if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, 'encode') and callable( self.cond_stage_model.encode): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min( torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, device): weighting = self.delta_border(h, w) weighting = torch.clip( weighting, self.split_input_params['clip_min_weight'], self.split_input_params['clip_max_weight'], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params['tie_braker']: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip( L_weighting, self.split_input_params['clip_min_tie_weight'], self.split_input_params['clip_max_tie_weight']) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict( kernel_size=kernel_size, 
dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict( kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf)) fold = torch.nn.Fold( output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view( 1, 1, h * uf, w * uf) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) elif df > 1 and uf == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict( kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df)) fold = torch.nn.Fold( output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view( 1, 1, h // df, w // df) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) else: raise NotImplementedError return fold, unfold, normalization, weighting ''' @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None): x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if self.model.conditioning_key is not None: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox']: xc = batch[cond_key] elif cond_key == 'class_label': xc = batch else: xc = super().get_input(batch, cond_key).to(self.device) else: xc = x if not self.cond_stage_trainable or force_c_encode: if isinstance(xc, dict) or isinstance(xc, list): # import pudb; pudb.set_trace() c = self.get_learned_conditioning(xc) else: c = self.get_learned_conditioning(xc.to(self.device)) else: c = xc if bs is not None: c = c[:bs] if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) ckey = __conditioning_keys__[self.model.conditioning_key] c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} else: c = None xc = None if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) c = {'pos_x': pos_x, 'pos_y': pos_y} out = [z, c] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out ''' @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, uncond=0.1): ''' we add uncondition prompts to improve classifer-free guidance results ''' x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) ''' 
encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() ''' _, _c, _h, _w = x.shape z = F.interpolate( x, (_w // 8, _h // 8), mode='bilinear', align_corners=False) if self.model.conditioning_key is not None: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox']: xc = batch[cond_key] elif cond_key == 'class_label': xc = batch else: xc = super().get_input(batch, cond_key).to(self.device) else: xc = x if not self.cond_stage_trainable or force_c_encode: if isinstance(xc, dict) or isinstance(xc, list): # import pudb; pudb.set_trace() c = self.get_learned_conditioning(xc) else: c = self.get_learned_conditioning(xc.to(self.device)) else: c = xc if bs is not None: c = c[:bs] if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) ckey = __conditioning_keys__[self.model.conditioning_key] c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} else: c = None xc = None if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) c = {'pos_x': pos_x, 'pos_y': pos_y} # To support classifier-free guidance, randomly drop out only text conditioning 10% like sd-v1.5 random = torch.rand(x.size(0), device=x.device) prompt_mask = rearrange(random < uncond, 'n -> n 1 1') null_prompts = self.get_learned_conditioning(['']).to(c.device) cc = torch.where(prompt_mask, null_prompts, c) out = [z, cc] if return_first_stage_outputs: xrec = F.interpolate( z, (_w, _h), mode='bilinear', align_corners=False) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry( z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z if hasattr(self, 'split_input_params'): if self.split_input_params['patch_distributed_vq']: ks = self.split_input_params['ks'] # eg. (128, 128) stride = self.split_input_params['stride'] # eg. (64, 64) uf = self.split_input_params['vqf'] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print('reducing Kernel') if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print('reducing stride') fold, unfold, normalization, weighting = self.get_fold_unfold( z, ks, stride, uf=uf) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [ self.first_stage_model.decode( z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize) for i in range(z.shape[-1]) ] else: output_list = [ self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1]) ] o = torch.stack( output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. 
reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode( z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode( z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) # same as above but without decorator def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry( z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z if hasattr(self, 'split_input_params'): if self.split_input_params['patch_distributed_vq']: ks = self.split_input_params['ks'] # eg. (128, 128) stride = self.split_input_params['stride'] # eg. (64, 64) uf = self.split_input_params['vqf'] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print('reducing Kernel') if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print('reducing stride') fold, unfold, normalization, weighting = self.get_fold_unfold( z, ks, stride, uf=uf) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [ self.first_stage_model.decode( z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize) for i in range(z.shape[-1]) ] else: output_list = [ self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1]) ] o = torch.stack( output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode( z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode( z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) @torch.no_grad() def encode_first_stage(self, x): if hasattr(self, 'split_input_params'): if self.split_input_params['patch_distributed_vq']: ks = self.split_input_params['ks'] # eg. (128, 128) stride = self.split_input_params['stride'] # eg. 
(64, 64) df = self.split_input_params['vqf'] self.split_input_params['original_image_size'] = x.shape[-2:] bs, nc, h, w = x.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print('reducing Kernel') if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print('reducing stride') fold, unfold, normalization, weighting = self.get_fold_unfold( x, ks, stride, df=df) z = unfold(x) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) output_list = [ self.first_stage_model.encode(z[:, :, :, :, i]) for i in range(z.shape[-1]) ] o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization return decoded else: return self.first_stage_model.encode(x) else: return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): # obtain encode(x), conditon x, c = self.get_input(batch, self.first_stage_key) # ddpm loss = self(x, c) return loss def guassian_distributed(self, x, sigma=100): y = torch.exp(-(x)**2 / (2 * sigma**2)) return y / y.sum() def forward(self, x, c, *args, **kwargs): # anneal t finetune num_timesteps = self.anneal_func(self.num_timesteps, self.global_step) t = torch.randint( 0, num_timesteps, (x.shape[0], ), device=self.device).long() if self.model.conditioning_key is not None: assert c is not None if self.cond_stage_trainable: c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = self.q_sample( x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset def rescale_bbox(bbox): x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) w = min(bbox[2] / crop_coordinates[2], 1 - x0) h = min(bbox[3] / crop_coordinates[3], 1 - y0) return x0, y0, w, h return [rescale_bbox(b) for b in bboxes] def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): # hybrid case, cond is exptected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' cond = {key: cond} if hasattr(self, 'split_input_params'): assert len( cond) == 1 # todo can only deal with one conditioning atm assert not return_ids ks = self.split_input_params['ks'] # eg. (128, 128) stride = self.split_input_params['stride'] # eg. 
(64, 64) h, w = x_noisy.shape[-2:] fold, unfold, normalization, weighting = self.get_fold_unfold( x_noisy, ks, stride) z = unfold(x_noisy) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] if self.cond_stage_key in [ 'image', 'LR_image', 'segmentation', 'bbox_img', 'ic' ] and self.model.conditioning_key: # todo check for completeness c_key = next(iter(cond.keys())) # get key c = next(iter(cond.values())) # get value assert (len(c) == 1 ) # todo extend to list with more than one elem c = c[0] # get element c = unfold(c) c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L ) cond_list = [{ c_key: [c[:, :, :, :, i]] } for i in range(c.shape[-1])] elif self.cond_stage_key == 'coordinates_bbox': assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size' # assuming padding of unfold is always 0 and its dilation is always 1 n_patches_per_row = int((w - ks[0]) / stride[0] + 1) full_img_h, full_img_w = self.split_input_params[ 'original_image_size'] # as we are operating on latents, we need the factor from the original image size to the # spatial latent size to properly rescale the crops for regenerating the bbox annotations num_downs = self.first_stage_model.encoder.num_resolutions - 1 rescale_latent = 2**(num_downs) # get top left postions of patches as conforming for the bbbox tokenizer, therefore we # need to rescale the tl patch coordinates to be in between (0,1) tl_patch_coordinates = [ (rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h) for patch_nr in range(z.shape[-1]) ] # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) patch_limits = [ (x_tl, y_tl, rescale_latent * ks[0] / full_img_w, rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates ] # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] # tokenize crop coordinates for the bounding boxes of the respective patches patch_limits_tknzd = [ torch.LongTensor( self.bbox_tokenizer._crop_encoder(bbox))[None].to( self.device) for bbox in patch_limits ] # list of length l with tensors of shape (1, 2) print(patch_limits_tknzd[0].shape) # cut tknzd crop position from conditioning assert isinstance( cond, dict), 'cond must be dict to be fed into model' cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device) print(cut_cond.shape) adapted_cond = torch.stack([ torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd ]) adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n') print(adapted_cond.shape) adapted_cond = self.get_learned_conditioning(adapted_cond) print(adapted_cond.shape) adapted_cond = rearrange( adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1]) print(adapted_cond.shape) cond_list = [{'c_crossattn': [e]} for e in adapted_cond] else: cond_list = [cond for i in range(z.shape[-1]) ] # Todo make this more efficient # apply model by loop over crops output_list = [ self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1]) ] assert not isinstance( output_list[0], tuple ) # todo cant deal with multiple model outputs check this never happens o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view( (o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * 
ks[1], L) # stitch crops together x_recon = fold(o) / normalization else: x_recon = self.model(x_noisy, t, **cond) if isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor( [self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl( mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
return mean_flat(kl_prior) / np.log(2.0)
21
2023-12-06 07:29:34+00:00
24k
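As a reading aid for the DDPM class in the preceding record, here is a sketch, in standard DDPM notation with \bar{\alpha}_t = \prod_{s \le t} \alpha_s, of the closed-form quantities that q_sample, q_posterior and _prior_bpd/normal_kl compute; the buffers registered in register_schedule are exactly the coefficients below.

q(x_t \mid x_0) = \mathcal{N}\!\left(x_t;\ \sqrt{\bar{\alpha}_t}\, x_0,\ (1-\bar{\alpha}_t)\, I\right),
\qquad
x_t = \sqrt{\bar{\alpha}_t}\, x_0 + \sqrt{1-\bar{\alpha}_t}\,\varepsilon .

q(x_{t-1} \mid x_t, x_0) = \mathcal{N}\!\left(x_{t-1};\ \tilde{\mu}_t(x_t, x_0),\ \tilde{\beta}_t I\right),
\quad
\tilde{\mu}_t = \frac{\sqrt{\bar{\alpha}_{t-1}}\,\beta_t}{1-\bar{\alpha}_t}\, x_0
              + \frac{\sqrt{\alpha_t}\,(1-\bar{\alpha}_{t-1})}{1-\bar{\alpha}_t}\, x_t,
\quad
\tilde{\beta}_t = \frac{1-\bar{\alpha}_{t-1}}{1-\bar{\alpha}_t}\,\beta_t ,

where the code's v_posterior option replaces \tilde{\beta}_t with (1-v)\,\tilde{\beta}_t + v\,\beta_t. The helper normal_kl evaluates

\mathrm{KL}\!\left(\mathcal{N}(\mu_1,\sigma_1^2)\,\|\,\mathcal{N}(\mu_2,\sigma_2^2)\right)
 = \log\frac{\sigma_2}{\sigma_1} + \frac{\sigma_1^2 + (\mu_1-\mu_2)^2}{2\sigma_2^2} - \frac{1}{2},

and _prior_bpd returns the per-dimension mean of \mathrm{KL}\!\left(q(x_T \mid x_0)\,\|\,\mathcal{N}(0, I)\right) divided by \ln 2, i.e. the prior term of the variational bound in bits per dimension.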
RobertCsordas/moe_attention
tasks/simple/language_model/transformer_lm_mixin.py
[ { "identifier": "TransformerLanguageModel", "path": "models/transformer_language_model.py", "snippet": "class TransformerLanguageModel(LoggingLayer, torch.nn.Module):\n def __init__(self, voc_size: int, embedding_size: Optional[int], state_size: int, dropout: float,\n tied_embedding: ...
import framework
import torch
import torch.nn
import torch.nn.functional as F
import torch.utils.data
import math
from typing import List, Tuple, Dict, Any
from models import TransformerLanguageModel
from ... import task, args
from layers.transformer import RelativeTransformerEncoderLayer, PrelnRelativeTransformerEncoderLayer
from layers.transformer.relative_moe_transformer import RelativeMoeTransformerEncoderLayer
from layers.transformer.fast_rope_transformer import FastRopeTransformerEncoderLayer
from layers.transformer.moe_attention_relative_transformer import MoeAttentionRelativeTransformerEncoderLayer
from layers.moe_layer import MoE
from interfaces import Result
from layers import LayerVisualizer
from layers.transformer.full_moe_relative_attention import FullMoeRelativeAttentionCore
19,108
@args def a(parser: framework.helpers.ArgumentParser): parser.add_argument("-lm.trafo.context_blocks", default=1) parser.add_argument("-lm.trafo.test_context_blocks", default="none", parser=parser.int_or_none_parser) parser.add_argument("-lm.trafo.test_pos_clamp", default="none", parser=parser.int_or_none_parser) parser.add_argument("-lm.trafo.same_length_eval", default=False) parser.add_argument("-lm.trafo.same_length", default=False) parser.add_argument("-lm.trafo.last_layer_context", default=False) parser.add_argument("-lm.trafo.xl_init", default=False) parser.add_argument("-lm.trafo.embedding_mode_init", default="default", choice=["default", "scale_to_sqrt_dmodel", "init_to_sqrt_dmodel", "one_and_scale_to_sqrt_dmodel", "like_preln"]) parser.add_argument("-pkm.n_heads", default=1) parser.add_argument("-moe.n_experts", default=128) parser.add_argument("-moe.expert_size", default=128) parser.add_argument("-moe.selection_mode", default="sigmoid", choice=["gate", "sigmoid", "mul"]) parser.add_argument("-moe.perplexity_reg", default=0.0) parser.add_argument("-moe.perplexity_reg_mode", default="step", choice=["step", "global", "time", "global_time"]) parser.add_argument("-moe.reg_type", default="entropy", choice=["perplexity", "variance", "entropy", "l2", "switch", "normal"]) parser.add_argument("-moe.norm_keys", default=False) parser.add_argument("-moe.n_random", default=0) parser.add_argument("-moe.topk_mode", default="full", choice=["full", "l1_approx", "approx"]) parser.add_argument("-moe.activation_after_topk", default=False) parser.add_argument("-moe.drop_parallel", default=True) parser.add_argument("-moe.norm_key_init", default=False) parser.add_argument("-moe.norm_value_init", default=False) parser.add_argument("-moe.identical_init", default=False) parser.add_argument("-moe.sel_lr_multipler", default=1.0) parser.add_argument("-moe.expert_lr_multipler", default=1.0) parser.add_argument("-moe.sel_norm", default="none", choice=["none", "cos", "input", "weights"]) parser.add_argument("-moe.dropout_factor", default=1.0) parser.add_argument("-moe.drop_expert", default=0.0) parser.add_argument("-moe.sync_distributed", default=True) parser.add_argument("-moe.modulation_amplitude", default=0.5) parser.add_argument("-moe.init_scale", default=1.0) parser.add_argument("-moe.norm_expert_sel_init", default=False) parser.add_argument("-kvmem.dropout", default="none", choice=["none", "early", "late", "weight", "score"]) parser.add_argument("-kvmem.norm_values", default=False) parser.add_argument("-transformer.topk_value", default=32) parser.add_argument("-transformer.activation", default="relu", choice=["relu", "topk", "gelu", "identity", "sigmoid", "softmax"]) parser.add_argument("-transformer.p_drop_layer", default=0.0) parser.add_argument("-transformer.head_projection_size", default="none", parser=parser.int_or_none_parser) parser.add_argument("-transformer.ln_affine", default=True) parser.add_argument("-transformer.ln_after_attention", default=True) parser.add_argument("-moe.att.n_experts", default=4) parser.add_argument("-moe.att.variant", default="moa", choice=["moa", "simple", "qside", "full", "full_rope", "seq", "target"]) parser.add_argument("-moe.att.enable", default=False) parser.add_argument("-moe.att.q_expert", default=True) parser.add_argument("-moe.att.k_expert", default=True) parser.add_argument("-moe.att.v_expert", default=True) parser.add_argument("-moe.att.o_expert", default=True) parser.add_argument("-moe.att.k", default=2) parser.add_argument("-moe.att.norm_qk", default=False) 
parser.add_argument("-moe.att.v_size", default="none", parser=parser.int_or_none_parser) parser.add_argument("-moe.att.same_sel", default=False) parser.add_argument("-moe.att.expert_dropout", default="none", parser=parser.float_or_none_parser) parser.add_argument("-moe.att.selection_mode", default="sigmoid", choice=["sigmoid", "softmax"]) parser.add_argument("-moe.att.perplexity_reg", default="none", parser=parser.float_or_none_parser) parser.add_argument("-moe.att.qside_n_experts", default="none", parser=parser.int_or_none_parser) parser.add_argument("-moe.att.k", default=2) parser.add_argument("-moe.att.norm_ret", default=False) parser.add_argument("-moe.att.shared_experts", default=False) parser.add_argument("-moe.att.drop_expert", default="none", parser=parser.float_or_none_parser) parser.add_argument("-moe.att.kq_n_experts", default="none", parser=parser.int_or_none_parser) parser.add_argument("-moe.att.separate_kq_sel", default=False) parser.add_argument("-moe.att.norm_init", default=False) parser.add_argument("-rope.rotate_fraction", default=0.5) parser.add_argument("-rope.base", default=10000.0) parser.add_argument("-moa.mode", default="my", choice=["my", "moa"]) parser.add_argument("-moa.cvloss", default=0.0) parser.add_argument("-moa.switchloss", default=0.0) parser.add_argument("-moa.zloss", default=0.0) parser.add_argument("-debug_plot_interval", default="none", parser=parser.int_or_none_parser) parser.add_argument("-transformer.plot_head_details", default=False) parser.add_argument("-plot.n_steps", default=-128)
@args def a(parser: framework.helpers.ArgumentParser): parser.add_argument("-lm.trafo.context_blocks", default=1) parser.add_argument("-lm.trafo.test_context_blocks", default="none", parser=parser.int_or_none_parser) parser.add_argument("-lm.trafo.test_pos_clamp", default="none", parser=parser.int_or_none_parser) parser.add_argument("-lm.trafo.same_length_eval", default=False) parser.add_argument("-lm.trafo.same_length", default=False) parser.add_argument("-lm.trafo.last_layer_context", default=False) parser.add_argument("-lm.trafo.xl_init", default=False) parser.add_argument("-lm.trafo.embedding_mode_init", default="default", choice=["default", "scale_to_sqrt_dmodel", "init_to_sqrt_dmodel", "one_and_scale_to_sqrt_dmodel", "like_preln"]) parser.add_argument("-pkm.n_heads", default=1) parser.add_argument("-moe.n_experts", default=128) parser.add_argument("-moe.expert_size", default=128) parser.add_argument("-moe.selection_mode", default="sigmoid", choice=["gate", "sigmoid", "mul"]) parser.add_argument("-moe.perplexity_reg", default=0.0) parser.add_argument("-moe.perplexity_reg_mode", default="step", choice=["step", "global", "time", "global_time"]) parser.add_argument("-moe.reg_type", default="entropy", choice=["perplexity", "variance", "entropy", "l2", "switch", "normal"]) parser.add_argument("-moe.norm_keys", default=False) parser.add_argument("-moe.n_random", default=0) parser.add_argument("-moe.topk_mode", default="full", choice=["full", "l1_approx", "approx"]) parser.add_argument("-moe.activation_after_topk", default=False) parser.add_argument("-moe.drop_parallel", default=True) parser.add_argument("-moe.norm_key_init", default=False) parser.add_argument("-moe.norm_value_init", default=False) parser.add_argument("-moe.identical_init", default=False) parser.add_argument("-moe.sel_lr_multipler", default=1.0) parser.add_argument("-moe.expert_lr_multipler", default=1.0) parser.add_argument("-moe.sel_norm", default="none", choice=["none", "cos", "input", "weights"]) parser.add_argument("-moe.dropout_factor", default=1.0) parser.add_argument("-moe.drop_expert", default=0.0) parser.add_argument("-moe.sync_distributed", default=True) parser.add_argument("-moe.modulation_amplitude", default=0.5) parser.add_argument("-moe.init_scale", default=1.0) parser.add_argument("-moe.norm_expert_sel_init", default=False) parser.add_argument("-kvmem.dropout", default="none", choice=["none", "early", "late", "weight", "score"]) parser.add_argument("-kvmem.norm_values", default=False) parser.add_argument("-transformer.topk_value", default=32) parser.add_argument("-transformer.activation", default="relu", choice=["relu", "topk", "gelu", "identity", "sigmoid", "softmax"]) parser.add_argument("-transformer.p_drop_layer", default=0.0) parser.add_argument("-transformer.head_projection_size", default="none", parser=parser.int_or_none_parser) parser.add_argument("-transformer.ln_affine", default=True) parser.add_argument("-transformer.ln_after_attention", default=True) parser.add_argument("-moe.att.n_experts", default=4) parser.add_argument("-moe.att.variant", default="moa", choice=["moa", "simple", "qside", "full", "full_rope", "seq", "target"]) parser.add_argument("-moe.att.enable", default=False) parser.add_argument("-moe.att.q_expert", default=True) parser.add_argument("-moe.att.k_expert", default=True) parser.add_argument("-moe.att.v_expert", default=True) parser.add_argument("-moe.att.o_expert", default=True) parser.add_argument("-moe.att.k", default=2) parser.add_argument("-moe.att.norm_qk", default=False) 
parser.add_argument("-moe.att.v_size", default="none", parser=parser.int_or_none_parser) parser.add_argument("-moe.att.same_sel", default=False) parser.add_argument("-moe.att.expert_dropout", default="none", parser=parser.float_or_none_parser) parser.add_argument("-moe.att.selection_mode", default="sigmoid", choice=["sigmoid", "softmax"]) parser.add_argument("-moe.att.perplexity_reg", default="none", parser=parser.float_or_none_parser) parser.add_argument("-moe.att.qside_n_experts", default="none", parser=parser.int_or_none_parser) parser.add_argument("-moe.att.k", default=2) parser.add_argument("-moe.att.norm_ret", default=False) parser.add_argument("-moe.att.shared_experts", default=False) parser.add_argument("-moe.att.drop_expert", default="none", parser=parser.float_or_none_parser) parser.add_argument("-moe.att.kq_n_experts", default="none", parser=parser.int_or_none_parser) parser.add_argument("-moe.att.separate_kq_sel", default=False) parser.add_argument("-moe.att.norm_init", default=False) parser.add_argument("-rope.rotate_fraction", default=0.5) parser.add_argument("-rope.base", default=10000.0) parser.add_argument("-moa.mode", default="my", choice=["my", "moa"]) parser.add_argument("-moa.cvloss", default=0.0) parser.add_argument("-moa.switchloss", default=0.0) parser.add_argument("-moa.zloss", default=0.0) parser.add_argument("-debug_plot_interval", default="none", parser=parser.int_or_none_parser) parser.add_argument("-transformer.plot_head_details", default=False) parser.add_argument("-plot.n_steps", default=-128)
@task()
1
2023-12-13 08:45:02+00:00
24k
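The record above registers hierarchical, dot-separated flag names (-moe.n_experts, -moe.att.k, ...) on the project's framework.helpers.ArgumentParser. A minimal sketch, assuming plain stdlib argparse rather than that framework, of how such dotted flag names can be parsed and regrouped into a nested config dict; the nest_args helper is hypothetical and not part of the record's code:

import argparse

def nest_args(flat: dict) -> dict:
    """Group dotted argparse names like 'moe.n_experts' into nested dicts."""
    nested: dict = {}
    for key, value in flat.items():
        node = nested
        *parents, leaf = key.split(".")
        for part in parents:
            node = node.setdefault(part, {})
        node[leaf] = value
    return nested

parser = argparse.ArgumentParser()
parser.add_argument("-moe.n_experts", type=int, default=128)
parser.add_argument("-moe.expert_size", type=int, default=128)
parser.add_argument("-moe.att.k", type=int, default=2)

args = parser.parse_args([])        # use the defaults
config = nest_args(vars(args))      # {'moe': {'n_experts': 128, 'expert_size': 128, 'att': {'k': 2}}}
print(config)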
AIFSH/NativeDancer
nativedancer/third_part/detectron2/modeling/meta_arch/retinanet.py
[ { "identifier": "configurable", "path": "nativedancer/third_part/detectron2/config/config.py", "snippet": "def configurable(init_func=None, *, from_config=None):\n \"\"\"\n Decorate a function or a class's __init__ method so that it can be called\n with a :class:`CfgNode` object using a :func:`...
import logging import math import torch from typing import List, Tuple from fvcore.nn import sigmoid_focal_loss_jit from torch import Tensor, nn from torch.nn import functional as F from ...config import configurable from ...layers import CycleBatchNormList, ShapeSpec, batched_nms, cat, get_norm from ...structures import Boxes, ImageList, Instances, pairwise_iou from ...utils.events import get_event_storage from ..anchor_generator import build_anchor_generator from ..backbone import Backbone, build_backbone from ..box_regression import Box2BoxTransform, _dense_box_regression_loss from ..matcher import Matcher from .build import META_ARCH_REGISTRY from .dense_detector import DenseDetector, permute_to_N_HWA_K # noqa
16,303
backbone: a backbone module, must follow detectron2's backbone interface head (nn.Module): a module that predicts logits and regression deltas for each level from a list of per-level features head_in_features (Tuple[str]): Names of the input feature maps to be used in head anchor_generator (nn.Module): a module that creates anchors from a list of features. Usually an instance of :class:`AnchorGenerator` box2box_transform (Box2BoxTransform): defines the transform from anchors boxes to instance boxes anchor_matcher (Matcher): label the anchors by matching them with ground truth. num_classes (int): number of classes. Used to label background proposals. # Loss parameters: focal_loss_alpha (float): focal_loss_alpha focal_loss_gamma (float): focal_loss_gamma smooth_l1_beta (float): smooth_l1_beta box_reg_loss_type (str): Options are "smooth_l1", "giou", "diou", "ciou" # Inference parameters: test_score_thresh (float): Inference cls score threshold, only anchors with score > INFERENCE_TH are considered for inference (to improve speed) test_topk_candidates (int): Select topk candidates before NMS test_nms_thresh (float): Overlap threshold used for non-maximum suppression (suppress boxes with IoU >= this threshold) max_detections_per_image (int): Maximum number of detections to return per image during inference (100 is based on the limit established for the COCO dataset). pixel_mean, pixel_std: see :class:`DenseDetector`. """ super().__init__( backbone, head, head_in_features, pixel_mean=pixel_mean, pixel_std=pixel_std ) self.num_classes = num_classes # Anchors self.anchor_generator = anchor_generator self.box2box_transform = box2box_transform self.anchor_matcher = anchor_matcher # Loss parameters: self.focal_loss_alpha = focal_loss_alpha self.focal_loss_gamma = focal_loss_gamma self.smooth_l1_beta = smooth_l1_beta self.box_reg_loss_type = box_reg_loss_type # Inference parameters: self.test_score_thresh = test_score_thresh self.test_topk_candidates = test_topk_candidates self.test_nms_thresh = test_nms_thresh self.max_detections_per_image = max_detections_per_image # Vis parameters self.vis_period = vis_period self.input_format = input_format @classmethod def from_config(cls, cfg): backbone = build_backbone(cfg) backbone_shape = backbone.output_shape() feature_shapes = [backbone_shape[f] for f in cfg.MODEL.RETINANET.IN_FEATURES] head = RetinaNetHead(cfg, feature_shapes) anchor_generator = build_anchor_generator(cfg, feature_shapes) return { "backbone": backbone, "head": head, "anchor_generator": anchor_generator, "box2box_transform": Box2BoxTransform(weights=cfg.MODEL.RETINANET.BBOX_REG_WEIGHTS), "anchor_matcher": Matcher( cfg.MODEL.RETINANET.IOU_THRESHOLDS, cfg.MODEL.RETINANET.IOU_LABELS, allow_low_quality_matches=True, ), "pixel_mean": cfg.MODEL.PIXEL_MEAN, "pixel_std": cfg.MODEL.PIXEL_STD, "num_classes": cfg.MODEL.RETINANET.NUM_CLASSES, "head_in_features": cfg.MODEL.RETINANET.IN_FEATURES, # Loss parameters: "focal_loss_alpha": cfg.MODEL.RETINANET.FOCAL_LOSS_ALPHA, "focal_loss_gamma": cfg.MODEL.RETINANET.FOCAL_LOSS_GAMMA, "smooth_l1_beta": cfg.MODEL.RETINANET.SMOOTH_L1_LOSS_BETA, "box_reg_loss_type": cfg.MODEL.RETINANET.BBOX_REG_LOSS_TYPE, # Inference parameters: "test_score_thresh": cfg.MODEL.RETINANET.SCORE_THRESH_TEST, "test_topk_candidates": cfg.MODEL.RETINANET.TOPK_CANDIDATES_TEST, "test_nms_thresh": cfg.MODEL.RETINANET.NMS_THRESH_TEST, "max_detections_per_image": cfg.TEST.DETECTIONS_PER_IMAGE, # Vis parameters "vis_period": cfg.VIS_PERIOD, "input_format": cfg.INPUT.FORMAT, } def 
forward_training(self, images, features, predictions, gt_instances): # Transpose the Hi*Wi*A dimension to the middle: pred_logits, pred_anchor_deltas = self._transpose_dense_predictions( predictions, [self.num_classes, 4] ) anchors = self.anchor_generator(features) gt_labels, gt_boxes = self.label_anchors(anchors, gt_instances) return self.losses(anchors, pred_logits, gt_labels, pred_anchor_deltas, gt_boxes) def losses(self, anchors, pred_logits, gt_labels, pred_anchor_deltas, gt_boxes): """ Args: anchors (list[Boxes]): a list of #feature level Boxes gt_labels, gt_boxes: see output of :meth:`RetinaNet.label_anchors`. Their shapes are (N, R) and (N, R, 4), respectively, where R is the total number of anchors across levels, i.e. sum(Hi x Wi x Ai) pred_logits, pred_anchor_deltas: both are list[Tensor]. Each element in the list corresponds to one level and has shape (N, Hi * Wi * Ai, K or 4). Where K is the number of classes used in `pred_logits`. Returns: dict[str, Tensor]: mapping from a named loss to a scalar tensor storing the loss. Used during training only. The dict keys are: "loss_cls" and "loss_box_reg" """ num_images = len(gt_labels) gt_labels = torch.stack(gt_labels) # (N, R) valid_mask = gt_labels >= 0 pos_mask = (gt_labels >= 0) & (gt_labels != self.num_classes) num_pos_anchors = pos_mask.sum().item()
# Copyright (c) Facebook, Inc. and its affiliates. __all__ = ["RetinaNet"] logger = logging.getLogger(__name__) @META_ARCH_REGISTRY.register() class RetinaNet(DenseDetector): """ Implement RetinaNet in :paper:`RetinaNet`. """ @configurable def __init__( self, *, backbone: Backbone, head: nn.Module, head_in_features, anchor_generator, box2box_transform, anchor_matcher, num_classes, focal_loss_alpha=0.25, focal_loss_gamma=2.0, smooth_l1_beta=0.0, box_reg_loss_type="smooth_l1", test_score_thresh=0.05, test_topk_candidates=1000, test_nms_thresh=0.5, max_detections_per_image=100, pixel_mean, pixel_std, vis_period=0, input_format="BGR", ): """ NOTE: this interface is experimental. Args: backbone: a backbone module, must follow detectron2's backbone interface head (nn.Module): a module that predicts logits and regression deltas for each level from a list of per-level features head_in_features (Tuple[str]): Names of the input feature maps to be used in head anchor_generator (nn.Module): a module that creates anchors from a list of features. Usually an instance of :class:`AnchorGenerator` box2box_transform (Box2BoxTransform): defines the transform from anchors boxes to instance boxes anchor_matcher (Matcher): label the anchors by matching them with ground truth. num_classes (int): number of classes. Used to label background proposals. # Loss parameters: focal_loss_alpha (float): focal_loss_alpha focal_loss_gamma (float): focal_loss_gamma smooth_l1_beta (float): smooth_l1_beta box_reg_loss_type (str): Options are "smooth_l1", "giou", "diou", "ciou" # Inference parameters: test_score_thresh (float): Inference cls score threshold, only anchors with score > INFERENCE_TH are considered for inference (to improve speed) test_topk_candidates (int): Select topk candidates before NMS test_nms_thresh (float): Overlap threshold used for non-maximum suppression (suppress boxes with IoU >= this threshold) max_detections_per_image (int): Maximum number of detections to return per image during inference (100 is based on the limit established for the COCO dataset). pixel_mean, pixel_std: see :class:`DenseDetector`. 
""" super().__init__( backbone, head, head_in_features, pixel_mean=pixel_mean, pixel_std=pixel_std ) self.num_classes = num_classes # Anchors self.anchor_generator = anchor_generator self.box2box_transform = box2box_transform self.anchor_matcher = anchor_matcher # Loss parameters: self.focal_loss_alpha = focal_loss_alpha self.focal_loss_gamma = focal_loss_gamma self.smooth_l1_beta = smooth_l1_beta self.box_reg_loss_type = box_reg_loss_type # Inference parameters: self.test_score_thresh = test_score_thresh self.test_topk_candidates = test_topk_candidates self.test_nms_thresh = test_nms_thresh self.max_detections_per_image = max_detections_per_image # Vis parameters self.vis_period = vis_period self.input_format = input_format @classmethod def from_config(cls, cfg): backbone = build_backbone(cfg) backbone_shape = backbone.output_shape() feature_shapes = [backbone_shape[f] for f in cfg.MODEL.RETINANET.IN_FEATURES] head = RetinaNetHead(cfg, feature_shapes) anchor_generator = build_anchor_generator(cfg, feature_shapes) return { "backbone": backbone, "head": head, "anchor_generator": anchor_generator, "box2box_transform": Box2BoxTransform(weights=cfg.MODEL.RETINANET.BBOX_REG_WEIGHTS), "anchor_matcher": Matcher( cfg.MODEL.RETINANET.IOU_THRESHOLDS, cfg.MODEL.RETINANET.IOU_LABELS, allow_low_quality_matches=True, ), "pixel_mean": cfg.MODEL.PIXEL_MEAN, "pixel_std": cfg.MODEL.PIXEL_STD, "num_classes": cfg.MODEL.RETINANET.NUM_CLASSES, "head_in_features": cfg.MODEL.RETINANET.IN_FEATURES, # Loss parameters: "focal_loss_alpha": cfg.MODEL.RETINANET.FOCAL_LOSS_ALPHA, "focal_loss_gamma": cfg.MODEL.RETINANET.FOCAL_LOSS_GAMMA, "smooth_l1_beta": cfg.MODEL.RETINANET.SMOOTH_L1_LOSS_BETA, "box_reg_loss_type": cfg.MODEL.RETINANET.BBOX_REG_LOSS_TYPE, # Inference parameters: "test_score_thresh": cfg.MODEL.RETINANET.SCORE_THRESH_TEST, "test_topk_candidates": cfg.MODEL.RETINANET.TOPK_CANDIDATES_TEST, "test_nms_thresh": cfg.MODEL.RETINANET.NMS_THRESH_TEST, "max_detections_per_image": cfg.TEST.DETECTIONS_PER_IMAGE, # Vis parameters "vis_period": cfg.VIS_PERIOD, "input_format": cfg.INPUT.FORMAT, } def forward_training(self, images, features, predictions, gt_instances): # Transpose the Hi*Wi*A dimension to the middle: pred_logits, pred_anchor_deltas = self._transpose_dense_predictions( predictions, [self.num_classes, 4] ) anchors = self.anchor_generator(features) gt_labels, gt_boxes = self.label_anchors(anchors, gt_instances) return self.losses(anchors, pred_logits, gt_labels, pred_anchor_deltas, gt_boxes) def losses(self, anchors, pred_logits, gt_labels, pred_anchor_deltas, gt_boxes): """ Args: anchors (list[Boxes]): a list of #feature level Boxes gt_labels, gt_boxes: see output of :meth:`RetinaNet.label_anchors`. Their shapes are (N, R) and (N, R, 4), respectively, where R is the total number of anchors across levels, i.e. sum(Hi x Wi x Ai) pred_logits, pred_anchor_deltas: both are list[Tensor]. Each element in the list corresponds to one level and has shape (N, Hi * Wi * Ai, K or 4). Where K is the number of classes used in `pred_logits`. Returns: dict[str, Tensor]: mapping from a named loss to a scalar tensor storing the loss. Used during training only. The dict keys are: "loss_cls" and "loss_box_reg" """ num_images = len(gt_labels) gt_labels = torch.stack(gt_labels) # (N, R) valid_mask = gt_labels >= 0 pos_mask = (gt_labels >= 0) & (gt_labels != self.num_classes) num_pos_anchors = pos_mask.sum().item()
get_event_storage().put_scalar("num_pos_anchors", num_pos_anchors / num_images)
10
2023-12-10 20:14:00+00:00
24k
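The RetinaNet record above documents focal_loss_alpha and focal_loss_gamma and imports fvcore's sigmoid_focal_loss_jit. A minimal sketch, assuming plain PyTorch instead of fvcore, of the sigmoid focal loss those two parameters control (alpha=0.25 and gamma=2.0 mirror the defaults shown in the record):

import torch
import torch.nn.functional as F

def sigmoid_focal_loss(logits, targets, alpha=0.25, gamma=2.0, reduction="sum"):
    """Focal loss for dense detection: down-weights well-classified (easy) anchors.

    logits, targets: tensors of shape (N, num_classes); targets are 0/1.
    """
    p = torch.sigmoid(logits)
    ce = F.binary_cross_entropy_with_logits(logits, targets, reduction="none")
    p_t = p * targets + (1 - p) * (1 - targets)      # probability of the true class
    loss = ce * (1 - p_t) ** gamma                   # modulating factor
    if alpha >= 0:
        alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
        loss = alpha_t * loss
    return loss.sum() if reduction == "sum" else loss.mean()

# toy example: 3 anchors, 2 classes
logits = torch.tensor([[2.0, -1.0], [-3.0, 0.5], [0.0, 0.0]])
targets = torch.tensor([[1.0, 0.0], [0.0, 1.0], [0.0, 0.0]])
print(sigmoid_focal_loss(logits, targets))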
mkang315/ASF-YOLO
segment/val.py
[ { "identifier": "DetectMultiBackend", "path": "models/common.py", "snippet": "class DetectMultiBackend(nn.Module):\n # YOLOv5 MultiBackend class for python inference on various backends\n def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False, fuse=Tr...
import argparse import json import os import sys import numpy as np import torch import torch.nn.functional as F import time from multiprocessing.pool import ThreadPool from pathlib import Path from tqdm import tqdm from models.common import DetectMultiBackend from models.yolo import SegmentationModel from utils.callbacks import Callbacks from utils.general import (LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size, check_requirements, check_yaml, coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args, scale_boxes, xywh2xyxy, xyxy2xywh) from utils.metrics import ConfusionMatrix, box_iou from utils.plots import output_to_target, plot_val_study from utils.segment.dataloaders import create_dataloader from utils.segment.general import mask_iou, process_mask, process_mask_upsample, scale_image from utils.segment.metrics import Metrics, ap_per_class_box_and_mask from utils.segment.plots import plot_images_and_masks from utils.torch_utils import de_parallel, select_device, smart_inference_mode from pycocotools.mask import encode from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval
19,567
rle["counts"] = rle["counts"].decode("utf-8") return rle image_id = int(path.stem) if path.stem.isnumeric() else path.stem box = xyxy2xywh(predn[:, :4]) # xywh box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner pred_masks = np.transpose(pred_masks, (2, 0, 1)) with ThreadPool(NUM_THREADS) as pool: rles = pool.map(single_encode, pred_masks) for i, (p, b) in enumerate(zip(predn.tolist(), box.tolist())): jdict.append({ 'image_id': image_id, 'category_id': class_map[int(p[5])], 'bbox': [round(x, 3) for x in b], 'score': round(p[4], 5), 'segmentation': rles[i]}) def process_batch(detections, labels, iouv, pred_masks=None, gt_masks=None, overlap=False, masks=False): """ Return correct prediction matrix Arguments: detections (array[N, 6]), x1, y1, x2, y2, conf, class labels (array[M, 5]), class, x1, y1, x2, y2 Returns: correct (array[N, 10]), for 10 IoU levels """ if masks: if overlap: nl = len(labels) index = torch.arange(nl, device=gt_masks.device).view(nl, 1, 1) + 1 gt_masks = gt_masks.repeat(nl, 1, 1) # shape(1,640,640) -> (n,640,640) gt_masks = torch.where(gt_masks == index, 1.0, 0.0) if gt_masks.shape[1:] != pred_masks.shape[1:]: gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode="bilinear", align_corners=False)[0] gt_masks = gt_masks.gt_(0.5) iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1)) else: # boxes iou = box_iou(labels[:, 1:], detections[:, :4]) correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool) correct_class = labels[:, 0:1] == detections[:, 5] for i in range(len(iouv)): x = torch.where((iou >= iouv[i]) & correct_class) # IoU > threshold and classes match if x[0].shape[0]: matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detect, iou] if x[0].shape[0] > 1: matches = matches[matches[:, 2].argsort()[::-1]] matches = matches[np.unique(matches[:, 1], return_index=True)[1]] # matches = matches[matches[:, 2].argsort()[::-1]] matches = matches[np.unique(matches[:, 0], return_index=True)[1]] correct[matches[:, 1].astype(int), i] = True return torch.tensor(correct, dtype=torch.bool, device=iouv.device) @smart_inference_mode() def run( data, weights=None, # model.pt path(s) batch_size=32, # batch size imgsz=640, # inference size (pixels) conf_thres=0.001, # confidence threshold iou_thres=0.6, # NMS IoU threshold max_det=300, # maximum detections per image task='val', # train, val, test, speed or study device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu workers=8, # max dataloader workers (per RANK in DDP mode) single_cls=False, # treat as single-class dataset augment=False, # augmented inference verbose=False, # verbose output save_txt=False, # save results to *.txt save_hybrid=False, # save label+prediction hybrid results to *.txt save_conf=False, # save confidences in --save-txt labels save_json=False, # save a COCO-JSON results file project=ROOT / 'runs/val-seg', # save to project/name name='exp', # save to project/name exist_ok=False, # existing project/name ok, do not increment half=True, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference model=None, dataloader=None, save_dir=Path(''), plots=True, overlap=False, mask_downsample_ratio=1, compute_loss=None, callbacks=Callbacks(), ): if save_json: check_requirements(['pycocotools']) process = process_mask_upsample # more accurate else: process = process_mask # faster # Initialize/load model and set device training = model is not None if training: # called by train.py device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model half &= device.type != 'cpu' # half precision only supported on CUDA model.half() if half else model.float() nm = de_parallel(model).model[-1].nm # number of masks else: # called directly device = select_device(device, batch_size=batch_size) # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine imgsz = check_img_size(imgsz, s=stride) # check image size half = model.fp16 # FP16 supported on limited backends with CUDA nm = de_parallel(model).model.model[-1].nm if isinstance(model, SegmentationModel) else 32 # number of masks if engine: batch_size = model.batch_size else: device = model.device if not (pt or jit): batch_size = 1 # export.py models default to batch-size 1
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Validate a trained YOLOv5 segment model on a segment dataset Usage: $ bash data/scripts/get_coco.sh --val --segments # download COCO-segments val split (1G, 5000 images) $ python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # validate COCO-segments Usage - formats: $ python segment/val.py --weights yolov5s-seg.pt # PyTorch yolov5s-seg.torchscript # TorchScript yolov5s-seg.onnx # ONNX Runtime or OpenCV DNN with --dnn yolov5s-seg_openvino_label # OpenVINO yolov5s-seg.engine # TensorRT yolov5s-seg.mlmodel # CoreML (macOS-only) yolov5s-seg_saved_model # TensorFlow SavedModel yolov5s-seg.pb # TensorFlow GraphDef yolov5s-seg.tflite # TensorFlow Lite yolov5s-seg_edgetpu.tflite # TensorFlow Edge TPU yolov5s-seg_paddle_model # PaddlePaddle """ FILE = Path(__file__).resolve() ROOT = FILE.parents[1] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative def save_one_txt(predn, save_conf, shape, file): # Save one txt result gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh for *xyxy, conf, cls in predn.tolist(): xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(file, 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') def save_one_json(predn, jdict, path, class_map, pred_masks): # Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236} def single_encode(x): rle = encode(np.asarray(x[:, :, None], order="F", dtype="uint8"))[0] rle["counts"] = rle["counts"].decode("utf-8") return rle image_id = int(path.stem) if path.stem.isnumeric() else path.stem box = xyxy2xywh(predn[:, :4]) # xywh box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner pred_masks = np.transpose(pred_masks, (2, 0, 1)) with ThreadPool(NUM_THREADS) as pool: rles = pool.map(single_encode, pred_masks) for i, (p, b) in enumerate(zip(predn.tolist(), box.tolist())): jdict.append({ 'image_id': image_id, 'category_id': class_map[int(p[5])], 'bbox': [round(x, 3) for x in b], 'score': round(p[4], 5), 'segmentation': rles[i]}) def process_batch(detections, labels, iouv, pred_masks=None, gt_masks=None, overlap=False, masks=False): """ Return correct prediction matrix Arguments: detections (array[N, 6]), x1, y1, x2, y2, conf, class labels (array[M, 5]), class, x1, y1, x2, y2 Returns: correct (array[N, 10]), for 10 IoU levels """ if masks: if overlap: nl = len(labels) index = torch.arange(nl, device=gt_masks.device).view(nl, 1, 1) + 1 gt_masks = gt_masks.repeat(nl, 1, 1) # shape(1,640,640) -> (n,640,640) gt_masks = torch.where(gt_masks == index, 1.0, 0.0) if gt_masks.shape[1:] != pred_masks.shape[1:]: gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode="bilinear", align_corners=False)[0] gt_masks = gt_masks.gt_(0.5) iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1)) else: # boxes iou = box_iou(labels[:, 1:], detections[:, :4]) correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool) correct_class = labels[:, 0:1] == detections[:, 5] for i in range(len(iouv)): x = torch.where((iou >= iouv[i]) & correct_class) # IoU > threshold and classes match if x[0].shape[0]: matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detect, iou] if x[0].shape[0] > 1: matches = 
matches[matches[:, 2].argsort()[::-1]] matches = matches[np.unique(matches[:, 1], return_index=True)[1]] # matches = matches[matches[:, 2].argsort()[::-1]] matches = matches[np.unique(matches[:, 0], return_index=True)[1]] correct[matches[:, 1].astype(int), i] = True return torch.tensor(correct, dtype=torch.bool, device=iouv.device) @smart_inference_mode() def run( data, weights=None, # model.pt path(s) batch_size=32, # batch size imgsz=640, # inference size (pixels) conf_thres=0.001, # confidence threshold iou_thres=0.6, # NMS IoU threshold max_det=300, # maximum detections per image task='val', # train, val, test, speed or study device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu workers=8, # max dataloader workers (per RANK in DDP mode) single_cls=False, # treat as single-class dataset augment=False, # augmented inference verbose=False, # verbose output save_txt=False, # save results to *.txt save_hybrid=False, # save label+prediction hybrid results to *.txt save_conf=False, # save confidences in --save-txt labels save_json=False, # save a COCO-JSON results file project=ROOT / 'runs/val-seg', # save to project/name name='exp', # save to project/name exist_ok=False, # existing project/name ok, do not increment half=True, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference model=None, dataloader=None, save_dir=Path(''), plots=True, overlap=False, mask_downsample_ratio=1, compute_loss=None, callbacks=Callbacks(), ): if save_json: check_requirements(['pycocotools']) process = process_mask_upsample # more accurate else: process = process_mask # faster # Initialize/load model and set device training = model is not None if training: # called by train.py device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model half &= device.type != 'cpu' # half precision only supported on CUDA model.half() if half else model.float() nm = de_parallel(model).model[-1].nm # number of masks else: # called directly device = select_device(device, batch_size=batch_size) # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine imgsz = check_img_size(imgsz, s=stride) # check image size half = model.fp16 # FP16 supported on limited backends with CUDA nm = de_parallel(model).model.model[-1].nm if isinstance(model, SegmentationModel) else 32 # number of masks if engine: batch_size = model.batch_size else: device = model.device if not (pt or jit): batch_size = 1 # export.py models default to batch-size 1
LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models')
3
2023-12-10 14:18:29+00:00
24k
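process_batch in the record above matches detections to ground truth with box_iou across 10 IoU thresholds. A minimal sketch, assuming xyxy box format, of the pairwise IoU computation such matching relies on; this is a stand-in written in plain PyTorch, not the repository's utils.metrics.box_iou itself:

import torch

def pairwise_box_iou(boxes1, boxes2, eps=1e-7):
    """IoU between every box in boxes1 (N,4) and boxes2 (M,4); boxes are x1,y1,x2,y2."""
    area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
    area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
    lt = torch.max(boxes1[:, None, :2], boxes2[None, :, :2])   # top-left of intersection
    rb = torch.min(boxes1[:, None, 2:], boxes2[None, :, 2:])   # bottom-right of intersection
    wh = (rb - lt).clamp(min=0)
    inter = wh[..., 0] * wh[..., 1]
    return inter / (area1[:, None] + area2[None, :] - inter + eps)

gt = torch.tensor([[0.0, 0.0, 10.0, 10.0]])
pred = torch.tensor([[0.0, 0.0, 10.0, 10.0], [5.0, 5.0, 15.0, 15.0]])
print(pairwise_box_iou(gt, pred))   # tensor([[1.0000, 0.1429]])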
youngskkim/CRN
exps/base_exp.py
[ { "identifier": "NuscDatasetRadarDet", "path": "datasets/nusc_det_dataset.py", "snippet": "class NuscDatasetRadarDet(Dataset):\n def __init__(self,\n ida_aug_conf,\n bda_aug_conf,\n rda_aug_conf,\n classes,\n data_root,\n...
from functools import partial from pytorch_lightning.core import LightningModule from torch.cuda.amp.autocast_mode import autocast from torch.optim.lr_scheduler import MultiStepLR from mmcv.runner import build_optimizer from datasets.nusc_det_dataset import NuscDatasetRadarDet, collate_fn from evaluators.det_evaluators import DetNuscEvaluator from models.base_bev_depth import BaseBEVDepth from utils.torch_dist import all_gather_object, synchronize import mmcv import torch import torch.nn.functional as F import torch.nn.parallel import torch.utils.data import torch.utils.data.distributed import torchvision.models as models
14,786
out_indices=[0, 1, 2, 3], norm_eval=False, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), ), 'img_neck_conf': dict( type='SECONDFPN', in_channels=[256, 512, 1024, 2048], upsample_strides=[0.25, 0.5, 1, 2], out_channels=[128, 128, 128, 128], ), 'depth_net_conf': dict(in_channels=512, mid_channels=512), 'camera_aware': True } CLASSES = [ 'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier', 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone', ] head_conf = { 'bev_backbone_conf': dict( type='ResNet', in_channels=80, depth=18, num_stages=3, strides=(1, 2, 2), dilations=(1, 1, 1), out_indices=[0, 1, 2], norm_eval=False, base_channels=160), 'bev_neck_conf': dict( type='SECONDFPN', in_channels=[80, 160, 320, 640], upsample_strides=[1, 2, 4, 8], out_channels=[64, 64, 64, 64]), 'tasks': [ dict(num_class=1, class_names=['car']), dict(num_class=2, class_names=['truck', 'construction_vehicle']), dict(num_class=2, class_names=['bus', 'trailer']), dict(num_class=1, class_names=['barrier']), dict(num_class=2, class_names=['motorcycle', 'bicycle']), dict(num_class=2, class_names=['pedestrian', 'traffic_cone']),], 'common_heads': dict( reg=(2, 2), height=(1, 2), dim=(3, 2), rot=(2, 2), vel=(2, 2)), 'bbox_coder': dict( type='CenterPointBBoxCoder', post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], max_num=500, score_threshold=0.1, out_size_factor=4, voxel_size=[0.2, 0.2, 8], pc_range=[-51.2, -51.2, -5, 51.2, 51.2, 3], code_size=9), 'train_cfg': dict( point_cloud_range=[-51.2, -51.2, -5, 51.2, 51.2, 3], grid_size=[512, 512, 1], voxel_size=[0.2, 0.2, 8], out_size_factor=4, dense_reg=1, gaussian_overlap=0.1, max_objs=500, min_radius=2, code_weights=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.5, 0.5]), 'test_cfg': dict( post_center_limit_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], max_per_img=500, max_pool_nms=False, min_radius=[4, 12, 10, 1, 0.85, 0.175], score_threshold=0.1, out_size_factor=4, voxel_size=[0.2, 0.2, 8], nms_type='circle', pre_max_size=1000, post_max_size=83, nms_thr=0.2), 'in_channels': 256, # Equal to bev_neck output_channels. 'loss_cls': dict(type='GaussianFocalLoss', reduction='mean'), 'loss_bbox': dict(type='L1Loss', reduction='mean', loss_weight=0.25), 'gaussian_overlap': 0.1, 'min_radius': 2, } class BEVDepthLightningModel(LightningModule): MODEL_NAMES = sorted(name for name in models.__dict__ if name.islower() and not name.startswith('__') and callable(models.__dict__[name])) def __init__(self, gpus: int = 1, data_root='data/nuScenes', eval_interval=1, batch_size_per_device=8, class_names=CLASSES, backbone_img_conf=backbone_img_conf, head_conf=head_conf, ida_aug_conf=ida_aug_conf, bda_aug_conf=bda_aug_conf, rda_aug_conf=rda_aug_conf, default_root_dir='./outputs/', **kwargs): super().__init__() self.save_hyperparameters() self.gpus = gpus self.optimizer_config = optimizer_config self.pretrain_config = pretrain_config self.eval_interval = eval_interval self.batch_size_per_device = batch_size_per_device self.data_root = data_root self.class_names = class_names self.backbone_img_conf = backbone_img_conf self.head_conf = head_conf self.ida_aug_conf = ida_aug_conf self.bda_aug_conf = bda_aug_conf self.rda_aug_conf = rda_aug_conf mmcv.mkdir_or_exist(default_root_dir) self.default_root_dir = default_root_dir self.evaluator = DetNuscEvaluator(class_names=self.class_names, output_dir=self.default_root_dir)
# Copyright (c) Megvii Inc. All rights reserved. pretrain_config = dict( img_model_path=None, img_load_key=[], img_freeze_key=None, pts_model_path=None, pts_load_key=[]) optimizer_config = dict( type='AdamW', lr=2e-4, weight_decay=1e-2) H = 900 W = 1600 final_dim = (256, 704) img_conf = dict(img_mean=[123.675, 116.28, 103.53], img_std=[58.395, 57.12, 57.375], to_rgb=True) ida_aug_conf = { 'resize_lim': (0.386, 0.55), 'final_dim': final_dim, 'rot_lim': (-5.4, 5.4), 'H': 900, 'W': 1600, 'rand_flip': True, 'bot_pct_lim': (0.0, 0.0), 'cams': ['CAM_FRONT_LEFT', 'CAM_FRONT', 'CAM_FRONT_RIGHT', 'CAM_BACK_LEFT', 'CAM_BACK', 'CAM_BACK_RIGHT'], 'Ncams': 6, } bda_aug_conf = { 'rot_ratio': 1.0, 'rot_lim': (-22.5, 22.5), 'scale_lim': (0.95, 1.05), 'flip_dx_ratio': 0.5, 'flip_dy_ratio': 0.5 } rda_aug_conf = { 'N_sweeps': 6, 'N_use': 5, 'drop_ratio': 0.1, } backbone_img_conf = { 'x_bound': [-51.2, 51.2, 0.8], 'y_bound': [-51.2, 51.2, 0.8], 'z_bound': [-5, 3, 8], 'd_bound': [2.0, 58.0, 0.8], 'final_dim': final_dim, 'output_channels': 80, 'downsample_factor': 16, 'img_backbone_conf': dict( type='ResNet', depth=50, frozen_stages=0, out_indices=[0, 1, 2, 3], norm_eval=False, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), ), 'img_neck_conf': dict( type='SECONDFPN', in_channels=[256, 512, 1024, 2048], upsample_strides=[0.25, 0.5, 1, 2], out_channels=[128, 128, 128, 128], ), 'depth_net_conf': dict(in_channels=512, mid_channels=512), 'camera_aware': True } CLASSES = [ 'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier', 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone', ] head_conf = { 'bev_backbone_conf': dict( type='ResNet', in_channels=80, depth=18, num_stages=3, strides=(1, 2, 2), dilations=(1, 1, 1), out_indices=[0, 1, 2], norm_eval=False, base_channels=160), 'bev_neck_conf': dict( type='SECONDFPN', in_channels=[80, 160, 320, 640], upsample_strides=[1, 2, 4, 8], out_channels=[64, 64, 64, 64]), 'tasks': [ dict(num_class=1, class_names=['car']), dict(num_class=2, class_names=['truck', 'construction_vehicle']), dict(num_class=2, class_names=['bus', 'trailer']), dict(num_class=1, class_names=['barrier']), dict(num_class=2, class_names=['motorcycle', 'bicycle']), dict(num_class=2, class_names=['pedestrian', 'traffic_cone']),], 'common_heads': dict( reg=(2, 2), height=(1, 2), dim=(3, 2), rot=(2, 2), vel=(2, 2)), 'bbox_coder': dict( type='CenterPointBBoxCoder', post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], max_num=500, score_threshold=0.1, out_size_factor=4, voxel_size=[0.2, 0.2, 8], pc_range=[-51.2, -51.2, -5, 51.2, 51.2, 3], code_size=9), 'train_cfg': dict( point_cloud_range=[-51.2, -51.2, -5, 51.2, 51.2, 3], grid_size=[512, 512, 1], voxel_size=[0.2, 0.2, 8], out_size_factor=4, dense_reg=1, gaussian_overlap=0.1, max_objs=500, min_radius=2, code_weights=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.5, 0.5]), 'test_cfg': dict( post_center_limit_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], max_per_img=500, max_pool_nms=False, min_radius=[4, 12, 10, 1, 0.85, 0.175], score_threshold=0.1, out_size_factor=4, voxel_size=[0.2, 0.2, 8], nms_type='circle', pre_max_size=1000, post_max_size=83, nms_thr=0.2), 'in_channels': 256, # Equal to bev_neck output_channels. 
'loss_cls': dict(type='GaussianFocalLoss', reduction='mean'), 'loss_bbox': dict(type='L1Loss', reduction='mean', loss_weight=0.25), 'gaussian_overlap': 0.1, 'min_radius': 2, } class BEVDepthLightningModel(LightningModule): MODEL_NAMES = sorted(name for name in models.__dict__ if name.islower() and not name.startswith('__') and callable(models.__dict__[name])) def __init__(self, gpus: int = 1, data_root='data/nuScenes', eval_interval=1, batch_size_per_device=8, class_names=CLASSES, backbone_img_conf=backbone_img_conf, head_conf=head_conf, ida_aug_conf=ida_aug_conf, bda_aug_conf=bda_aug_conf, rda_aug_conf=rda_aug_conf, default_root_dir='./outputs/', **kwargs): super().__init__() self.save_hyperparameters() self.gpus = gpus self.optimizer_config = optimizer_config self.pretrain_config = pretrain_config self.eval_interval = eval_interval self.batch_size_per_device = batch_size_per_device self.data_root = data_root self.class_names = class_names self.backbone_img_conf = backbone_img_conf self.head_conf = head_conf self.ida_aug_conf = ida_aug_conf self.bda_aug_conf = bda_aug_conf self.rda_aug_conf = rda_aug_conf mmcv.mkdir_or_exist(default_root_dir) self.default_root_dir = default_root_dir self.evaluator = DetNuscEvaluator(class_names=self.class_names, output_dir=self.default_root_dir)
self.model = BaseBEVDepth(self.backbone_img_conf,
3
2023-12-06 14:57:49+00:00
24k
jinxixiang/magic_animate_unofficial
animatediff/magic_animate/pipeline.py
[ { "identifier": "UNet3DConditionModel", "path": "animatediff/magic_animate/unet_controlnet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optiona...
import inspect, math import numpy as np import torch import torch.distributed as dist import einops from typing import Callable, List, Optional, Union from dataclasses import dataclass from PIL import Image from tqdm import tqdm from diffusers.utils import is_accelerate_available from packaging import version from transformers import CLIPTextModel, CLIPTokenizer from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL from diffusers.pipeline_utils import DiffusionPipeline from diffusers.schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from diffusers.utils import deprecate, logging, BaseOutput from einops import rearrange from animatediff.magic_animate.unet_controlnet import UNet3DConditionModel from animatediff.magic_animate.controlnet import ControlNetModel from animatediff.magic_animate.mutual_self_attention import ReferenceAttentionControl from animatediff.magic_animate.context import ( get_context_scheduler, get_total_steps ) from animatediff.utils.util import get_tensor_interpolation_method from accelerate import cpu_offload
17,193
# ************************************************************************* # This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo- # difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B- # ytedance Inc.. # ************************************************************************* # Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TODO: 1. support multi-controlnet 2. [DONE] support DDIM inversion 3. support Prompt-to-prompt """ logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class AnimationPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] class AnimationPipeline(DiffusionPipeline): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer,
# ************************************************************************* # This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo- # difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B- # ytedance Inc.. # ************************************************************************* # Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TODO: 1. support multi-controlnet 2. [DONE] support DDIM inversion 3. support Prompt-to-prompt """ logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class AnimationPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] class AnimationPipeline(DiffusionPipeline): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer,
unet: UNet3DConditionModel,
0
2023-12-12 00:16:39+00:00
24k
qitan/devops-backend-lite
common/ext_fun.py
[ { "identifier": "generate_docu", "path": "common/utils/ElasticSearchAPI.py", "snippet": "def generate_docu(table, index_version=None):\n index_name = f\"{table.name}-{index_version}\" if index_version else table.name\n _tbindex = Index(index_name)\n _tbindex.analyzer(my_normalizer)\n _tbinde...
from gitlab.exceptions import GitlabGetError from functools import reduce from common.utils.ElasticSearchAPI import generate_docu, Search from common.utils.GitLabAPI import GitLabAPI from common.utils.HarborAPI import HarborAPI from common.utils.JenkinsAPI import GlueJenkins from common.custom_format import convert_xml_to_str_with_pipeline from common.variables import DASHBOARD_TIME_FORMAT, DASHBOARD_TIME_FORMAT_T, DASHBOARD_TIME_FREQNAMES, \ DASHBOARD_TIME_FREQNAMES_T, SENSITIVE_KEYS, JENKINS_CALLBACK_KEY, \ JENKINS_STATUS_MAP, DEV_LANGUAGE_KEY from dbapp.models import AppInfo, Product, KubernetesCluster, KubernetesDeploy, MicroApp, Project, ProjectConfig, DevLanguage, BuildJob, UserProfile, SystemConfig, Role, Permission, Menu, DataDict from django.conf import settings from django.core.cache import cache from django.utils import timezone from django.db.models import Q from social_django.utils import load_strategy from rest_framework.utils.serializer_helpers import ReturnDict from config import SOCIAL_AUTH_GITLAB_API_URL, GITLAB_ADMIN_TOKEN from common.utils.K8sAPI import K8sAPI from urllib.parse import urlparse, quote_plus from dateutil.relativedelta import relativedelta from dateutil.rrule import rrule from ruamel import yaml from datetime import datetime, timedelta from celery import current_app import copy import operator import re import time import pytz import os import json import requests import math import shortuuid import logging
15,168
#!/usr/bin/env python # -*- coding: utf-8 -*- """ @Author : Charles Lai @Contact : qqing_lai@hotmail.com @Time : 2020/12/21 上午10:00 @FileName: ext_fun.py @Blog :https://imaojia.com """ logger = logging.getLogger('drf') class ThirdPartyUser(object): def get_user(self): user = UserProfile.objects.get_or_create(username='thirdparty')[0] self.set_permission(user, self.get_role()) return user def get_role(self): return Role.objects.get_or_create(name='thirdparty')[0] def get_perm(self): return Permission.objects.get_or_create(name='Jenkins回调', method='jenkins_callback')[0] def set_permission(self, user, role): role.permissions.set([self.get_perm().id]) user.roles.set([role.id]) def set_redis_data(name, config): cache.set(f"system:{name}", config, None) def get_redis_data(name): ret = cache.get(f"system:{name}") if not ret: try: if name == 'cicd-harbor': qs = SystemConfig.objects.filter(type=name)[0] else: qs = SystemConfig.objects.get(name=name) except BaseException as e: return None ret = json.loads(qs.config) set_redis_data(name, ret) return ret def get_datadict(name, config=0, default_value=None): """ 从数据字典获取数据 """ try: qs = DataDict.objects.get(key=name) except BaseException as e: return default_value if config: ret = json.loads(qs.extra) else: ret = {'id': qs.id, 'key': qs.key, 'value': qs.value, 'desc': qs.desc} return ret def check_pods(cluster_id, k8s_config, namespace, **kwargs):
#!/usr/bin/env python # -*- coding: utf-8 -*- """ @Author : Charles Lai @Contact : qqing_lai@hotmail.com @Time : 2020/12/21 上午10:00 @FileName: ext_fun.py @Blog :https://imaojia.com """ logger = logging.getLogger('drf') class ThirdPartyUser(object): def get_user(self): user = UserProfile.objects.get_or_create(username='thirdparty')[0] self.set_permission(user, self.get_role()) return user def get_role(self): return Role.objects.get_or_create(name='thirdparty')[0] def get_perm(self): return Permission.objects.get_or_create(name='Jenkins回调', method='jenkins_callback')[0] def set_permission(self, user, role): role.permissions.set([self.get_perm().id]) user.roles.set([role.id]) def set_redis_data(name, config): cache.set(f"system:{name}", config, None) def get_redis_data(name): ret = cache.get(f"system:{name}") if not ret: try: if name == 'cicd-harbor': qs = SystemConfig.objects.filter(type=name)[0] else: qs = SystemConfig.objects.get(name=name) except BaseException as e: return None ret = json.loads(qs.config) set_redis_data(name, ret) return ret def get_datadict(name, config=0, default_value=None): """ 从数据字典获取数据 """ try: qs = DataDict.objects.get(key=name) except BaseException as e: return default_value if config: ret = json.loads(qs.extra) else: ret = {'id': qs.id, 'key': qs.key, 'value': qs.value, 'desc': qs.desc} return ret def check_pods(cluster_id, k8s_config, namespace, **kwargs):
k8s = KubernetesCluster.objects.get(id=cluster_id)
14
2023-12-13 03:09:32+00:00
24k
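get_redis_data/set_redis_data in the record above follow a cache-aside pattern: read the cached config, and on a miss load it from the database, repopulate the cache, and return it. A minimal standalone sketch of the same pattern, with a plain dict standing in for Django's cache and a stub loader in place of the SystemConfig query (both are assumptions, not the project's code):

import json

_cache: dict = {}                      # stand-in for Django's cache backend

def load_config_from_db(name: str) -> str:
    # stub for SystemConfig.objects.get(name=name).config
    return json.dumps({"name": name, "enabled": True})

def set_data(name: str, config) -> None:
    _cache[f"system:{name}"] = config

def get_data(name: str):
    ret = _cache.get(f"system:{name}")
    if ret is None:                    # cache miss: fall back to the database
        ret = json.loads(load_config_from_db(name))
        set_data(name, ret)            # repopulate the cache for later readers
    return ret

print(get_data("jenkins"))             # loaded from the "database" and cached
print(get_data("jenkins"))             # served from the cache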
MarilynKeller/aitviewer-skel
aitviewer/renderables/sdf.py
[ { "identifier": "BoundingBoxes", "path": "aitviewer/renderables/bounding_boxes.py", "snippet": "class BoundingBoxes(Node):\n \"\"\"\n Draw bounding boxes.\n \"\"\"\n\n def __init__(self, vertices, thickness=0.005, color=(0.0, 0.0, 1.0, 1.0), **kwargs):\n \"\"\"\n Initializer.\n...
import numpy as np from skimage import measure from aitviewer.renderables.bounding_boxes import BoundingBoxes from aitviewer.renderables.lines import Lines from aitviewer.renderables.meshes import Meshes from aitviewer.scene.node import Node from aitviewer.utils.decorators import hooked
19,337
# Copyright (C) 2023 ETH Zurich, Manuel Kaufmann, Velko Vechev, Dario Mylonopoulos class SDF(Node): """ Renderable that can be used to draw level sets of a dense SDF volume meshed using marching cubes. This renderable internally uses the marching cubes algorithm from skimage. For a faster marching cubes implementation see the Volume renderable. """ def __init__( self, volume, size=(1, 1, 1), level=0.0, color=(0.7, 0.7, 0.7, 1.0), level_sets=None, level_set_colors=None, mc_step_size=1, **kwargs, ): """Initializer. :param volume: np array of shape (X, Y, Z) of signed distance values :param size: size of the volume in local units. :param level: the level set used for the main mesh. :param color: color of the main mesh. :param level_sets: a list or array of additional level set values to display. :param level_set_colors: a list or array of shape (L, 4) of the same length as the level_set parameter with colors to use for the additional level sets. :param mc_step_size: step size used for marching cubes. :param **kwargs: arguments forwarded to the Node constructor. """ assert len(volume.shape) == 3 and len(size) == 3 kwargs["gui_material"] = False super().__init__(**kwargs) self.volume = volume self.size = np.array((size), np.float32) # Mesh. verts, faces, normals, _ = measure.marching_cubes( volume, level, spacing=self.size / (np.array(self.volume.shape) - 1.0), step_size=mc_step_size ) self.mesh = Meshes(verts, faces, vertex_normals=-normals, color=color, name="Mesh") # Level sets. self.level_sets: list[Meshes] = [] if level_sets is not None: if level_set_colors is not None: assert len(level_sets) == len(level_set_colors) for i, s in enumerate(level_sets): verts, faces, normals, _ = measure.marching_cubes( volume, s, spacing=self.size / (np.array(self.volume.shape) - 1.0), step_size=mc_step_size ) shell = Meshes(verts, faces, vertex_normals=-normals, name=f"Level {s:.03f}", cast_shadow=False) if level_set_colors is not None: shell.color = tuple(level_set_colors[i]) shell.clip_control = np.array((1, 1, 1)) shell.clip_value = self.size.copy() shell.backface_culling = False self.level_sets.append(shell) # Bounding box.
# Copyright (C) 2023 ETH Zurich, Manuel Kaufmann, Velko Vechev, Dario Mylonopoulos class SDF(Node): """ Renderable that can be used to draw level sets of a dense SDF volume meshed using marching cubes. This renderable internally uses the marching cubes algorithm from skimage. For a faster marching cubes implementation see the Volume renderable. """ def __init__( self, volume, size=(1, 1, 1), level=0.0, color=(0.7, 0.7, 0.7, 1.0), level_sets=None, level_set_colors=None, mc_step_size=1, **kwargs, ): """Initializer. :param volume: np array of shape (X, Y, Z) of signed distance values :param size: size of the volume in local units. :param level: the level set used for the main mesh. :param color: color of the main mesh. :param level_sets: a list or array of additional level set values to display. :param level_set_colors: a list or array of shape (L, 4) of the same length as the level_set parameter with colors to use for the additional level sets. :param mc_step_size: step size used for marching cubes. :param **kwargs: arguments forwarded to the Node constructor. """ assert len(volume.shape) == 3 and len(size) == 3 kwargs["gui_material"] = False super().__init__(**kwargs) self.volume = volume self.size = np.array((size), np.float32) # Mesh. verts, faces, normals, _ = measure.marching_cubes( volume, level, spacing=self.size / (np.array(self.volume.shape) - 1.0), step_size=mc_step_size ) self.mesh = Meshes(verts, faces, vertex_normals=-normals, color=color, name="Mesh") # Level sets. self.level_sets: list[Meshes] = [] if level_sets is not None: if level_set_colors is not None: assert len(level_sets) == len(level_set_colors) for i, s in enumerate(level_sets): verts, faces, normals, _ = measure.marching_cubes( volume, s, spacing=self.size / (np.array(self.volume.shape) - 1.0), step_size=mc_step_size ) shell = Meshes(verts, faces, vertex_normals=-normals, name=f"Level {s:.03f}", cast_shadow=False) if level_set_colors is not None: shell.color = tuple(level_set_colors[i]) shell.clip_control = np.array((1, 1, 1)) shell.clip_value = self.size.copy() shell.backface_culling = False self.level_sets.append(shell) # Bounding box.
self.bounding_box = BoundingBoxes.from_min_max_diagonal(
0
2023-12-07 16:13:50+00:00
24k
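The SDF renderable in the record above meshes level sets of a dense signed-distance volume with skimage's marching cubes, using spacing = size / (shape - 1). A minimal sketch of that meshing step, assuming a synthetic sphere SDF rather than the viewer's own data:

import numpy as np
from skimage import measure

# Synthetic SDF of a sphere of radius 0.3 centred in a unit cube, sampled on a 64^3 grid.
n = 64
xs = np.linspace(0.0, 1.0, n)
x, y, z = np.meshgrid(xs, xs, xs, indexing="ij")
volume = np.sqrt((x - 0.5) ** 2 + (y - 0.5) ** 2 + (z - 0.5) ** 2) - 0.3

size = np.array([1.0, 1.0, 1.0], dtype=np.float32)
spacing = size / (np.array(volume.shape) - 1.0)        # same spacing convention as the record

# Extract the zero level set (the sphere surface).
verts, faces, normals, _ = measure.marching_cubes(volume, level=0.0, spacing=spacing)
print(verts.shape, faces.shape)                        # (V, 3) vertices and (F, 3) triangles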
nexB/dejacode
product_portfolio/filters.py
[ { "identifier": "ComponentKeyword", "path": "component_catalog/models.py", "snippet": "class ComponentKeyword(DataspacedModel):\n label = models.CharField(\n max_length=50,\n blank=True,\n help_text=_(\n \"A short, descriptive label to categorize components and support...
from django import forms from django.contrib import admin from django.utils.translation import gettext_lazy as _ from component_catalog.models import ComponentKeyword from component_catalog.programming_languages import PROGRAMMING_LANGUAGES from dje.filters import BooleanChoiceFilter from dje.filters import DataspacedFilterSet from dje.filters import DefaultOrderingFilter from dje.filters import HasRelationFilter from dje.filters import MatchOrderedSearchFilter from dje.filters import SearchFilter from dje.widgets import BootstrapSelectMultipleWidget from dje.widgets import DropDownWidget from license_library.models import License from product_portfolio.models import CodebaseResource from product_portfolio.models import Product from product_portfolio.models import ProductComponent from product_portfolio.models import ProductPackage from product_portfolio.models import ProductStatus import django_filters
14,960
# # Copyright (c) nexB Inc. and others. All rights reserved. # DejaCode is a trademark of nexB Inc. # SPDX-License-Identifier: AGPL-3.0-only # See https://github.com/nexB/dejacode for support or download. # See https://aboutcode.org for more information about AboutCode FOSS projects. # class ProductFilterSet(DataspacedFilterSet): q = MatchOrderedSearchFilter( label=_("Search"), match_order_fields=[ "name", "components__name", "packages__filename", ], search_fields=[ "name", "version", "components__name", "packages__filename", ], distinct=True, widget=forms.widgets.HiddenInput, ) sort = DefaultOrderingFilter( label=_("Sort"), fields=[ "name", "version", "license_expression", "primary_language", "owner", "configuration_status", ], field_labels={ "primary_language": "Language", "configuration_status": "Configuration Status", }, empty_label="Default", ) configuration_status = django_filters.ModelMultipleChoiceFilter( label=_("Configuration status"), field_name="configuration_status__label", to_field_name="label", queryset=ProductStatus.objects.all(), widget=BootstrapSelectMultipleWidget( search=False, search_placeholder="Search configuration status", ), ) primary_language = django_filters.MultipleChoiceFilter( label=_("Language"), choices=[(language, language) for language in PROGRAMMING_LANGUAGES], widget=BootstrapSelectMultipleWidget( search_placeholder="Search languages", ), ) licenses = django_filters.ModelMultipleChoiceFilter( label=_("License"), field_name="licenses__key", to_field_name="key", queryset=License.objects.all(), widget=BootstrapSelectMultipleWidget( search_placeholder="Search licenses", ), ) keywords = django_filters.ModelMultipleChoiceFilter( label=_("Keyword"), to_field_name="label", lookup_expr="contains", queryset=ComponentKeyword.objects.all().only("label", "dataspace"), widget=BootstrapSelectMultipleWidget( search_placeholder="Search keywords", ), ) class Meta: model = Product fields = [ "q", "licenses", "primary_language", "configuration_status", "keywords", ] class BaseProductRelationFilterSet(DataspacedFilterSet): is_deployed = BooleanChoiceFilter( empty_label="All (Inventory)", choices=( ("yes", _("Yes (BOM)")), ("no", _("No (Internal Use Only)")), ),
widget=DropDownWidget(
9
2023-12-07 16:57:42+00:00
24k
wusize/CLIM
src/open_clip/model.py
[ { "identifier": "HFTextEncoder", "path": "src/open_clip/hf_model.py", "snippet": "class HFTextEncoder(nn.Module):\n \"\"\"HuggingFace model adapter\"\"\"\n output_tokens: torch.jit.Final[bool]\n\n def __init__(\n self,\n model_name_or_path: str,\n output_dim: in...
from dataclasses import dataclass from typing import Optional, Tuple, Union from torch import nn from torch.utils.checkpoint import checkpoint from .hf_model import HFTextEncoder from .modified_resnet import ModifiedResNet from .timm_model import TimmModel from .transformer import LayerNormFp32, LayerNorm, QuickGELU, Attention, VisionTransformer, TextTransformer from .utils import to_2tuple import logging import math import numpy as np import torch import torch.nn.functional as F
16,255
timm_drop_path: Optional[float] = None # backbone stochastic depth output_tokens: bool = False freeze_output = True freeze_all_bns = True @dataclass class CLIPTextCfg: context_length: int = 77 vocab_size: int = 49408 width: int = 512 heads: int = 8 layers: int = 12 ls_init_value: Optional[float] = None # layer scale initial value hf_model_name: str = None hf_tokenizer_name: str = None hf_model_pretrained: bool = True proj: str = 'mlp' pooler_type: str = 'mean_pooler' embed_cls: bool = False pad_id: int = 0 output_tokens: bool = False def get_cast_dtype(precision: str): cast_dtype = None if precision == 'bf16': cast_dtype = torch.bfloat16 elif precision == 'fp16': cast_dtype = torch.float16 return cast_dtype def _build_vision_tower( embed_dim: int, vision_cfg: CLIPVisionCfg, quick_gelu: bool = False, cast_dtype: Optional[torch.dtype] = None ): if isinstance(vision_cfg, dict): vision_cfg = CLIPVisionCfg(**vision_cfg) # OpenAI models are pretrained w/ QuickGELU but native nn.GELU is both faster and more # memory efficient in recent PyTorch releases (>= 1.10). # NOTE: timm models always use native GELU regardless of quick_gelu flag. act_layer = QuickGELU if quick_gelu else nn.GELU if vision_cfg.timm_model_name: visual = TimmModel( vision_cfg.timm_model_name, pretrained=vision_cfg.timm_model_pretrained, pool=vision_cfg.timm_pool, proj=vision_cfg.timm_proj, proj_bias=vision_cfg.timm_proj_bias, drop=vision_cfg.timm_drop, drop_path=vision_cfg.timm_drop_path, patch_drop=vision_cfg.patch_dropout if vision_cfg.patch_dropout > 0 else None, embed_dim=embed_dim, image_size=vision_cfg.image_size, ) act_layer = nn.GELU # so that text transformer doesn't use QuickGELU w/ timm models elif isinstance(vision_cfg.layers, (tuple, list)): vision_heads = vision_cfg.width * 32 // vision_cfg.head_width visual = ModifiedResNet( layers=vision_cfg.layers, output_dim=embed_dim, heads=vision_heads, image_size=vision_cfg.image_size, width=vision_cfg.width, freeze_output=vision_cfg.freeze_output, freeze_all_bns=vision_cfg.freeze_all_bns ) else: vision_heads = vision_cfg.width // vision_cfg.head_width norm_layer = LayerNormFp32 if cast_dtype in (torch.float16, torch.bfloat16) else LayerNorm visual = VisionTransformer( image_size=vision_cfg.image_size, patch_size=vision_cfg.patch_size, width=vision_cfg.width, layers=vision_cfg.layers, heads=vision_heads, mlp_ratio=vision_cfg.mlp_ratio, ls_init_value=vision_cfg.ls_init_value, patch_dropout=vision_cfg.patch_dropout, input_patchnorm=vision_cfg.input_patchnorm, global_average_pool=vision_cfg.global_average_pool, attentional_pool=vision_cfg.attentional_pool, n_queries=vision_cfg.n_queries, attn_pooler_heads=vision_cfg.attn_pooler_heads, output_tokens=vision_cfg.output_tokens, output_dim=embed_dim, act_layer=act_layer, norm_layer=norm_layer, ) return visual def _build_text_tower( embed_dim: int, text_cfg: CLIPTextCfg, quick_gelu: bool = False, cast_dtype: Optional[torch.dtype] = None, ): if isinstance(text_cfg, dict): text_cfg = CLIPTextCfg(**text_cfg) if text_cfg.hf_model_name: text = HFTextEncoder( text_cfg.hf_model_name, output_dim=embed_dim, proj=text_cfg.proj, pooler_type=text_cfg.pooler_type, pretrained=text_cfg.hf_model_pretrained, output_tokens=text_cfg.output_tokens, ) else: act_layer = QuickGELU if quick_gelu else nn.GELU norm_layer = LayerNormFp32 if cast_dtype in (torch.float16, torch.bfloat16) else LayerNorm
""" CLIP Model Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI. """ @dataclass class CLIPVisionCfg: layers: Union[Tuple[int, int, int, int], int] = 12 width: int = 768 head_width: int = 64 mlp_ratio: float = 4.0 patch_size: int = 16 image_size: Union[Tuple[int, int], int] = 224 ls_init_value: Optional[float] = None # layer scale initial value patch_dropout: float = 0. # what fraction of patches to dropout during training (0 would mean disabled and no patches dropped) - 0.5 to 0.75 recommended in the paper for optimal results input_patchnorm: bool = False # whether to use dual patchnorm - would only apply the input layernorm on each patch, as post-layernorm already exist in original clip vit design global_average_pool: bool = False # whether to global average pool the last embedding layer, instead of using CLS token (https://arxiv.org/abs/2205.01580) attentional_pool: bool = False # whether to use attentional pooler in the last embedding layer n_queries: int = 256 # n_queries for attentional pooler attn_pooler_heads: int = 8 # n heads for attentional_pooling timm_model_name: str = None # a valid model name overrides layers, width, patch_size timm_model_pretrained: bool = False # use (imagenet) pretrained weights for named model timm_pool: str = 'avg' # feature pooling for timm model ('abs_attn', 'rot_attn', 'avg', '') timm_proj: str = 'linear' # linear projection for timm model output ('linear', 'mlp', '') timm_proj_bias: bool = False # enable bias final projection timm_drop: float = 0. # head dropout timm_drop_path: Optional[float] = None # backbone stochastic depth output_tokens: bool = False freeze_output = True freeze_all_bns = True @dataclass class CLIPTextCfg: context_length: int = 77 vocab_size: int = 49408 width: int = 512 heads: int = 8 layers: int = 12 ls_init_value: Optional[float] = None # layer scale initial value hf_model_name: str = None hf_tokenizer_name: str = None hf_model_pretrained: bool = True proj: str = 'mlp' pooler_type: str = 'mean_pooler' embed_cls: bool = False pad_id: int = 0 output_tokens: bool = False def get_cast_dtype(precision: str): cast_dtype = None if precision == 'bf16': cast_dtype = torch.bfloat16 elif precision == 'fp16': cast_dtype = torch.float16 return cast_dtype def _build_vision_tower( embed_dim: int, vision_cfg: CLIPVisionCfg, quick_gelu: bool = False, cast_dtype: Optional[torch.dtype] = None ): if isinstance(vision_cfg, dict): vision_cfg = CLIPVisionCfg(**vision_cfg) # OpenAI models are pretrained w/ QuickGELU but native nn.GELU is both faster and more # memory efficient in recent PyTorch releases (>= 1.10). # NOTE: timm models always use native GELU regardless of quick_gelu flag. 
act_layer = QuickGELU if quick_gelu else nn.GELU if vision_cfg.timm_model_name: visual = TimmModel( vision_cfg.timm_model_name, pretrained=vision_cfg.timm_model_pretrained, pool=vision_cfg.timm_pool, proj=vision_cfg.timm_proj, proj_bias=vision_cfg.timm_proj_bias, drop=vision_cfg.timm_drop, drop_path=vision_cfg.timm_drop_path, patch_drop=vision_cfg.patch_dropout if vision_cfg.patch_dropout > 0 else None, embed_dim=embed_dim, image_size=vision_cfg.image_size, ) act_layer = nn.GELU # so that text transformer doesn't use QuickGELU w/ timm models elif isinstance(vision_cfg.layers, (tuple, list)): vision_heads = vision_cfg.width * 32 // vision_cfg.head_width visual = ModifiedResNet( layers=vision_cfg.layers, output_dim=embed_dim, heads=vision_heads, image_size=vision_cfg.image_size, width=vision_cfg.width, freeze_output=vision_cfg.freeze_output, freeze_all_bns=vision_cfg.freeze_all_bns ) else: vision_heads = vision_cfg.width // vision_cfg.head_width norm_layer = LayerNormFp32 if cast_dtype in (torch.float16, torch.bfloat16) else LayerNorm visual = VisionTransformer( image_size=vision_cfg.image_size, patch_size=vision_cfg.patch_size, width=vision_cfg.width, layers=vision_cfg.layers, heads=vision_heads, mlp_ratio=vision_cfg.mlp_ratio, ls_init_value=vision_cfg.ls_init_value, patch_dropout=vision_cfg.patch_dropout, input_patchnorm=vision_cfg.input_patchnorm, global_average_pool=vision_cfg.global_average_pool, attentional_pool=vision_cfg.attentional_pool, n_queries=vision_cfg.n_queries, attn_pooler_heads=vision_cfg.attn_pooler_heads, output_tokens=vision_cfg.output_tokens, output_dim=embed_dim, act_layer=act_layer, norm_layer=norm_layer, ) return visual def _build_text_tower( embed_dim: int, text_cfg: CLIPTextCfg, quick_gelu: bool = False, cast_dtype: Optional[torch.dtype] = None, ): if isinstance(text_cfg, dict): text_cfg = CLIPTextCfg(**text_cfg) if text_cfg.hf_model_name: text = HFTextEncoder( text_cfg.hf_model_name, output_dim=embed_dim, proj=text_cfg.proj, pooler_type=text_cfg.pooler_type, pretrained=text_cfg.hf_model_pretrained, output_tokens=text_cfg.output_tokens, ) else: act_layer = QuickGELU if quick_gelu else nn.GELU norm_layer = LayerNormFp32 if cast_dtype in (torch.float16, torch.bfloat16) else LayerNorm
text = TextTransformer(
8
2023-12-09 05:43:08+00:00
24k
LkPrtctrd/BSL-V53
Heart/Logic/LogicLaserMessageFactory.py
[ { "identifier": "ClientHelloMessage", "path": "Heart/Packets/Client/Authentification/ClientHelloMessage.py", "snippet": "class ClientHelloMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields)...
from Heart.Packets.Client.Authentification.ClientHelloMessage import ClientHelloMessage from Heart.Packets.Client.Authentification.LoginMessage import LoginMessage from Heart.Packets.Client.Battle.AskForBattleEndMessage import AskForBattleEndMessage from Heart.Packets.Client.Home.ChangeAvatarNameMessage import ChangeAvatarNameMessage from Heart.Packets.Client.Home.EndClientTurnMessage import EndClientTurnMessage from Heart.Packets.Client.Home.GoHomeFromOfflinePractiseMessage import GoHomeFromOfflinePractiseMessage from Heart.Packets.Client.Home.GoHomeMessage import GoHomeMessage from Heart.Packets.Client.Home.GetPlayerProfileMessage import GetPlayerProfileMessage from Heart.Packets.Client.Home.AskForAllianceDataMessage import AskForAllianceDataMessage from Heart.Packets.Client.Socket.KeepAliveMessage import KeepAliveMessage from Heart.Packets.Server.Authentification.LoginFailedMessage import LoginFailedMessage from Heart.Packets.Server.Authentification.LoginOkMessage import LoginOkMessage from Heart.Packets.Server.Authentification.OutOfSyncMessage import OutOfSyncMessage from Heart.Packets.Server.Authentification.ServerHelloMessage import ServerHelloMessage from Heart.Packets.Server.Battle.BattleEndMessage import BattleEndMessage from Heart.Packets.Server.Home.AvailableServerCommandMessage import AvailableServerCommandMessage from Heart.Packets.Server.Home.LobbyInfoMessage import LobbyInfoMessage from Heart.Packets.Server.Home.OwnHomeDataMessage import OwnHomeDataMessage from Heart.Packets.Server.Socket.KeepAliveServerMessage import KeepAliveServerMessage from Heart.Packets.Server.Home.PlayerProfileMessage import PlayerProfileMessage from Heart.Packets.Server.Home.MyAllianceMessage import MyAllianceMessage from Heart.Packets.Server.Home.AllianceDataMessage import AllianceDataMessage
16,026
12111: 'SignoffPlayerMapMessage', 12125: 'ReportPlayerMapMessage', 12152: 'RankedMatchBanHeroMessage', 12155: 'RankedMatchPickHeroMessage', 12157: 'RankedMatchUpdateHeroDataMessage', 12905: 'GetCurrentBattleReplayDataMessage', 12998: 'SetCountryMessage', 13922: 'AcceptTokenFriendMessage', 14101: GoHomeMessage, 14102: EndClientTurnMessage, 14103: 'StartGameMessage', 14104: 'StartSpectateMessage', 14105: 'HomeLogicStoppedMessage', 14106: 'CancelMatchmakingMessage', 14107: 'StopSpectateMessage', 14108: 'GoHomeFromSpectateMessage', #14109: GoHomeFromOfflinePractiseMessage, //before v50 14110: AskForBattleEndMessage, #14113: GetPlayerProfileMessage, //before v50 14114: 'GetBattleLogMessage', 14115: 'BattleLogViewReplayMessage', 14116: 'ViewReplayByStringMessage', 14117: 'RequestMatchCancelMessage', 14118: 'SinglePlayerMatchRequestMessage', 14166: 'ChronosEventSeenMessage', 14167: 'ChronosEventSeenMessage', 14177: 'PlayAgainMessage', 14178: 'DebugCommandMessage', 14199: 'LookForGameRoomRequestMessage', 14211: 'UnbindFacebookAccountMessage', 14201: 'BindFacebookAccountMessage', 14202: 'BindKakaoAccountMessage', 14203: 'BingLineAccountMessage', 14212: 'BindGamecenterAccountMessage', 14213: 'UnbindKakaoAccountMessage', 14214: 'UnbindLineAccountMessage', 14262: 'BindGoogleServiceAccountMessage', 14266: 'BindTencentAccountMessage', 14268: 'TencentCheckCanPayMessage', 14276: 'TencentAntiAddictionInstructionExecutedMessage', 14277: 'GetSeasonRewardsMessage', 14299: 'SetAllianceCountryMessage', 14301: 'CreateAllianceMessage', 14302: AskForAllianceDataMessage, 14303: 'AskForJoinableAlliancesListMessage', 14304: 'AskForAllianceStreamMessage', 14305: 'JoinAllianceMessage', 14306: 'ChangeAllianceMemberRoleMessage', 14307: 'KickAllianceMemberMessage', 14308: 'LeaveAllianceMessage', 14315: 'ChatToAllianceStreamMessage', 14316: 'ChangeAllianceSettingsMessage', 14317: 'RequestJoinAllianceMessage', 14321: 'RespondToAllianceJoinRequestMessage', 14322: 'SendAllianceInvitationMessage', 14323: 'JoinAllianceUsingInvitationMessage', 14324: 'SearchAlliancesMessage', 14326: 'SendAllianceInvitationToFriendMessage', 14330: 'SendAllianceMailMessage', 14350: 'TeamCreateMessage', 14351: 'TeamJoinMessage', 14352: 'TeamKickMessage', 14353: 'TeamLeaveMessage', 14354: 'TeamChangeMemberSettingsMessage', 14355: 'TeamSetMemberReadyMessage', 14356: 'TeamTogglePractiseMessage', 14357: 'TeamToggleMemberSideMessage', 14358: 'TeamSpectateMessage', 14359: 'TeamChatMessage', 14360: 'TeamPostAdMessage', 14361: 'TeamMemberStatusMessage', 14362: 'TeamSetEventMessage', 14363: 'TeamSetLocationMessage', 14364: 'TeamReportChatMessage', 14365: 'TeamInviteMessage', 14366: 'PlayerStatusMessage', 14367: 'TeamClearInviteMessage', 14368: 'TeamInviteResponseMessage', 14369: 'TeamPremadeChatMessage', 14370: 'TeamAllianceMemberInviteMessage', 14371: 'TeamJoinOrCreateGameRoomMessage', 14372: 'TeamToggleSettingsMessage', 14373: 'TeamBotSlotDisableMessage', 14403: 'GetLeaderboardMessage', 14405: 'AskForAvatarStreamMessage', 14406: 'AskForBattleReplayStreamMessage', 14418: 'RemoveAvatarStreamEntryMessage', 14469: 'AlliancePremadeChatMessage', 14479: 'TeamInvitationResponseMessage', 14600: 'AvatarNameCheckRequestMessage', 14700: 'ListBrawlTvChannelsMessage', 14701: 'TuneBrawlTvChannelMessage', 14715: 'SendGlobalChatLineMessage', 14777: 'SetInvitesBlockedMessage', 14778: 'SetTeamChatMutedMessage', 14867: 'SetRegionMessage', 14880: 'TeamRequestJoinCancelMessage', 14881: 'TeamRequestJoinMessage', 14882: 'TeamRequestJoinApproveMessage', 15081: 
GetPlayerProfileMessage, #v50 15793: 'GetTokenFriendMessage', 16000: 'LogicDeviceLinkCodeRequestMessage', 16001: 'LogicDeviceLinkMenuClosedMessage', 16002: 'LogicDeviceLinkEnterCodeMessage', 16003: 'LogicDeviceLinkConfirmYesMessage', 16939: 'AskApiTokenMessage', 17000: 'LogicAccountTransferCodeRequestMessage', 17190: 'JoinAllianceUsingTokenMessage', 17337: 'UnbotifyReportMessage', 17338: 'AdjustPackageMessage', 17750: GoHomeFromOfflinePractiseMessage, #v50 18686: 'SetSupportedCreatorMessage', 19001: 'LatencyTestResultMessage', 19002: 'UdpLatencyTestRequestMessage', 19003: 'TriggerStartLatencyTestMessage', 19004: 'RequestLatencyTestStatusMessage', 20000: 'SetEncryptionMessage', 20100: ServerHelloMessage, 20101: 'CreateAccountOkMessage', 20103: LoginFailedMessage,
class LogicLaserMessageFactory: messagesList = { 10055: 'AskPlayerJWTokenMessage', 10099: 'ClientCryptoErrorMessage', 10100: ClientHelloMessage, 10101: LoginMessage, 10102: 'LoginUsingSessionMessage', 10103: 'CreateAccountMessage', 10107: 'ClientCapabilitiesMessage', 10108: KeepAliveMessage, 10109: 'UdpCheckConnectionMessage', 10110: 'AnalyticEventMessage', 10111: 'AccountIdentifiersMessage', 10112: 'AuthenticationCheckMessage', 10113: 'SetDeviceTokenMessage', 10116: 'ResetAccountMessage', 10117: 'ReportUserMessage', 10118: 'AccountSwitchedMessage', 10119: 'ReportAllianceStreamMessage', 10121: 'UnlockAccountMessage', 10150: 'AppleBillingRequestMessage', 10151: 'GoogleBillingRequestMessage', 10152: 'TencentBillingRequestMessage', 10153: 'CafeBazaarBillingRequestMessage', 10159: 'KunlunBillingRequestMessage', 10160: 'BillingCancelledByClientMessage', 10177: 'ClientInfoMessage', 10212: ChangeAvatarNameMessage, 10309: 'GetAllianceInviteTokenMessage', 10321: 'AttributionEventMessage', 10401: 'CreateGameMessage', 10501: 'AcceptFriendMessage', 10502: 'AddFriendMessage', 10503: 'AskForAddableFriendsMessage', 10504: 'AskForFriendListMessage', 10506: 'RemoveFriendMessage', 10507: 'AddFriendByEmailMessage', 10509: 'AddFriendByAvatarNameAndCodeMessage', 10512: 'AskForPlayingGamecenterFriendsMessage', 10513: 'AskForPlayingFacebookFriendsMessage', 10514: 'AskForPlayingKakaoFriendsMessage', 10515: 'AskForPlayingTencentFriendsMessage', 10516: 'AskForPlayingLineFriendsMessage', 10517: 'AskForPlayingSupercellFriendsMessage', 10523: 'YoozooBillingRequestMessage', 10555: 'ClientInputMessage', 10576: 'SetBlockFriendRequestsMessage', 10599: 'AskForFriendSuggestionsMessage', 10636: 'SCIDBindAccountMessage', 11736: 'SCIDLogoutAllDevicesMessage', 12100: 'CreatePlayerMapMessage', 12101: 'DeletePlayerMapMessage', 12102: 'GetPlayerMapsMessage', 12103: 'UpdatePlayerMapMessage', 12104: 'SubmitPlayerMapMessage', 12105: 'PublishPlayerMapMessage', 12106: 'ChangePlayerMapNameMessage', 12107: 'EnterMapEditorMessage', 12108: 'GoHomeFromMapEditorMessage', 12110: 'TeamSetPlayerMapMessage', 12111: 'SignoffPlayerMapMessage', 12125: 'ReportPlayerMapMessage', 12152: 'RankedMatchBanHeroMessage', 12155: 'RankedMatchPickHeroMessage', 12157: 'RankedMatchUpdateHeroDataMessage', 12905: 'GetCurrentBattleReplayDataMessage', 12998: 'SetCountryMessage', 13922: 'AcceptTokenFriendMessage', 14101: GoHomeMessage, 14102: EndClientTurnMessage, 14103: 'StartGameMessage', 14104: 'StartSpectateMessage', 14105: 'HomeLogicStoppedMessage', 14106: 'CancelMatchmakingMessage', 14107: 'StopSpectateMessage', 14108: 'GoHomeFromSpectateMessage', #14109: GoHomeFromOfflinePractiseMessage, //before v50 14110: AskForBattleEndMessage, #14113: GetPlayerProfileMessage, //before v50 14114: 'GetBattleLogMessage', 14115: 'BattleLogViewReplayMessage', 14116: 'ViewReplayByStringMessage', 14117: 'RequestMatchCancelMessage', 14118: 'SinglePlayerMatchRequestMessage', 14166: 'ChronosEventSeenMessage', 14167: 'ChronosEventSeenMessage', 14177: 'PlayAgainMessage', 14178: 'DebugCommandMessage', 14199: 'LookForGameRoomRequestMessage', 14211: 'UnbindFacebookAccountMessage', 14201: 'BindFacebookAccountMessage', 14202: 'BindKakaoAccountMessage', 14203: 'BingLineAccountMessage', 14212: 'BindGamecenterAccountMessage', 14213: 'UnbindKakaoAccountMessage', 14214: 'UnbindLineAccountMessage', 14262: 'BindGoogleServiceAccountMessage', 14266: 'BindTencentAccountMessage', 14268: 'TencentCheckCanPayMessage', 14276: 'TencentAntiAddictionInstructionExecutedMessage', 14277: 
'GetSeasonRewardsMessage', 14299: 'SetAllianceCountryMessage', 14301: 'CreateAllianceMessage', 14302: AskForAllianceDataMessage, 14303: 'AskForJoinableAlliancesListMessage', 14304: 'AskForAllianceStreamMessage', 14305: 'JoinAllianceMessage', 14306: 'ChangeAllianceMemberRoleMessage', 14307: 'KickAllianceMemberMessage', 14308: 'LeaveAllianceMessage', 14315: 'ChatToAllianceStreamMessage', 14316: 'ChangeAllianceSettingsMessage', 14317: 'RequestJoinAllianceMessage', 14321: 'RespondToAllianceJoinRequestMessage', 14322: 'SendAllianceInvitationMessage', 14323: 'JoinAllianceUsingInvitationMessage', 14324: 'SearchAlliancesMessage', 14326: 'SendAllianceInvitationToFriendMessage', 14330: 'SendAllianceMailMessage', 14350: 'TeamCreateMessage', 14351: 'TeamJoinMessage', 14352: 'TeamKickMessage', 14353: 'TeamLeaveMessage', 14354: 'TeamChangeMemberSettingsMessage', 14355: 'TeamSetMemberReadyMessage', 14356: 'TeamTogglePractiseMessage', 14357: 'TeamToggleMemberSideMessage', 14358: 'TeamSpectateMessage', 14359: 'TeamChatMessage', 14360: 'TeamPostAdMessage', 14361: 'TeamMemberStatusMessage', 14362: 'TeamSetEventMessage', 14363: 'TeamSetLocationMessage', 14364: 'TeamReportChatMessage', 14365: 'TeamInviteMessage', 14366: 'PlayerStatusMessage', 14367: 'TeamClearInviteMessage', 14368: 'TeamInviteResponseMessage', 14369: 'TeamPremadeChatMessage', 14370: 'TeamAllianceMemberInviteMessage', 14371: 'TeamJoinOrCreateGameRoomMessage', 14372: 'TeamToggleSettingsMessage', 14373: 'TeamBotSlotDisableMessage', 14403: 'GetLeaderboardMessage', 14405: 'AskForAvatarStreamMessage', 14406: 'AskForBattleReplayStreamMessage', 14418: 'RemoveAvatarStreamEntryMessage', 14469: 'AlliancePremadeChatMessage', 14479: 'TeamInvitationResponseMessage', 14600: 'AvatarNameCheckRequestMessage', 14700: 'ListBrawlTvChannelsMessage', 14701: 'TuneBrawlTvChannelMessage', 14715: 'SendGlobalChatLineMessage', 14777: 'SetInvitesBlockedMessage', 14778: 'SetTeamChatMutedMessage', 14867: 'SetRegionMessage', 14880: 'TeamRequestJoinCancelMessage', 14881: 'TeamRequestJoinMessage', 14882: 'TeamRequestJoinApproveMessage', 15081: GetPlayerProfileMessage, #v50 15793: 'GetTokenFriendMessage', 16000: 'LogicDeviceLinkCodeRequestMessage', 16001: 'LogicDeviceLinkMenuClosedMessage', 16002: 'LogicDeviceLinkEnterCodeMessage', 16003: 'LogicDeviceLinkConfirmYesMessage', 16939: 'AskApiTokenMessage', 17000: 'LogicAccountTransferCodeRequestMessage', 17190: 'JoinAllianceUsingTokenMessage', 17337: 'UnbotifyReportMessage', 17338: 'AdjustPackageMessage', 17750: GoHomeFromOfflinePractiseMessage, #v50 18686: 'SetSupportedCreatorMessage', 19001: 'LatencyTestResultMessage', 19002: 'UdpLatencyTestRequestMessage', 19003: 'TriggerStartLatencyTestMessage', 19004: 'RequestLatencyTestStatusMessage', 20000: 'SetEncryptionMessage', 20100: ServerHelloMessage, 20101: 'CreateAccountOkMessage', 20103: LoginFailedMessage,
20104: LoginOkMessage,
11
2023-12-14 18:57:56+00:00
24k
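The LogicLaserMessageFactory record above maps numeric packet IDs either to implemented message classes or to placeholder name strings. The following is a hypothetical sketch of that lookup pattern only; it assumes nothing about the repo's actual factory API, and the class and function names below are invented for illustration.

class KeepAliveMessage:
    def __init__(self, data: bytes):
        self.data = data

MESSAGES = {
    10108: KeepAliveMessage,              # implemented: value is a class
    10109: "UdpCheckConnectionMessage",   # placeholder: value is only a name
}

def create_message(message_id: int, payload: bytes):
    entry = MESSAGES.get(message_id)
    if entry is None or isinstance(entry, str):
        # Unknown or not-yet-implemented message type: nothing to instantiate.
        return None
    return entry(payload)

print(create_message(10108, b"\x00"))  # KeepAliveMessage instance
print(create_message(10109, b"\x00"))  # None (placeholder entry)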
GXNU-ZhongLab/ODTrack
lib/train/base_functions.py
[ { "identifier": "Lasot", "path": "lib/train/dataset/lasot.py", "snippet": "class Lasot(BaseVideoDataset):\n \"\"\" LaSOT dataset.\n\n Publication:\n LaSOT: A High-quality Benchmark for Large-scale Single Object Tracking\n Heng Fan, Liting Lin, Fan Yang, Peng Chu, Ge Deng, Sijia Yu, H...
import os import torch import lib.train.data.transforms as tfm from torch.utils.data.distributed import DistributedSampler from lib.train.dataset import Lasot, Got10k, MSCOCOSeq, ImagenetVID, TrackingNet from lib.train.dataset import Lasot_lmdb, Got10k_lmdb, MSCOCOSeq_lmdb, ImagenetVID_lmdb, TrackingNet_lmdb from lib.train.data import sampler, opencv_loader, processing, LTRLoader from lib.utils.misc import is_main_process
18,241
# datasets related def update_settings(settings, cfg): settings.print_interval = cfg.TRAIN.PRINT_INTERVAL settings.search_area_factor = {'template': cfg.DATA.TEMPLATE.FACTOR, 'search': cfg.DATA.SEARCH.FACTOR} settings.output_sz = {'template': cfg.DATA.TEMPLATE.SIZE, 'search': cfg.DATA.SEARCH.SIZE} settings.center_jitter_factor = {'template': cfg.DATA.TEMPLATE.CENTER_JITTER, 'search': cfg.DATA.SEARCH.CENTER_JITTER} settings.scale_jitter_factor = {'template': cfg.DATA.TEMPLATE.SCALE_JITTER, 'search': cfg.DATA.SEARCH.SCALE_JITTER} settings.grad_clip_norm = cfg.TRAIN.GRAD_CLIP_NORM settings.print_stats = None settings.batchsize = cfg.TRAIN.BATCH_SIZE settings.scheduler_type = cfg.TRAIN.SCHEDULER.TYPE def names2datasets(name_list: list, settings, image_loader): assert isinstance(name_list, list) datasets = [] for name in name_list: assert name in ["LASOT", "GOT10K_vottrain", "GOT10K_votval", "GOT10K_train_full", "GOT10K_official_val", "COCO17", "VID", "TRACKINGNET", ] # Tracking Task if name == "LASOT": if settings.use_lmdb: print("Building lasot dataset from lmdb") datasets.append(Lasot_lmdb(settings.env.lasot_lmdb_dir, split='train', image_loader=image_loader)) else: datasets.append(Lasot(settings.env.lasot_dir, split='train', image_loader=image_loader)) if name == "GOT10K_vottrain": if settings.use_lmdb: print("Building got10k from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='vottrain', image_loader=image_loader)) else: datasets.append(Got10k(settings.env.got10k_dir, split='vottrain', image_loader=image_loader)) if name == "GOT10K_train_full": if settings.use_lmdb: print("Building got10k_train_full from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='train_full', image_loader=image_loader)) else: datasets.append(Got10k(settings.env.got10k_dir, split='train_full', image_loader=image_loader)) if name == "GOT10K_votval": if settings.use_lmdb: print("Building got10k from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='votval', image_loader=image_loader)) else: datasets.append(Got10k(settings.env.got10k_dir, split='votval', image_loader=image_loader)) if name == "GOT10K_official_val": if settings.use_lmdb: raise ValueError("Not implement") else: datasets.append(Got10k(settings.env.got10k_val_dir, split=None, image_loader=image_loader)) if name == "COCO17": if settings.use_lmdb: print("Building COCO2017 from lmdb") datasets.append(MSCOCOSeq_lmdb(settings.env.coco_lmdb_dir, version="2017", image_loader=image_loader)) else: datasets.append(MSCOCOSeq(settings.env.coco_dir, version="2017", image_loader=image_loader)) if name == "VID": if settings.use_lmdb: print("Building VID from lmdb") datasets.append(ImagenetVID_lmdb(settings.env.imagenet_lmdb_dir, image_loader=image_loader)) else: datasets.append(ImagenetVID(settings.env.imagenet_dir, image_loader=image_loader)) if name == "TRACKINGNET": if settings.use_lmdb: print("Building TrackingNet from lmdb") datasets.append(TrackingNet_lmdb(settings.env.trackingnet_lmdb_dir, image_loader=image_loader)) else: # raise ValueError("NOW WE CAN ONLY USE TRACKINGNET FROM LMDB")
datasets.append(TrackingNet(settings.env.trackingnet_dir, image_loader=image_loader))
2
2023-12-10 03:57:19+00:00
24k
lumina-test/lumina
lumina/e2e_test/test_gbn.py
[ { "identifier": "get_qp_info_list", "path": "lumina/analyzer/main.py", "snippet": "def get_qp_info_list(switch_msg_snapshot):\n \"\"\" Get the list of QP info from the switch message snapshot\n\n Args:\n switch_msg_snapshot (str): The path to the switch message snapshot\n\n Returns:\n ...
import argparse, os, math, glob, logging, time import lumina.analyzer.checker.integrity_check as integrity_check import lumina.analyzer.checker.host_check as host_check import lumina.analyzer.checker.gbn_check as gbn_check import lumina.analyzer.checker.read_gbn_check as read_gbn_check import lumina.orchestrator.host as host import lumina.orchestrator.switch as switch from lumina.analyzer.main import get_qp_info_list from lumina.orchestrator.main import Orchestrator from lumina.analyzer.counter.switch_counter import SwitchCounter from lumina.analyzer.counter.host_counter import MLNXHostCounter, IntelHostCounter from lumina.analyzer.pcap_processor.pcap_process import get_packet_list from lumina.analyzer.measurer.latency_measure import LatencyMeasure from lumina.utils.config_loggers import config_stream_handler, config_file_handler from lumina.analyzer.packet_parser.roce_packet import TRIGGER_OOS, TRIGGER_TIMEOUT
15,816
elif orchestrator.requester.is_intel_nic(): requester_counter = IntelHostCounter(requester_counter_start, requester_counter_finish) else: logging.error("Unkown NIC Vendor for rdma requester.") requester_counter = None if orchestrator.responder.is_mlnx_nic(): responder_counter = MLNXHostCounter(responder_counter_start, responder_counter_finish) elif orchestrator.responder.is_intel_nic(): responder_counter = IntelHostCounter(responder_counter_start, responder_counter_finish) else: logging.error("Unkown NIC Vendor for rdma responder.") responder_counter = None qp_info_list = get_qp_info_list(switch_msg_snapshot) packet_list = get_packet_list(pcap_filename) packet_list.sort(key=lambda x:x.get_switch_seqnum()) result_logger.info("Packet trace sorted by switch sequence number.") ## Do integrity check to make sure there is nothing wrong with traces and counters integrity_checker = integrity_check.IntegrityCheck(packet_list=packet_list, switch_counter=switch_counter, requester_ip_list=requester_ip_list, responder_ip_list=responder_ip_list) if integrity_checker.check(): result_logger.info("Integrity check passed") else: result_logger.error("Integrity check failed") continue ## Check host counters host_counter_checker = host_check.HostCounterCheck() if host_counter_checker.check_no_packet_loss(requester_counter, responder_counter): result_logger.info("Host packet discard counter check passed") else: result_logger.error("Host packet discard counter check failed") continue rdma_verb = orchestrator.traffic_conf['rdma-verb'].lower().strip() if rdma_verb not in host.VALID_IB_VERB_LIST_LOWER: logging.error("Invalid RDMA verb: %s" % rdma_verb) continue ## RDMA READ if rdma_verb == 'read': read_gbn_checker = read_gbn_check.ReadGBNCheck(packet_list=packet_list, qp_info_list=qp_info_list, num_msgs_per_qp=num_msgs_per_qp, msg_size=msg_size, mtu=mtu) if read_gbn_checker.check_all_qps() == True: result_logger.info("READ Go-Back-N state machine check passed for all qps.") else: result_logger.error("READ Go-Back-N state machine check failed") continue gbn_counter_check = read_gbn_checker.check_counters(sender_counter=responder_counter, receiver_counter=requester_counter) if gbn_counter_check == True: result_logger.info("READ Go-Back-N counter check passed.") else: result_logger.error("READ Go-Back-N counter check failed") continue ## A mix of RDMA SEND and READ elif rdma_verb == 'send_read': num_qps_send, num_qps_read = [int(x) for x in orchestrator.traffic_conf['num-qps'].split(',')] send_qp_info_list = qp_info_list[0:num_qps_send] read_qp_info_list = qp_info_list[num_qps_send:] send_gbn_checker = gbn_check.GBNCheck(packet_list=packet_list, qp_info_list=send_qp_info_list, num_data_pkts=math.ceil(msg_size/mtu) * num_msgs_per_qp) read_gbn_checker = read_gbn_check.ReadGBNCheck(packet_list=packet_list, qp_info_list=read_qp_info_list, num_msgs_per_qp=num_msgs_per_qp, msg_size=msg_size, mtu=mtu) if send_gbn_checker.check_all_qps() == True and read_gbn_checker.check_all_qps() == True: result_logger.info("Go-Back-N state machine check passed for all qps.") else: result_logger.error("Go-Back-N state machine check failed") continue send_gbn_counter_check = send_gbn_checker.check_counters(sender_counter=requester_counter, receiver_counter=responder_counter) read_gbn_counter_check = read_gbn_checker.check_counters(sender_counter=responder_counter, receiver_counter=requester_counter) if send_gbn_counter_check == True and read_gbn_counter_check == True: result_logger.info("Go-Back-N counter check passed for all qps.") else: 
result_logger.error("Go-Back-N counter check failed") continue else: ## Check the traces and counters against the GBN state machine gbn_checker = gbn_check.GBNCheck(packet_list=packet_list, qp_info_list=qp_info_list, num_data_pkts=math.ceil(msg_size/mtu) * num_msgs_per_qp) if gbn_checker.check_all_qps() == True: result_logger.info("Go-Back-N state machine check passed for all qps.") else: result_logger.error("Go-Back-N state machine check failed") continue gbn_counter_check = gbn_checker.check_counters(sender_counter=requester_counter, receiver_counter=responder_counter) if gbn_counter_check == True: result_logger.info("Go-Back-N counter check passed.") else: result_logger.error("Go-Back-N counter check failed") continue ## Output the latency for undelivered packets num_qps = len(qp_info_list) for qp_index in range(num_qps): is_read = (rdma_verb == 'read') or (rdma_verb == 'send_read' and qp_index >= num_qps_send)
## All logs will be logged into file LOG_FILENAME LOG_FILENAME = "test_gbn.log" ## Results (checkers and measurements) will also be dumped into file RESULT_FILENAME RESULT_FILENAME = "result.log" ## Max # of retries for each experiment iteration MAX_NB_EXP_RETRIES = 3 def setup_root_logger(orchestrator): """ Setup the root logger for the test Args: orchestrator (Orchestrator object): Orchestrator object that contains all the configurations Returns: N/A """ root_logger = logging.getLogger() root_logger.handlers.clear() config_stream_handler(root_logger) config_file_handler(logger=root_logger, log_file=os.path.join(orchestrator.result_path, LOG_FILENAME), no_format=False) def run_traffic(orchestrator): """ Run the traffic and collect the results Args: orchestrator (Orchestrator object): Orchestrator object that contains all the configurations Returns: bool: True if the experiment is successful, False otherwise """ orchestrator.rm_old_files() if orchestrator.sync_and_compile() == False: logging.error("Failed to sync and compile the code") sys.exit(-1) logging.info("Sync and compile completed") if orchestrator.generate_switch_config_file() == False: logging.error("Failed to generate switch configuration file") sys.exit(-1) num_repeats = orchestrator.get_num_repeats() for i in range(num_repeats): logging.info("=" * 100) nb_retry = 0 iter_result = False while nb_retry < MAX_NB_EXP_RETRIES: if orchestrator.run_experiment() == False: logging.error("Iteration %d: Failed to complete experiment" % i) logging.error("Iteration %d: Rerun experiment (retry: %d)" % i, nb_retry) nb_retry += 1 orchestrator.clean_up() time.sleep(5) continue logging.info("Iteration %d: Completed experiment" % i) try: orchestrator.clean_up() orchestrator.fetch_results(i) logging.info("Iteration %d: Fetch experiment results" % i) orchestrator.merge_traces(i) logging.info("Iteration %d: Merge the pcap files" % i) except: logging.error("Iteration %d: Result collection failed" % (i)) logging.error("Iteration %d: Rerun experiment (retry: %d)" % (i, nb_retry)) nb_retry += 1 time.sleep(5) continue if orchestrator.check_integrity(i) == False: logging.error("Iteration %d: Integrity check failed" % (i)) logging.error("Iteration %d: Rerun experiment (retry: %d)" % (i, nb_retry)) nb_retry += 1 time.sleep(5) continue iter_result = True break if iter_result is False: logging.error("Iteration %d: Still failed after %d retries" % (i, nb_retry)) return False return True def analyze_retrans_latency(pkt, latency_measurement, is_read, logger): """ Analyze the retransmission latency breakdown for an undelivered packet Args: pkt (Packet object): The undelivered packet latency_measurement (LatencyMeasure object): A LatencyMeasure object that can compute latency breakdown is_read (bool): If we use RDMA READ in this experiment logger (logging.Logger): A logger object Returns: N/A """ # All the undelivered packets should be retransmitted in our test cases if latency_measurement.get_retransmit_pkt(pkt) == None: logger.error("\t\t No retransmit packet found for this packet") logger.error("\t\t It is possible that this undelivered packet is a redundant transmission") return retrans_latency = latency_measurement.get_retransmit_latency(pkt) if is_read == True: # For RDMA READ, we should always find a NACK READ request that triggers retransmission nack = latency_measurement.get_nack(pkt) if nack is not None: trigger = nack.get_trigger() if trigger == TRIGGER_OOS: next_delivered_pkt_delay = latency_measurement.get_qp_next_delivered_pkt_latency(pkt) 
nack_gen_latency = latency_measurement.get_nack_gen_latency(pkt) nack_resp_latency = latency_measurement.get_nack_resp_latency(pkt) logger.info("\t\t Out of sequence (OOS) triggered retransmission") logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) logger.info('\t\t Next delivered packet delay: %fus' % (next_delivered_pkt_delay * 1e6)) logger.info("\t\t NACK READ request generation latency: %fus" % (nack_gen_latency * 1e6)) logger.info('\t\t NACK READ request response latency: %fus' % (nack_resp_latency * 1e6)) elif trigger == TRIGGER_TIMEOUT: nack_resp_latency = latency_measurement.get_nack_resp_latency(pkt) logger.info("\t\t Timeout triggered retransmission") logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) logger.info('\t\t NACK READ request response latency: %fus' % (nack_resp_latency * 1e6)) else: logger.error("\t\t NACK READ request should be triggered by either OOS or timeout") else: nack = latency_measurement.get_qp_first_nack_before_retrans(pkt) if nack is None: logger.error("\t\t Cannot find the NACK READ request to recover this lost packet") return trigger = nack.get_trigger() if trigger == TRIGGER_OOS: logger.info("\t\t Out of sequence (OOS) triggered retransmission") logger.info("\t\t But the NACK READ request indicates a loss (%d) before this packet (%d)" %\ (nack.get_roce_pkt_seq(), pkt.get_roce_pkt_seq())) logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) elif trigger == TRIGGER_TIMEOUT: logger.info("\t\t Timeout triggered retransmission") logger.info("\t\t But the NACK READ request indicates a loss (%d) before this packet (%d)" %\ (nack.get_roce_pkt_seq(), pkt.get_roce_pkt_seq())) logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) else: logger.error("\t\t NACK READ request should be triggered by either OOS or timeout") else: # For other verbs, we can only find a NACK in case of out of sequence arriving packets if latency_measurement.get_nack(pkt) != None: # Out of sequence/NACK triggered retransmission next_delivered_pkt_delay = latency_measurement.get_qp_next_delivered_pkt_latency(pkt) nack_gen_latency = latency_measurement.get_nack_gen_latency(pkt) nack_resp_latency = latency_measurement.get_nack_resp_latency(pkt) logger.info("\t\t Out of sequence (OOS) triggered retransmission") logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) logger.info('\t\t Next delivered packet delay: %fus' % (next_delivered_pkt_delay * 1e6)) logger.info("\t\t NACK generation latency: %fus" % (nack_gen_latency * 1e6)) logger.info('\t\t NACK response latency: %fus' % (nack_resp_latency * 1e6)) elif latency_measurement.get_qp_first_nack_before_retrans(pkt) != None: logger.info("\t\t Out of sequence (OOS) triggered retransmission") logger.info("\t\t But the NACK indicates a loss (%d) before this packet (%d)") logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) else: logger.info("\t\t Timeout triggered retransmission") logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) def verify_results(orchestrator): """ Verify the experiment results Args: orchestrator (Orchestrator object): Orchestrator object that contains all the configurations Returns: N/A """ result_dir = orchestrator.result_path num_repeats = orchestrator.num_repeats mtu = orchestrator.traffic_conf['mtu'] msg_size = orchestrator.traffic_conf['message-size'] num_msgs_per_qp = orchestrator.traffic_conf['num-msgs-per-qp'] aggregate_pcap_filename = 
orchestrator.aggregate_pcap_filename port_map = {'requester': orchestrator.requester.conf['nic']['switch-port'], 'responder': orchestrator.responder.conf['nic']['switch-port'], 'requester-mirror': orchestrator.requester_mirror.conf['nic']['switch-port'], 'responder-mirror': orchestrator.responder_mirror.conf['nic']['switch-port']} requester_ip_list = orchestrator.get_requester_ip_list() responder_ip_list = orchestrator.get_responder_ip_list() for iter in range(num_repeats): iter = str(iter) result_logger = logging.getLogger('Analysis iter %s' % (iter)) result_logger.handlers.clear() config_file_handler(logger=result_logger, log_file=os.path.join(result_dir, iter, RESULT_FILENAME), no_format=True) result_logger.info("=" * 100) result_logger.info("Iteration %s" % iter) switch_msg_snapshot = os.path.join(result_dir, iter, switch.SWITCH_RESULT_DIR, switch.SWITCH_MESSAGE_SNAPSHOT) switch_state_snapshot = os.path.join(result_dir, iter, switch.SWITCH_RESULT_DIR, switch.SWITCH_STATE_SNAPSHOT) pcap_filename = os.path.join(result_dir, iter, host.PCAP_RESULT_DIR, aggregate_pcap_filename) requester_counter_start = os.path.join(result_dir, iter, host.RDMA_RESULT_DIR, host.REQ_START_COUNTER_FILE_NAME) requester_counter_finish = os.path.join(result_dir, iter, host.RDMA_RESULT_DIR, host.REQ_FINISH_COUNTER_FILE_NAME) responder_counter_start = os.path.join(result_dir, iter, host.RDMA_RESULT_DIR, host.RSP_START_COUNTER_FILE_NAME) responder_counter_finish = os.path.join(result_dir, iter, host.RDMA_RESULT_DIR, host.RSP_FINISH_COUNTER_FILE_NAME) switch_counter = SwitchCounter(switch_state_snapshot, port_map) if orchestrator.requester.is_mlnx_nic(): requester_counter = MLNXHostCounter(requester_counter_start, requester_counter_finish) elif orchestrator.requester.is_intel_nic(): requester_counter = IntelHostCounter(requester_counter_start, requester_counter_finish) else: logging.error("Unkown NIC Vendor for rdma requester.") requester_counter = None if orchestrator.responder.is_mlnx_nic(): responder_counter = MLNXHostCounter(responder_counter_start, responder_counter_finish) elif orchestrator.responder.is_intel_nic(): responder_counter = IntelHostCounter(responder_counter_start, responder_counter_finish) else: logging.error("Unkown NIC Vendor for rdma responder.") responder_counter = None qp_info_list = get_qp_info_list(switch_msg_snapshot) packet_list = get_packet_list(pcap_filename) packet_list.sort(key=lambda x:x.get_switch_seqnum()) result_logger.info("Packet trace sorted by switch sequence number.") ## Do integrity check to make sure there is nothing wrong with traces and counters integrity_checker = integrity_check.IntegrityCheck(packet_list=packet_list, switch_counter=switch_counter, requester_ip_list=requester_ip_list, responder_ip_list=responder_ip_list) if integrity_checker.check(): result_logger.info("Integrity check passed") else: result_logger.error("Integrity check failed") continue ## Check host counters host_counter_checker = host_check.HostCounterCheck() if host_counter_checker.check_no_packet_loss(requester_counter, responder_counter): result_logger.info("Host packet discard counter check passed") else: result_logger.error("Host packet discard counter check failed") continue rdma_verb = orchestrator.traffic_conf['rdma-verb'].lower().strip() if rdma_verb not in host.VALID_IB_VERB_LIST_LOWER: logging.error("Invalid RDMA verb: %s" % rdma_verb) continue ## RDMA READ if rdma_verb == 'read': read_gbn_checker = read_gbn_check.ReadGBNCheck(packet_list=packet_list, qp_info_list=qp_info_list, 
num_msgs_per_qp=num_msgs_per_qp, msg_size=msg_size, mtu=mtu) if read_gbn_checker.check_all_qps() == True: result_logger.info("READ Go-Back-N state machine check passed for all qps.") else: result_logger.error("READ Go-Back-N state machine check failed") continue gbn_counter_check = read_gbn_checker.check_counters(sender_counter=responder_counter, receiver_counter=requester_counter) if gbn_counter_check == True: result_logger.info("READ Go-Back-N counter check passed.") else: result_logger.error("READ Go-Back-N counter check failed") continue ## A mix of RDMA SEND and READ elif rdma_verb == 'send_read': num_qps_send, num_qps_read = [int(x) for x in orchestrator.traffic_conf['num-qps'].split(',')] send_qp_info_list = qp_info_list[0:num_qps_send] read_qp_info_list = qp_info_list[num_qps_send:] send_gbn_checker = gbn_check.GBNCheck(packet_list=packet_list, qp_info_list=send_qp_info_list, num_data_pkts=math.ceil(msg_size/mtu) * num_msgs_per_qp) read_gbn_checker = read_gbn_check.ReadGBNCheck(packet_list=packet_list, qp_info_list=read_qp_info_list, num_msgs_per_qp=num_msgs_per_qp, msg_size=msg_size, mtu=mtu) if send_gbn_checker.check_all_qps() == True and read_gbn_checker.check_all_qps() == True: result_logger.info("Go-Back-N state machine check passed for all qps.") else: result_logger.error("Go-Back-N state machine check failed") continue send_gbn_counter_check = send_gbn_checker.check_counters(sender_counter=requester_counter, receiver_counter=responder_counter) read_gbn_counter_check = read_gbn_checker.check_counters(sender_counter=responder_counter, receiver_counter=requester_counter) if send_gbn_counter_check == True and read_gbn_counter_check == True: result_logger.info("Go-Back-N counter check passed for all qps.") else: result_logger.error("Go-Back-N counter check failed") continue else: ## Check the traces and counters against the GBN state machine gbn_checker = gbn_check.GBNCheck(packet_list=packet_list, qp_info_list=qp_info_list, num_data_pkts=math.ceil(msg_size/mtu) * num_msgs_per_qp) if gbn_checker.check_all_qps() == True: result_logger.info("Go-Back-N state machine check passed for all qps.") else: result_logger.error("Go-Back-N state machine check failed") continue gbn_counter_check = gbn_checker.check_counters(sender_counter=requester_counter, receiver_counter=responder_counter) if gbn_counter_check == True: result_logger.info("Go-Back-N counter check passed.") else: result_logger.error("Go-Back-N counter check failed") continue ## Output the latency for undelivered packets num_qps = len(qp_info_list) for qp_index in range(num_qps): is_read = (rdma_verb == 'read') or (rdma_verb == 'send_read' and qp_index >= num_qps_send)
latency_measurement = LatencyMeasure(packet_list=packet_list,
6
2023-12-09 08:21:14+00:00
24k
ebb-earl-co/tidal-wave
tidal_wave/main.py
[ { "identifier": "login", "path": "tidal_wave/login.py", "snippet": "def login(\n audio_format: AudioFormat,\n) -> Tuple[Optional[requests.Session], Optional[AudioFormat]]:\n \"\"\"Given a selected audio_format, either log in \"automatically\"\n via the Fire TV OAuth 2.0 flow, or ask for an Andr...
from contextlib import closing from pathlib import Path from typing import Optional, Union from .login import login, AudioFormat, LogLevel from .album import Album from .artist import Artist from .mix import Mix from .playlist import Playlist from .track import Track from .video import Video from .models import ( match_tidal_url, TidalAlbum, TidalArtist, TidalMix, TidalPlaylist, TidalTrack, TidalVideo, ) from platformdirs import user_music_path from typing_extensions import Annotated import logging import typer
17,350
app = typer.Typer() @app.command() def main( tidal_url: Annotated[ str, typer.Argument( help="The Tidal album or artist or mix or playlist or track or video to download" ), ], audio_format: Annotated[ AudioFormat, typer.Option(case_sensitive=False) ] = AudioFormat.lossless.value, output_directory: Annotated[ Path, typer.Argument( help="The parent directory under which directory(ies) of files will be written" ), ] = user_music_path(), loglevel: Annotated[ LogLevel, typer.Option(case_sensitive=False) ] = LogLevel.info.value, include_eps_singles: Annotated[ bool, typer.Option( "--include-eps-singles", help="No-op unless passing TIDAL artist. Whether to include artist's EPs and singles with albums", ), ] = False, ): logging.basicConfig( format="%(asctime)s,%(msecs)03d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s", datefmt="%Y-%m-%d:%H:%M:%S", level=logging.getLevelName(loglevel.value), ) logger = logging.getLogger(__name__) tidal_resource: Optional[ Union[TidalAlbum, TidalMix, TidalPlaylist, TidalTrack, TidalVideo] ] = match_tidal_url(tidal_url) if tidal_resource is None: logger.critical( f"Cannot parse '{tidal_url}' as a TIDAL album, artist, mix, playlist, track, or video URL" ) raise typer.Exit(code=1) s, audio_format = login(audio_format=audio_format) if s is None: raise typer.Exit(code=1) with closing(s) as session: if isinstance(tidal_resource, TidalTrack): track = Track(track_id=tidal_resource.tidal_id) track.get( session=session, audio_format=audio_format, out_dir=output_directory ) if loglevel == LogLevel.debug: track.dump() raise typer.Exit(code=0) elif isinstance(tidal_resource, TidalAlbum): album = Album(album_id=tidal_resource.tidal_id) album.get( session=session, audio_format=audio_format, out_dir=output_directory ) if loglevel == LogLevel.debug: album.dump() raise typer.Exit(code=0) elif isinstance(tidal_resource, TidalArtist):
artist = Artist(artist_id=tidal_resource.tidal_id)
4
2023-12-12 21:50:25+00:00
24k
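The tidal-wave record above builds its command-line interface with Typer and Annotated parameter hints. Here is a minimal, self-contained sketch of that pattern with invented argument and option names rather than the project's real ones.

from pathlib import Path
from typing_extensions import Annotated

import typer

app = typer.Typer()

@app.command()
def main(
    url: Annotated[str, typer.Argument(help="Resource URL to download")],
    out_dir: Annotated[Path, typer.Argument(help="Output directory")] = Path("."),
    verbose: Annotated[bool, typer.Option("--verbose")] = False,
):
    # Echo what would be downloaded; a real command would do the work here.
    if verbose:
        typer.echo(f"Downloading {url} to {out_dir}")

if __name__ == "__main__":
    app()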
ZS-YANG/FemtoDet-v3
mmdet/models/utils/misc.py
[ { "identifier": "SampleList", "path": "mmdet/structures/det_data_sample.py", "snippet": "class DetDataSample(BaseDataElement):\n def proposals(self) -> InstanceData:\n def proposals(self, value: InstanceData):\n def proposals(self):\n def gt_instances(self) -> InstanceData:\n def gt_insta...
from functools import partial from typing import List, Optional, Sequence, Tuple, Union from mmengine.structures import InstanceData from mmengine.utils import digit_version from six.moves import map, zip from torch import Tensor from torch.autograd import Function from torch.nn import functional as F from mmdet.structures import SampleList from mmdet.structures.bbox import BaseBoxes, get_box_type, stack_boxes from mmdet.structures.mask import BitmapMasks, PolygonMasks from mmdet.utils import OptInstanceList import numpy as np import torch
17,513
def empty_instances(batch_img_metas: List[dict], device: torch.device, task_type: str, instance_results: OptInstanceList = None, mask_thr_binary: Union[int, float] = 0, box_type: Union[str, type] = 'hbox', use_box_type: bool = False, num_classes: int = 80, score_per_cls: bool = False) -> List[InstanceData]: """Handle predicted instances when RoI is empty. Note: If ``instance_results`` is not None, it will be modified in place internally, and then return ``instance_results`` Args: batch_img_metas (list[dict]): List of image information. device (torch.device): Device of tensor. task_type (str): Expected returned task type. it currently supports bbox and mask. instance_results (list[:obj:`InstanceData`]): List of instance results. mask_thr_binary (int, float): mask binarization threshold. Defaults to 0. box_type (str or type): The empty box type. Defaults to `hbox`. use_box_type (bool): Whether to warp boxes with the box type. Defaults to False. num_classes (int): num_classes of bbox_head. Defaults to 80. score_per_cls (bool): Whether to generate classwise score for the empty instance. ``score_per_cls`` will be True when the model needs to produce raw results without nms. Defaults to False. Returns: list[:obj:`InstanceData`]: Detection results of each image """ assert task_type in ('bbox', 'mask'), 'Only support bbox and mask,' \ f' but got {task_type}' if instance_results is not None: assert len(instance_results) == len(batch_img_metas) results_list = [] for img_id in range(len(batch_img_metas)): if instance_results is not None: results = instance_results[img_id] assert isinstance(results, InstanceData) else: results = InstanceData() if task_type == 'bbox': _, box_type = get_box_type(box_type) bboxes = torch.zeros(0, box_type.box_dim, device=device) if use_box_type: bboxes = box_type(bboxes, clone=False) results.bboxes = bboxes score_shape = (0, num_classes + 1) if score_per_cls else (0, ) results.scores = torch.zeros(score_shape, device=device) results.labels = torch.zeros((0, ), device=device, dtype=torch.long) else: # TODO: Handle the case where rescale is false img_h, img_w = batch_img_metas[img_id]['ori_shape'][:2] # the type of `im_mask` will be torch.bool or torch.uint8, # where uint8 if for visualization and debugging. im_mask = torch.zeros( 0, img_h, img_w, device=device, dtype=torch.bool if mask_thr_binary >= 0 else torch.uint8) results.masks = im_mask results_list.append(results) return results_list def multi_apply(func, *args, **kwargs): """Apply function to a list of arguments. Note: This function applies the ``func`` to multiple inputs and map the multiple outputs of the ``func`` into different list. Each list contains the same type of outputs corresponding to different inputs. Args: func (Function): A function that will be applied to a list of arguments Returns: tuple(list): A tuple containing multiple list, each list contains \ a kind of returned results by the function """ pfunc = partial(func, **kwargs) if kwargs else func map_results = map(pfunc, *args) return tuple(map(list, zip(*map_results))) def unmap(data, count, inds, fill=0): """Unmap a subset of item (data) back to the original set of items (of size count)""" if data.dim() == 1: ret = data.new_full((count, ), fill) ret[inds.type(torch.bool)] = data else: new_size = (count, ) + data.size()[1:] ret = data.new_full(new_size, fill) ret[inds.type(torch.bool), :] = data return ret def mask2ndarray(mask): """Convert Mask to ndarray.. 
Args: mask (:obj:`BitmapMasks` or :obj:`PolygonMasks` or torch.Tensor or np.ndarray): The mask to be converted. Returns: np.ndarray: Ndarray mask of shape (n, h, w) that has been converted """
# Copyright (c) OpenMMLab. All rights reserved. class SigmoidGeometricMean(Function): """Forward and backward function of geometric mean of two sigmoid functions. This implementation with analytical gradient function substitutes the autograd function of (x.sigmoid() * y.sigmoid()).sqrt(). The original implementation incurs none during gradient backprapagation if both x and y are very small values. """ @staticmethod def forward(ctx, x, y): x_sigmoid = x.sigmoid() y_sigmoid = y.sigmoid() z = (x_sigmoid * y_sigmoid).sqrt() ctx.save_for_backward(x_sigmoid, y_sigmoid, z) return z @staticmethod def backward(ctx, grad_output): x_sigmoid, y_sigmoid, z = ctx.saved_tensors grad_x = grad_output * z * (1 - x_sigmoid) / 2 grad_y = grad_output * z * (1 - y_sigmoid) / 2 return grad_x, grad_y sigmoid_geometric_mean = SigmoidGeometricMean.apply def interpolate_as(source, target, mode='bilinear', align_corners=False): """Interpolate the `source` to the shape of the `target`. The `source` must be a Tensor, but the `target` can be a Tensor or a np.ndarray with the shape (..., target_h, target_w). Args: source (Tensor): A 3D/4D Tensor with the shape (N, H, W) or (N, C, H, W). target (Tensor | np.ndarray): The interpolation target with the shape (..., target_h, target_w). mode (str): Algorithm used for interpolation. The options are the same as those in F.interpolate(). Default: ``'bilinear'``. align_corners (bool): The same as the argument in F.interpolate(). Returns: Tensor: The interpolated source Tensor. """ assert len(target.shape) >= 2 def _interpolate_as(source, target, mode='bilinear', align_corners=False): """Interpolate the `source` (4D) to the shape of the `target`.""" target_h, target_w = target.shape[-2:] source_h, source_w = source.shape[-2:] if target_h != source_h or target_w != source_w: source = F.interpolate( source, size=(target_h, target_w), mode=mode, align_corners=align_corners) return source if len(source.shape) == 3: source = source[:, None, :, :] source = _interpolate_as(source, target, mode, align_corners) return source[:, 0, :, :] else: return _interpolate_as(source, target, mode, align_corners) def unpack_gt_instances(batch_data_samples: SampleList) -> tuple: """Unpack ``gt_instances``, ``gt_instances_ignore`` and ``img_metas`` based on ``batch_data_samples`` Args: batch_data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. Returns: tuple: - batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. - batch_gt_instances_ignore (list[:obj:`InstanceData`]): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. - batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. 
""" batch_gt_instances = [] batch_gt_instances_ignore = [] batch_img_metas = [] for data_sample in batch_data_samples: batch_img_metas.append(data_sample.metainfo) batch_gt_instances.append(data_sample.gt_instances) if 'ignored_instances' in data_sample: batch_gt_instances_ignore.append(data_sample.ignored_instances) else: batch_gt_instances_ignore.append(None) return batch_gt_instances, batch_gt_instances_ignore, batch_img_metas def empty_instances(batch_img_metas: List[dict], device: torch.device, task_type: str, instance_results: OptInstanceList = None, mask_thr_binary: Union[int, float] = 0, box_type: Union[str, type] = 'hbox', use_box_type: bool = False, num_classes: int = 80, score_per_cls: bool = False) -> List[InstanceData]: """Handle predicted instances when RoI is empty. Note: If ``instance_results`` is not None, it will be modified in place internally, and then return ``instance_results`` Args: batch_img_metas (list[dict]): List of image information. device (torch.device): Device of tensor. task_type (str): Expected returned task type. it currently supports bbox and mask. instance_results (list[:obj:`InstanceData`]): List of instance results. mask_thr_binary (int, float): mask binarization threshold. Defaults to 0. box_type (str or type): The empty box type. Defaults to `hbox`. use_box_type (bool): Whether to warp boxes with the box type. Defaults to False. num_classes (int): num_classes of bbox_head. Defaults to 80. score_per_cls (bool): Whether to generate classwise score for the empty instance. ``score_per_cls`` will be True when the model needs to produce raw results without nms. Defaults to False. Returns: list[:obj:`InstanceData`]: Detection results of each image """ assert task_type in ('bbox', 'mask'), 'Only support bbox and mask,' \ f' but got {task_type}' if instance_results is not None: assert len(instance_results) == len(batch_img_metas) results_list = [] for img_id in range(len(batch_img_metas)): if instance_results is not None: results = instance_results[img_id] assert isinstance(results, InstanceData) else: results = InstanceData() if task_type == 'bbox': _, box_type = get_box_type(box_type) bboxes = torch.zeros(0, box_type.box_dim, device=device) if use_box_type: bboxes = box_type(bboxes, clone=False) results.bboxes = bboxes score_shape = (0, num_classes + 1) if score_per_cls else (0, ) results.scores = torch.zeros(score_shape, device=device) results.labels = torch.zeros((0, ), device=device, dtype=torch.long) else: # TODO: Handle the case where rescale is false img_h, img_w = batch_img_metas[img_id]['ori_shape'][:2] # the type of `im_mask` will be torch.bool or torch.uint8, # where uint8 if for visualization and debugging. im_mask = torch.zeros( 0, img_h, img_w, device=device, dtype=torch.bool if mask_thr_binary >= 0 else torch.uint8) results.masks = im_mask results_list.append(results) return results_list def multi_apply(func, *args, **kwargs): """Apply function to a list of arguments. Note: This function applies the ``func`` to multiple inputs and map the multiple outputs of the ``func`` into different list. Each list contains the same type of outputs corresponding to different inputs. 
Args: func (Function): A function that will be applied to a list of arguments Returns: tuple(list): A tuple containing multiple list, each list contains \ a kind of returned results by the function """ pfunc = partial(func, **kwargs) if kwargs else func map_results = map(pfunc, *args) return tuple(map(list, zip(*map_results))) def unmap(data, count, inds, fill=0): """Unmap a subset of item (data) back to the original set of items (of size count)""" if data.dim() == 1: ret = data.new_full((count, ), fill) ret[inds.type(torch.bool)] = data else: new_size = (count, ) + data.size()[1:] ret = data.new_full(new_size, fill) ret[inds.type(torch.bool), :] = data return ret def mask2ndarray(mask): """Convert Mask to ndarray.. Args: mask (:obj:`BitmapMasks` or :obj:`PolygonMasks` or torch.Tensor or np.ndarray): The mask to be converted. Returns: np.ndarray: Ndarray mask of shape (n, h, w) that has been converted """
if isinstance(mask, (BitmapMasks, PolygonMasks)):
5
2023-12-11 15:23:03+00:00
24k
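The mmdet utilities shown in the row above revolve around the `multi_apply` helper, which maps a function over several parallel argument lists and transposes the per-call result tuples into per-field lists. A minimal standalone sketch of that pattern follows; the toy `add_and_scale` function is hypothetical and only illustrates the call convention.

from functools import partial


def multi_apply(func, *args, **kwargs):
    # Apply `func` to each group of parallel arguments, then transpose the
    # per-call tuples into one list per returned field (as in the row above).
    pfunc = partial(func, **kwargs) if kwargs else func
    map_results = map(pfunc, *args)
    return tuple(map(list, zip(*map_results)))


def add_and_scale(x, y, scale=1):
    # Toy per-item function returning two values per input pair.
    return (x + y) * scale, (x - y) * scale


sums, diffs = multi_apply(add_and_scale, [1, 2, 3], [4, 5, 6], scale=10)
print(sums)   # [50, 70, 90]
print(diffs)  # [-30, -30, -30]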
chinhsuanwu/ifusion
model/zero123.py
[ { "identifier": "inject_trainable_lora_extended", "path": "ldm/lora.py", "snippet": "def inject_trainable_lora_extended(\n model: nn.Module,\n target_replace_module: Set[str] = UNET_EXTENDED_TARGET_REPLACE,\n r: int = 4,\n loras=None, # path to lora .pt\n eval=True,\n):\n \"\"\"\n ...
import itertools import torch import torch.nn as nn from dataclasses import dataclass from diffusers import DDIMScheduler from einops import rearrange from omegaconf import OmegaConf from ldm.lora import ( inject_trainable_lora_extended, monkeypatch_remove_lora, save_lora_weight, ) from ldm.models.diffusion.ddpm import LatentDiffusion from ldm.util import load_model_from_config from util.pose import make_T from util.typing import * from util.util import default
15,836
self.configure() def configure(self) -> None: print("[INFO] Loading Zero123...") self.pretrained_config = OmegaConf.load(self.config.pretrained_config) self.weights_dtype = torch.float32 self.model: LatentDiffusion = load_model_from_config( self.pretrained_config, self.config.pretrained_model_name_or_path, device=self.device, vram_O=self.config.vram_O, ) for p in self.model.parameters(): p.requires_grad_(False) self.num_train_timesteps = self.pretrained_config.model.params.timesteps self.scheduler = DDIMScheduler( self.num_train_timesteps, self.pretrained_config.model.params.linear_start, self.pretrained_config.model.params.linear_end, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, steps_offset=1, ) self.num_train_timesteps = self.scheduler.config.num_train_timesteps self.set_min_max_steps( min_step_percent=self.config.min_step_percent, max_step_percent=self.config.max_step_percent, ) print("[INFO] Loaded Zero123") @torch.cuda.amp.autocast(enabled=False) def set_min_max_steps( self, min_step_percent: float = 0.02, max_step_percent: float = 0.98 ): self.min_step = int(self.num_train_timesteps * min_step_percent) self.max_step = int(self.num_train_timesteps * max_step_percent) @torch.cuda.amp.autocast(enabled=False) @torch.no_grad() def get_image_embeds( self, image: Float[Tensor, "B 3 256 256"] ) -> Tuple[Float[Tensor, "B 1 768"], Float[Tensor, "B 4 32 32"]]: c_crossattn = self.model.get_learned_conditioning(image.to(self.weights_dtype)) c_concat = self.model.encode_first_stage(image.to(self.weights_dtype)).mode() return c_crossattn, c_concat @torch.cuda.amp.autocast(enabled=False) def encode_image( self, image: Float[Tensor, "B 3 256 256"] ) -> Float[Tensor, "B 4 32 32"]: input_dtype = image.dtype latent = self.model.get_first_stage_encoding( self.model.encode_first_stage(image.to(self.weights_dtype)) ) return latent.to(input_dtype) # [B, 4, 32, 32] Latent space image @torch.cuda.amp.autocast(enabled=False) def decode_latent( self, latent: Float[Tensor, "B 4 H W"], ) -> Float[Tensor, "B 3 512 512"]: input_dtype = latent.dtype image = self.model.decode_first_stage(latent) image = (image * 0.5 + 0.5).clamp(0, 1) return image.to(input_dtype) @staticmethod @torch.no_grad() def make_cond(cond): """Add zeros to the beginning of cond""" return {k: [torch.cat([torch.zeros_like(v), v])] for k, v in cond.items()} @torch.cuda.amp.autocast(enabled=False) @torch.no_grad() def clip_camera_projection( self, theta: Float[Tensor, "B"], azimuth: Float[Tensor, "B"], distance: Float[Tensor, "B"], c_crossattn: Float[Tensor, "B 1 768"], in_deg: bool = False, ): T = make_T(theta, azimuth, distance, in_deg=in_deg).T[:, None, :] clip_emb = self.model.cc_projection(torch.cat([c_crossattn, T], dim=-1)) return clip_emb def inject_lora( self, ckpt_fp: str = None, rank: int = 12, target_replace_module: List[str] = ["CrossAttention", "GEGLU"], eval: bool = False, ): print( f"[INFO] Injecting LoRA from " + (str(ckpt_fp) if ckpt_fp is not None else "scratch"), ) lora_params, _ = inject_trainable_lora_extended( self.model.model, target_replace_module=set(target_replace_module), r=rank, loras=ckpt_fp, eval=eval, ) if not eval: self.require_grad_params += itertools.chain(*lora_params) return self def save_lora( self, ckpt_fp: str, target_replace_module: List[str] = ["CrossAttention", "GEGLU"], ):
class Zero123(nn.Module): @dataclass class Config: pretrained_model_name_or_path: str = "ldm/ckpt/zero123-xl.ckpt" pretrained_config: str = "ldm/ckpt/sd-objaverse-finetune-c_concat-256.yaml" vram_O: bool = False min_step_percent: float = 0.02 max_step_percent: float = 0.98 config: Config def __init__(self, **kwargs) -> None: super().__init__() self.config = OmegaConf.structured(self.Config(**kwargs)) self.device = "cuda" self.require_grad_params = [] self.configure() def configure(self) -> None: print("[INFO] Loading Zero123...") self.pretrained_config = OmegaConf.load(self.config.pretrained_config) self.weights_dtype = torch.float32 self.model: LatentDiffusion = load_model_from_config( self.pretrained_config, self.config.pretrained_model_name_or_path, device=self.device, vram_O=self.config.vram_O, ) for p in self.model.parameters(): p.requires_grad_(False) self.num_train_timesteps = self.pretrained_config.model.params.timesteps self.scheduler = DDIMScheduler( self.num_train_timesteps, self.pretrained_config.model.params.linear_start, self.pretrained_config.model.params.linear_end, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, steps_offset=1, ) self.num_train_timesteps = self.scheduler.config.num_train_timesteps self.set_min_max_steps( min_step_percent=self.config.min_step_percent, max_step_percent=self.config.max_step_percent, ) print("[INFO] Loaded Zero123") @torch.cuda.amp.autocast(enabled=False) def set_min_max_steps( self, min_step_percent: float = 0.02, max_step_percent: float = 0.98 ): self.min_step = int(self.num_train_timesteps * min_step_percent) self.max_step = int(self.num_train_timesteps * max_step_percent) @torch.cuda.amp.autocast(enabled=False) @torch.no_grad() def get_image_embeds( self, image: Float[Tensor, "B 3 256 256"] ) -> Tuple[Float[Tensor, "B 1 768"], Float[Tensor, "B 4 32 32"]]: c_crossattn = self.model.get_learned_conditioning(image.to(self.weights_dtype)) c_concat = self.model.encode_first_stage(image.to(self.weights_dtype)).mode() return c_crossattn, c_concat @torch.cuda.amp.autocast(enabled=False) def encode_image( self, image: Float[Tensor, "B 3 256 256"] ) -> Float[Tensor, "B 4 32 32"]: input_dtype = image.dtype latent = self.model.get_first_stage_encoding( self.model.encode_first_stage(image.to(self.weights_dtype)) ) return latent.to(input_dtype) # [B, 4, 32, 32] Latent space image @torch.cuda.amp.autocast(enabled=False) def decode_latent( self, latent: Float[Tensor, "B 4 H W"], ) -> Float[Tensor, "B 3 512 512"]: input_dtype = latent.dtype image = self.model.decode_first_stage(latent) image = (image * 0.5 + 0.5).clamp(0, 1) return image.to(input_dtype) @staticmethod @torch.no_grad() def make_cond(cond): """Add zeros to the beginning of cond""" return {k: [torch.cat([torch.zeros_like(v), v])] for k, v in cond.items()} @torch.cuda.amp.autocast(enabled=False) @torch.no_grad() def clip_camera_projection( self, theta: Float[Tensor, "B"], azimuth: Float[Tensor, "B"], distance: Float[Tensor, "B"], c_crossattn: Float[Tensor, "B 1 768"], in_deg: bool = False, ): T = make_T(theta, azimuth, distance, in_deg=in_deg).T[:, None, :] clip_emb = self.model.cc_projection(torch.cat([c_crossattn, T], dim=-1)) return clip_emb def inject_lora( self, ckpt_fp: str = None, rank: int = 12, target_replace_module: List[str] = ["CrossAttention", "GEGLU"], eval: bool = False, ): print( f"[INFO] Injecting LoRA from " + (str(ckpt_fp) if ckpt_fp is not None else "scratch"), ) lora_params, _ = inject_trainable_lora_extended( self.model.model, 
target_replace_module=set(target_replace_module), r=rank, loras=ckpt_fp, eval=eval, ) if not eval: self.require_grad_params += itertools.chain(*lora_params) return self def save_lora( self, ckpt_fp: str, target_replace_module: List[str] = ["CrossAttention", "GEGLU"], ):
save_lora_weight(
2
2023-12-17 12:45:38+00:00
24k
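The Zero123 row above prepares guidance conditioning in `make_cond` by stacking an all-zero (unconditional) copy in front of each conditioning tensor, doubling the batch dimension. A small sketch of that step on dummy tensors; the shapes are illustrative only, not taken from the checkpoint.

import torch


def make_cond(cond):
    # Prepend a zeroed copy to every conditioning tensor: the first half of the
    # batch is the unconditional branch, the second half the conditional one.
    return {k: [torch.cat([torch.zeros_like(v), v])] for k, v in cond.items()}


cond = {
    "c_crossattn": torch.randn(2, 1, 768),   # CLIP-style cross-attention tokens
    "c_concat": torch.randn(2, 4, 32, 32),   # encoded image latent to concatenate
}
doubled = make_cond(cond)
print(doubled["c_crossattn"][0].shape)  # torch.Size([4, 1, 768])
print(doubled["c_concat"][0].shape)     # torch.Size([4, 4, 32, 32])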
penghao-wu/vstar
VisualSearch/utils/dataset.py
[ { "identifier": "conversation", "path": "VisualSearch/model/llava/conversation.py", "snippet": "class SeparatorStyle(Enum):\nclass Conversation:\n SINGLE = auto()\n TWO = auto()\n MPT = auto()\n PLAIN = auto()\n LLAMA_2 = auto()\n W, H = image.size\n ...
import glob import os import random import cv2 import numpy as np import torch import torch.nn.functional as F from PIL import Image from pycocotools import mask from transformers import CLIPImageProcessor from transformers import OwlViTProcessor from VisualSearch.model.llava import conversation as conversation_lib from VisualSearch.model.llava.constants import (DEFAULT_IMAGE_TOKEN, IGNORE_INDEX, IMAGE_TOKEN_INDEX) from VisualSearch.model.llava.mm_utils import tokenizer_image_token from VisualSearch.utils.data_processing import get_mask_from_json from VisualSearch.utils.refer import REFER from VisualSearch.utils.refer_seg_dataset import ReferSegDataset from VisualSearch.utils.general_segdet_dataset import SegDetDataset from VisualSearch.utils.mixed_grounding_dataset import MixedGroundingDataset from VisualSearch.utils.vqa_dataset import VQADataset from VisualSearch.utils.utils import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IMAGE_TOKEN) from VisualSearch.utils.utils import box_xyxy_to_cxcywh, expand2square
16,059
self, base_dir, tokenizer, vision_tower, samples_per_epoch=500 * 8 * 2 * 10, precision: str = "fp32", num_classes_per_sample: int = 3, exclude_val=False, dataset="general_segdet||refer_seg||vqa||reason_seg", sample_rate=[9, 3, 3, 1], general_segdet_data="objects365||cocostuff||paco_lvis", general_segdet_sample_rate=[2,1,1], refer_seg_data="refclef||refcoco||refcoco+||refcocog", vqa_data="possible_locations_conv_86k||llava_instruct_80k", vqa_sample_rate=[2,1], ): self.exclude_val = exclude_val self.dataset = dataset self.samples_per_epoch = samples_per_epoch self.num_classes_per_sample = num_classes_per_sample sample_rate = np.array(sample_rate) self.sample_rate = sample_rate / sample_rate.sum() self.base_dir = base_dir self.tokenizer = tokenizer self.precision = precision self.datasets = dataset.split("||") self.all_datasets = [] for dataset in self.datasets: if dataset == "general_segdet": self.all_datasets.append( SegDetDataset( base_dir, tokenizer, vision_tower, samples_per_epoch, precision, num_classes_per_sample, exclude_val, general_segdet_data, general_segdet_sample_rate, ) ) elif dataset == "refer_seg": self.all_datasets.append( ReferSegDataset( base_dir, tokenizer, vision_tower, samples_per_epoch, precision, num_classes_per_sample, exclude_val, refer_seg_data, ) ) elif dataset == "vqa": self.all_datasets.append( VQADataset( base_dir, tokenizer, vision_tower, samples_per_epoch, precision, num_classes_per_sample, exclude_val, vqa_data, vqa_sample_rate, ) ) elif dataset == "mixed_grounding": self.all_datasets.append( MixedGroundingDataset( base_dir, tokenizer, vision_tower, samples_per_epoch, precision, num_classes_per_sample, exclude_val, ) ) def __len__(self): return self.samples_per_epoch def __getitem__(self, idx): ind = np.random.choice(list(range(len(self.datasets))), p=self.sample_rate) data = self.all_datasets[ind] inference = False return *data[0], inference class ValDataset(torch.utils.data.Dataset): pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1) pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1) img_size = 1024 ignore_label = 255 def __init__( self, base_dir, tokenizer, vision_tower, val_dataset, ): self.base_dir = base_dir splits = val_dataset.split("|") if len(splits) == 2: ds, split = splits images = glob.glob( os.path.join(self.base_dir, "reason_seg", ds, split, "*.jpg") ) self.images = images self.data_type = "reason_seg" elif len(splits) == 3: self.base_dir = os.path.join(self.base_dir, 'refer_seg') ds, splitBy, split = splits
cv2.setNumThreads(1) def collate_fn( batch, tokenizer=None, conv_type="llava_v1", use_mm_start_end=True, local_rank=-1 ): image_path_list = [] images_list = [] images_clip_list = [] conversation_list = [] masks_list = [] label_list = [] bboxes_labels_list = [] bboxes_valid_list = [] masks_valid_list = [] resize_list = [] questions_list = [] sampled_classes_list = [] offset_list = [0] cnt = 0 inferences = [] for ( image_path, images, images_clip, conversations, masks, label, bboxes_labels, bboxes_valid, masks_valid, resize, questions, sampled_classes, inference, ) in batch: image_path_list.append(image_path) images_list.append(images) images_clip_list.append(images_clip) conversation_list.extend(conversations) label_list.append(label) masks_list.append(masks.float()) bboxes_labels_list.extend(bboxes_labels) bboxes_valid_list.extend(bboxes_valid) masks_valid_list.append(torch.tensor(masks_valid)) resize_list.append(resize) questions_list.append(questions) sampled_classes_list.append(sampled_classes) cnt += len(conversations) offset_list.append(cnt) inferences.append(inference) if use_mm_start_end: # replace <image> token for i in range(len(conversation_list)): replace_token = DEFAULT_IMAGE_TOKEN replace_token = ( DEFAULT_IM_START_TOKEN + replace_token + DEFAULT_IM_END_TOKEN ) conversation_list[i] = conversation_list[i].replace( DEFAULT_IMAGE_TOKEN, replace_token ) input_ids = [ tokenizer_image_token(prompt, tokenizer, return_tensors="pt") for prompt in conversation_list ] input_ids = torch.nn.utils.rnn.pad_sequence( input_ids, batch_first=True, padding_value=tokenizer.pad_token_id ) attention_masks = input_ids.ne(tokenizer.pad_token_id) for i in range(len(bboxes_valid_list)): bboxes_valid = bboxes_valid_list[i] attention_mask = attention_masks[i] if not bboxes_valid: attention_mask = attention_mask & input_ids[i].ne(tokenizer("[LOC]", add_special_tokens=False).input_ids[0]) attention_masks[i] = attention_mask conv = conversation_lib.default_conversation.copy() targets = input_ids.clone() if conv_type == "llava_v1": sep = conv.sep + conv.roles[1] + ": " else: sep = "[/INST] " for conversation, target in zip(conversation_list, targets): total_len = int(target.ne(tokenizer.pad_token_id).sum()) rounds = conversation.split(conv.sep2) cur_len = 1 target[:cur_len] = IGNORE_INDEX for i, rou in enumerate(rounds): if rou == "": break parts = rou.split(sep) # if len(parts) != 2: # break assert len(parts) == 2, (len(parts), rou) parts[0] += sep if DEFAULT_IMAGE_TOKEN in conversation: round_len = len(tokenizer_image_token(rou, tokenizer)) instruction_len = len(tokenizer_image_token(parts[0], tokenizer)) - 2 else: round_len = len(tokenizer(rou).input_ids) instruction_len = len(tokenizer(parts[0]).input_ids) - 2 target[cur_len : cur_len + instruction_len] = IGNORE_INDEX cur_len += round_len target[cur_len:] = IGNORE_INDEX if False: z = target.clone() z = torch.where(z == IGNORE_INDEX, tokenizer.unk_token_id, z) if local_rank == 0: print( "conversation: ", conversation, "tokenizer.decode(z): ", tokenizer.decode(z), ) if cur_len < tokenizer.model_max_length: assert cur_len == total_len if inferences[0] == False: truncate_len = tokenizer.model_max_length - 255 if input_ids.shape[1] > truncate_len: input_ids = input_ids[:, :truncate_len] targets = targets[:, :truncate_len] attention_masks = attention_masks[:, :truncate_len] return { "image_paths": image_path_list, "images": torch.stack(images_list, dim=0), "images_clip": torch.stack(images_clip_list, dim=0), "input_ids": input_ids, "labels": targets, 
"bboxes_labels_list": bboxes_labels_list, "bboxes_valid_list": torch.tensor(bboxes_valid_list), "masks_valid_list": masks_valid_list, "attention_masks": attention_masks, "masks_list": masks_list, "label_list": label_list, "resize_list": resize_list, "offset": torch.LongTensor(offset_list), "questions_list": questions_list, "sampled_classes_list": sampled_classes_list, "inference": inferences[0], "conversation_list": conversation_list, } class HybridDataset(torch.utils.data.Dataset): pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1) pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1) img_size = 1024 ignore_label = 255 def __init__( self, base_dir, tokenizer, vision_tower, samples_per_epoch=500 * 8 * 2 * 10, precision: str = "fp32", num_classes_per_sample: int = 3, exclude_val=False, dataset="general_segdet||refer_seg||vqa||reason_seg", sample_rate=[9, 3, 3, 1], general_segdet_data="objects365||cocostuff||paco_lvis", general_segdet_sample_rate=[2,1,1], refer_seg_data="refclef||refcoco||refcoco+||refcocog", vqa_data="possible_locations_conv_86k||llava_instruct_80k", vqa_sample_rate=[2,1], ): self.exclude_val = exclude_val self.dataset = dataset self.samples_per_epoch = samples_per_epoch self.num_classes_per_sample = num_classes_per_sample sample_rate = np.array(sample_rate) self.sample_rate = sample_rate / sample_rate.sum() self.base_dir = base_dir self.tokenizer = tokenizer self.precision = precision self.datasets = dataset.split("||") self.all_datasets = [] for dataset in self.datasets: if dataset == "general_segdet": self.all_datasets.append( SegDetDataset( base_dir, tokenizer, vision_tower, samples_per_epoch, precision, num_classes_per_sample, exclude_val, general_segdet_data, general_segdet_sample_rate, ) ) elif dataset == "refer_seg": self.all_datasets.append( ReferSegDataset( base_dir, tokenizer, vision_tower, samples_per_epoch, precision, num_classes_per_sample, exclude_val, refer_seg_data, ) ) elif dataset == "vqa": self.all_datasets.append( VQADataset( base_dir, tokenizer, vision_tower, samples_per_epoch, precision, num_classes_per_sample, exclude_val, vqa_data, vqa_sample_rate, ) ) elif dataset == "mixed_grounding": self.all_datasets.append( MixedGroundingDataset( base_dir, tokenizer, vision_tower, samples_per_epoch, precision, num_classes_per_sample, exclude_val, ) ) def __len__(self): return self.samples_per_epoch def __getitem__(self, idx): ind = np.random.choice(list(range(len(self.datasets))), p=self.sample_rate) data = self.all_datasets[ind] inference = False return *data[0], inference class ValDataset(torch.utils.data.Dataset): pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1) pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1) img_size = 1024 ignore_label = 255 def __init__( self, base_dir, tokenizer, vision_tower, val_dataset, ): self.base_dir = base_dir splits = val_dataset.split("|") if len(splits) == 2: ds, split = splits images = glob.glob( os.path.join(self.base_dir, "reason_seg", ds, split, "*.jpg") ) self.images = images self.data_type = "reason_seg" elif len(splits) == 3: self.base_dir = os.path.join(self.base_dir, 'refer_seg') ds, splitBy, split = splits
refer_api = REFER(self.base_dir, ds, splitBy)
6
2023-12-15 14:58:24+00:00
24k
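The VisualSearch dataset row above mixes several sub-datasets by normalising a `sample_rate` vector into probabilities and drawing a sub-dataset index per `__getitem__` call with `np.random.choice`. A reduced sketch of that sampling logic, with plain lists standing in for the real datasets:

import numpy as np


class HybridToy:
    # Toy mixture dataset: picks one sub-dataset per item according to sample_rate.
    def __init__(self, datasets, sample_rate, samples_per_epoch=100):
        self.datasets = datasets
        rate = np.array(sample_rate, dtype=float)
        self.sample_rate = rate / rate.sum()  # normalise to probabilities
        self.samples_per_epoch = samples_per_epoch

    def __len__(self):
        return self.samples_per_epoch

    def __getitem__(self, idx):
        ind = np.random.choice(len(self.datasets), p=self.sample_rate)
        # As in the row above, element 0 of the chosen sub-dataset is read.
        return self.datasets[ind][0]


ds = HybridToy([["segdet"], ["refer_seg"], ["vqa"]], sample_rate=[9, 3, 3])
print([ds[i] for i in range(5)])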
sinoyou/nelf-pro
nerfstudio/viewer/server/viewer_utils.py
[ { "identifier": "Cameras", "path": "nerfstudio/cameras/cameras.py", "snippet": "class Cameras(TensorDataclass):\n \"\"\"Dataparser outputs for the image dataset and the ray generator.\n\n Note: currently only supports cameras with the same principal points and types. The reason we type\n the fo...
import base64 import enum import os import sys import threading import time import warnings import cv2 import numpy as np import torch from pathlib import Path from typing import Any, Dict, Optional, Tuple from cryptography.utils import CryptographyDeprecationWarning from rich.console import Console from nerfstudio.cameras.cameras import Cameras from nerfstudio.cameras.rays import RayBundle from nerfstudio.configs import base_config as cfg from nerfstudio.data.datasets.base_dataset import InputDataset from nerfstudio.models.base_model import Model from nerfstudio.utils import colormaps, profiler, writer from nerfstudio.utils.decorators import check_main_thread, decorate_all from nerfstudio.utils.images import BasicImages from nerfstudio.utils.io import load_from_json, write_to_json from nerfstudio.utils.writer import GLOBAL_BUFFER, EventName, TimeWriter from nerfstudio.viewer.server.subprocess import run_viewer_bridge_server_as_subprocess from nerfstudio.viewer.server.utils import get_intrinsics_matrix_and_camera_to_world_h from nerfstudio.viewer.server.visualizer import Viewer
21,585
if self.prev_camera_matrix is not None and np.allclose(camera_object["matrix"], self.prev_camera_matrix): self.camera_moving = False else: self.prev_camera_matrix = camera_object["matrix"] self.camera_moving = True output_type = self.vis["renderingState/output_choice"].read() if output_type is None: output_type = OutputTypes.INIT if self.prev_output_type != output_type: self.camera_moving = True colormap_type = self.vis["renderingState/colormap_choice"].read() if colormap_type is None: colormap_type = ColormapTypes.INIT if self.prev_colormap_type != colormap_type: self.camera_moving = True return camera_object def _apply_colormap(self, outputs: Dict[str, Any], colors: torch.Tensor = None, eps=1e-6): """Determines which colormap to use based on set colormap type Args: outputs: the output tensors for which to apply colormaps on colors: is only set if colormap is for semantics. Defaults to None. eps: epsilon to handle floating point comparisons """ if self.output_list: reformatted_output = self._process_invalid_output(self.prev_output_type) # default for rgb images if self.prev_colormap_type == ColormapTypes.DEFAULT and outputs[reformatted_output].shape[-1] == 3: return outputs[reformatted_output] # rendering depth outputs if self.prev_colormap_type == ColormapTypes.DEPTH or ( self.prev_colormap_type == ColormapTypes.DEFAULT and outputs[reformatted_output].dtype == torch.float and (torch.max(outputs[reformatted_output]) - 1.0) > eps # handle floating point arithmetic ): accumulation_str = ( OutputTypes.ACCUMULATION if OutputTypes.ACCUMULATION in self.output_list else OutputTypes.ACCUMULATION_FINE ) return colormaps.apply_depth_colormap(outputs[reformatted_output], accumulation=outputs[accumulation_str]) # rendering accumulation outputs if self.prev_colormap_type == ColormapTypes.TURBO or ( self.prev_colormap_type == ColormapTypes.DEFAULT and outputs[reformatted_output].dtype == torch.float ): return colormaps.apply_colormap(outputs[reformatted_output]) # rendering semantic outputs if self.prev_colormap_type == ColormapTypes.SEMANTIC or ( self.prev_colormap_type == ColormapTypes.DEFAULT and outputs[reformatted_output].dtype == torch.int ): logits = outputs[reformatted_output] labels = torch.argmax(torch.nn.functional.softmax(logits, dim=-1), dim=-1) # type: ignore assert colors is not None return colors[labels] # rendering boolean outputs if self.prev_colormap_type == ColormapTypes.BOOLEAN or ( self.prev_colormap_type == ColormapTypes.DEFAULT and outputs[reformatted_output].dtype == torch.bool ): return colormaps.apply_boolean_colormap(outputs[reformatted_output]) raise NotImplementedError def _send_output_to_viewer(self, outputs: Dict[str, Any], colors: torch.Tensor = None, eps=1e-6): """Chooses the correct output and sends it to the viewer Args: outputs: the dictionary of outputs to choose from, from the graph colors: is only set if colormap is for semantics. Defaults to None. 
eps: epsilon to handle floating point comparisons """ if self.output_list is None: self.output_list = list(outputs.keys()) viewer_output_list = list(np.copy(self.output_list)) # remapping rgb_fine -> rgb for all cases just so that we dont have 2 of them in the options if OutputTypes.RGB_FINE in self.output_list: viewer_output_list.remove(OutputTypes.RGB_FINE) viewer_output_list.insert(0, OutputTypes.RGB) self.vis["renderingState/output_options"].write(viewer_output_list) reformatted_output = self._process_invalid_output(self.prev_output_type) # re-register colormaps and send to viewer if self.output_type_changed or self.prev_colormap_type == ColormapTypes.INIT: self.prev_colormap_type = ColormapTypes.DEFAULT colormap_options = [ColormapTypes.DEFAULT] if ( outputs[reformatted_output].shape[-1] != 3 and outputs[reformatted_output].dtype == torch.float and (torch.max(outputs[reformatted_output]) - 1.0) <= eps # handle floating point arithmetic ): # accumulation can also include depth colormap_options.extend(["depth"]) self.output_type_changed = False self.vis["renderingState/colormap_choice"].write(self.prev_colormap_type) self.vis["renderingState/colormap_options"].write(colormap_options) selected_output = (self._apply_colormap(outputs, colors) * 255).type(torch.uint8) image = selected_output[..., [2, 1, 0]].cpu().numpy() data = cv2.imencode(".jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 75])[1].tobytes() data = str("data:image/jpeg;base64," + base64.b64encode(data).decode("ascii")) self.vis["render_img"].write(data) def _update_viewer_stats(self, render_time: float, num_rays: int, image_height: int, image_width: int) -> None: """Function that calculates and populates all the rendering statistics accordingly Args: render_time: total time spent rendering current view num_rays: number of rays rendered image_height: resolution of the current view image_width: resolution of the current view """
# Copyright 2022 The Nerfstudio Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Code to interface with the `vis/` (the JS viewer). """ from __future__ import annotations warnings.filterwarnings("ignore", category=CryptographyDeprecationWarning) CONSOLE = Console(width=120) def get_viewer_version() -> str: """Get the version of the viewer.""" json_filename = os.path.join(os.path.dirname(__file__), "../app/package.json") version = load_from_json(Path(json_filename))["version"] return version @check_main_thread def setup_viewer(config: cfg.ViewerConfig, log_filename: Path): """Sets up the viewer if enabled Args: config: the configuration to instantiate viewer """ viewer_state = ViewerState(config, log_filename=log_filename) banner_messages = [f"Viewer at: {viewer_state.viewer_url}"] return viewer_state, banner_messages class OutputTypes(str, enum.Enum): """Noncomprehsnive list of output render types""" INIT = "init" RGB = "rgb" RGB_FINE = "rgb_fine" ACCUMULATION = "accumulation" ACCUMULATION_FINE = "accumulation_fine" class ColormapTypes(str, enum.Enum): """Noncomprehsnive list of colormap render types""" INIT = "init" DEFAULT = "default" TURBO = "turbo" DEPTH = "depth" SEMANTIC = "semantic" BOOLEAN = "boolean" class IOChangeException(Exception): """Basic camera exception to interrupt viewer""" class SetTrace: """Basic trace function""" def __init__(self, func): self.func = func def __enter__(self): sys.settrace(self.func) return self def __exit__(self, ext_type, exc_value, traceback): sys.settrace(None) class RenderThread(threading.Thread): """Thread that does all the rendering calls while listening for interrupts Args: state: current viewer state object graph: current checkpoint of model camera_ray_bundle: input rays to pass through the graph to render out """ def __init__(self, state: "ViewerState", graph: Model, camera_ray_bundle: RayBundle): threading.Thread.__init__(self) self.state = state self.graph = graph self.camera_ray_bundle = camera_ray_bundle self.exc = None self.vis_outputs = None def run(self): """run function that renders out images given the current graph and ray bundles. Interlaced with a trace function that checks to see if any I/O changes were registered. Exits and continues program if IOChangeException thrown. 
""" outputs = None try: with SetTrace(self.state.check_interrupt): with torch.no_grad(): outputs = self.graph.get_outputs_for_camera_ray_bundle(self.camera_ray_bundle) except Exception as e: # pylint: disable=broad-except self.exc = e if outputs: self.vis_outputs = outputs self.state.check_done_render = True self.state.check_interrupt_vis = False def join(self, timeout=None): threading.Thread.join(self) if self.exc: raise self.exc class CheckThread(threading.Thread): """Thread the constantly checks for io changes and sets a flag indicating interrupt Args: state: current viewer state object """ def __init__(self, state): threading.Thread.__init__(self) self.state = state def run(self): """Run function that checks to see if any of the existing state has changed (e.g. camera pose/output type/resolutions). Sets the viewer state flag to true to signal to render thread that an interrupt was registered. """ self.state.check_done_render = False while not self.state.check_done_render: # check camera data = self.state.vis["renderingState/camera"].read() if data is not None: camera_object = data["object"] if self.state.prev_camera_matrix is None or ( not np.allclose(camera_object["matrix"], self.state.prev_camera_matrix) and not self.state.prev_moving ): self.state.check_interrupt_vis = True self.state.prev_moving = True return self.state.prev_moving = False # check output type output_type = self.state.vis["renderingState/output_choice"].read() if output_type is None: output_type = OutputTypes.INIT if self.state.prev_output_type != output_type: self.state.check_interrupt_vis = True return # check colormap type colormap_type = self.state.vis["renderingState/colormap_choice"].read() if colormap_type is None: colormap_type = ColormapTypes.INIT if self.state.prev_colormap_type != colormap_type: self.state.check_interrupt_vis = True return # check max render max_resolution = self.state.vis["renderingState/maxResolution"].read() if max_resolution is not None: if self.state.max_resolution != max_resolution: self.state.check_interrupt_vis = True return @decorate_all([check_main_thread]) class ViewerState: """Class to hold state for viewer variables Args: config: viewer setup configuration """ def __init__(self, config: cfg.ViewerConfig, log_filename: Path): self.config = config self.vis = None self.viewer_url = None self.log_filename = log_filename if self.config.launch_bridge_server: # start the viewer bridge server assert self.config.websocket_port is not None self.log_filename.parent.mkdir(exist_ok=True) zmq_port = run_viewer_bridge_server_as_subprocess( self.config.websocket_port, zmq_port=self.config.zmq_port, ip_address=self.config.ip_address, log_filename=str(self.log_filename), ) # TODO(ethan): log the output of the viewer bridge server in a file where the training logs go CONSOLE.line() version = get_viewer_version() websocket_url = f"ws://localhost:{self.config.websocket_port}" self.viewer_url = f"https://viewer.nerf.studio/versions/{version}/?websocket_url={websocket_url}" CONSOLE.rule(characters="=") CONSOLE.print(f"[Public] Open the viewer at {self.viewer_url}") CONSOLE.rule(characters="=") CONSOLE.line() self.vis = Viewer(zmq_port=zmq_port, ip_address=self.config.ip_address) else: assert self.config.zmq_port is not None self.vis = Viewer(zmq_port=self.config.zmq_port, ip_address=self.config.ip_address) # viewer specific variables self.prev_camera_matrix = None self.prev_output_type = OutputTypes.INIT self.prev_colormap_type = ColormapTypes.INIT self.prev_moving = False self.output_type_changed 
= True self.max_resolution = 1000 self.check_interrupt_vis = False self.check_done_render = True self.step = 0 self.static_fps = 1 self.moving_fps = 24 self.camera_moving = False self.prev_camera_timestamp = 0 self.probe_config = None self.output_list = None def _pick_drawn_image_idxs(self, total_num: int) -> list[int]: """Determine indicies of images to display in viewer. Args: total_num: total number of training images. Returns: List of indices from [0, total_num-1]. """ if self.config.max_num_display_images < 0: num_display_images = total_num else: num_display_images = min(self.config.max_num_display_images, total_num) # draw indices, roughly evenly spaced return np.linspace(0, total_num - 1, num_display_images, dtype=np.int32).tolist() def init_scene(self, dataset: InputDataset, start_train=True) -> None: """Draw some images and the scene aabb in the viewer. Args: dataset: dataset to render in the scene start_train: whether to start train when viewer init; if False, only displays dataset until resume train is toggled """ # set the config base dir self.vis["renderingState/config_base_dir"].write(str(self.log_filename.parents[0])) # clear the current scene self.vis["sceneState/sceneBox"].delete() self.vis["sceneState/cameras"].delete() # draw the training cameras and images image_indices = self._pick_drawn_image_idxs(len(dataset)) for idx in image_indices: image = dataset[idx]["image"] if isinstance(image, BasicImages): bgr = image.images[0][..., [2, 1, 0]] else: bgr = image[..., [2, 1, 0]] camera_json = dataset.cameras.to_json(camera_idx=idx, image=bgr, max_size=100) self.vis[f"sceneState/cameras/{idx:06d}"].write(camera_json) # draw the scene box (i.e., the bounding box) json_ = dataset.scene_box.to_json() self.vis["sceneState/sceneBox"].write(json_) # set the initial state whether to train or not self.vis["renderingState/isTraining"].write(start_train) # self.vis["renderingState/render_time"].write(str(0)) self.probe_config = dataset.cameras.probe_config # set the properties of the camera # self.vis["renderingState/camera"].write(json_) # set the main camera intrinsics to one from the dataset # K = camera.get_intrinsics_matrix() # set_persp_intrinsics_matrix(self.vis, K.double().numpy()) def _check_camera_path_payload(self, trainer, step: int): """Check to see if the camera path export button was pressed.""" # check if we should interrupt from a button press? 
camera_path_payload = self.vis["camera_path_payload"].read() if camera_path_payload: # save a model checkpoint trainer.save_checkpoint(step) # write to json file camera_path_filename = camera_path_payload["camera_path_filename"] + '.json' camera_path = camera_path_payload["camera_path"] write_to_json(Path(camera_path_filename), camera_path) self.vis["camera_path_payload"].delete() def update_scene(self, trainer, step: int, graph: Model, num_rays_per_batch: int) -> None: """updates the scene based on the graph weights Args: step: iteration step of training graph: the current checkpoint of the model """ has_temporal_distortion = getattr(graph, "temporal_distortion", None) is not None self.vis["model/has_temporal_distortion"].write(str(has_temporal_distortion).lower()) is_training = self.vis["renderingState/isTraining"].read() self.step = step self._check_camera_path_payload(trainer, step) camera_object = self._get_camera_object() if camera_object is None: return if is_training is None or is_training: # in training mode if self.camera_moving: # if the camera is moving, then we pause training and update camera continuously while self.camera_moving: self._render_image_in_viewer(camera_object, graph, is_training) camera_object = self._get_camera_object() else: # if the camera is not moving, then we approximate how many training steps need to be taken # to render at a FPS defined by self.static_fps. if EventName.TRAIN_RAYS_PER_SEC.value in GLOBAL_BUFFER["events"]: train_rays_per_sec = GLOBAL_BUFFER["events"][EventName.TRAIN_RAYS_PER_SEC.value]["avg"] target_train_util = self.vis["renderingState/targetTrainUtil"].read() if target_train_util is None: target_train_util = 0.9 batches_per_sec = train_rays_per_sec / num_rays_per_batch num_steps = max(int(1 / self.static_fps * batches_per_sec), 1) else: num_steps = 1 if step % num_steps == 0: self._render_image_in_viewer(camera_object, graph, is_training) else: # in pause training mode, enter render loop with set graph local_step = step run_loop = not is_training while run_loop: # if self._is_render_step(local_step) and step > 0: if step > 0: self._render_image_in_viewer(camera_object, graph, is_training) camera_object = self._get_camera_object() is_training = self.vis["renderingState/isTraining"].read() self._check_camera_path_payload(trainer, step) run_loop = not is_training local_step += 1 def check_interrupt(self, frame, event, arg): # pylint: disable=unused-argument """Raises interrupt when flag has been set and not already on lowest resolution. Used in conjunction with SetTrace. 
""" if event == "line": if self.check_interrupt_vis and not self.camera_moving: raise IOChangeException return self.check_interrupt def _get_camera_object(self): """Gets the camera object from the viewer and updates the movement state if it has changed.""" data = self.vis["renderingState/camera"].read() if data is None: return None camera_object = data["object"] if self.prev_camera_matrix is not None and np.allclose(camera_object["matrix"], self.prev_camera_matrix): self.camera_moving = False else: self.prev_camera_matrix = camera_object["matrix"] self.camera_moving = True output_type = self.vis["renderingState/output_choice"].read() if output_type is None: output_type = OutputTypes.INIT if self.prev_output_type != output_type: self.camera_moving = True colormap_type = self.vis["renderingState/colormap_choice"].read() if colormap_type is None: colormap_type = ColormapTypes.INIT if self.prev_colormap_type != colormap_type: self.camera_moving = True return camera_object def _apply_colormap(self, outputs: Dict[str, Any], colors: torch.Tensor = None, eps=1e-6): """Determines which colormap to use based on set colormap type Args: outputs: the output tensors for which to apply colormaps on colors: is only set if colormap is for semantics. Defaults to None. eps: epsilon to handle floating point comparisons """ if self.output_list: reformatted_output = self._process_invalid_output(self.prev_output_type) # default for rgb images if self.prev_colormap_type == ColormapTypes.DEFAULT and outputs[reformatted_output].shape[-1] == 3: return outputs[reformatted_output] # rendering depth outputs if self.prev_colormap_type == ColormapTypes.DEPTH or ( self.prev_colormap_type == ColormapTypes.DEFAULT and outputs[reformatted_output].dtype == torch.float and (torch.max(outputs[reformatted_output]) - 1.0) > eps # handle floating point arithmetic ): accumulation_str = ( OutputTypes.ACCUMULATION if OutputTypes.ACCUMULATION in self.output_list else OutputTypes.ACCUMULATION_FINE ) return colormaps.apply_depth_colormap(outputs[reformatted_output], accumulation=outputs[accumulation_str]) # rendering accumulation outputs if self.prev_colormap_type == ColormapTypes.TURBO or ( self.prev_colormap_type == ColormapTypes.DEFAULT and outputs[reformatted_output].dtype == torch.float ): return colormaps.apply_colormap(outputs[reformatted_output]) # rendering semantic outputs if self.prev_colormap_type == ColormapTypes.SEMANTIC or ( self.prev_colormap_type == ColormapTypes.DEFAULT and outputs[reformatted_output].dtype == torch.int ): logits = outputs[reformatted_output] labels = torch.argmax(torch.nn.functional.softmax(logits, dim=-1), dim=-1) # type: ignore assert colors is not None return colors[labels] # rendering boolean outputs if self.prev_colormap_type == ColormapTypes.BOOLEAN or ( self.prev_colormap_type == ColormapTypes.DEFAULT and outputs[reformatted_output].dtype == torch.bool ): return colormaps.apply_boolean_colormap(outputs[reformatted_output]) raise NotImplementedError def _send_output_to_viewer(self, outputs: Dict[str, Any], colors: torch.Tensor = None, eps=1e-6): """Chooses the correct output and sends it to the viewer Args: outputs: the dictionary of outputs to choose from, from the graph colors: is only set if colormap is for semantics. Defaults to None. 
eps: epsilon to handle floating point comparisons """ if self.output_list is None: self.output_list = list(outputs.keys()) viewer_output_list = list(np.copy(self.output_list)) # remapping rgb_fine -> rgb for all cases just so that we dont have 2 of them in the options if OutputTypes.RGB_FINE in self.output_list: viewer_output_list.remove(OutputTypes.RGB_FINE) viewer_output_list.insert(0, OutputTypes.RGB) self.vis["renderingState/output_options"].write(viewer_output_list) reformatted_output = self._process_invalid_output(self.prev_output_type) # re-register colormaps and send to viewer if self.output_type_changed or self.prev_colormap_type == ColormapTypes.INIT: self.prev_colormap_type = ColormapTypes.DEFAULT colormap_options = [ColormapTypes.DEFAULT] if ( outputs[reformatted_output].shape[-1] != 3 and outputs[reformatted_output].dtype == torch.float and (torch.max(outputs[reformatted_output]) - 1.0) <= eps # handle floating point arithmetic ): # accumulation can also include depth colormap_options.extend(["depth"]) self.output_type_changed = False self.vis["renderingState/colormap_choice"].write(self.prev_colormap_type) self.vis["renderingState/colormap_options"].write(colormap_options) selected_output = (self._apply_colormap(outputs, colors) * 255).type(torch.uint8) image = selected_output[..., [2, 1, 0]].cpu().numpy() data = cv2.imencode(".jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 75])[1].tobytes() data = str("data:image/jpeg;base64," + base64.b64encode(data).decode("ascii")) self.vis["render_img"].write(data) def _update_viewer_stats(self, render_time: float, num_rays: int, image_height: int, image_width: int) -> None: """Function that calculates and populates all the rendering statistics accordingly Args: render_time: total time spent rendering current view num_rays: number of rays rendered image_height: resolution of the current view image_width: resolution of the current view """
writer.put_time(
7
2023-12-15 20:07:22+00:00
24k
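The nerfstudio viewer row above sends rendered frames to the browser by JPEG-encoding a uint8 BGR image with OpenCV and wrapping the bytes in a base64 data URL. A minimal sketch of just that encoding step on a random image; the websocket write to `render_img` is omitted.

import base64

import cv2
import numpy as np

# Stand-in for a rendered frame: HxWx3 uint8 in BGR order, as OpenCV expects.
image_bgr = np.random.randint(0, 255, size=(64, 64, 3), dtype=np.uint8)

# JPEG-encode at quality 75, then wrap as a data URL for the web viewer.
ok, buf = cv2.imencode(".jpg", image_bgr, [cv2.IMWRITE_JPEG_QUALITY, 75])
assert ok
data_url = "data:image/jpeg;base64," + base64.b64encode(buf.tobytes()).decode("ascii")
print(data_url[:40], "...")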
amazon-science/c2f-seg
data/dataloader_transformer.py
[ { "identifier": "FishBowl", "path": "data/dataloader_Fishbowl.py", "snippet": "class FishBowl(object):\n def __init__(self, config, mode, subtest=None):\n self.datatype = mode\n data_dir = config.root_path\n\n self.img_path = os.path.join(data_dir, self.datatype+\"_data\", self.d...
from data.dataloader_Fishbowl import FishBowl from data.dataloader_MOViD_A import MOViD_A from data.dataloader_KINS import Kins_Fusion_dataset, KINS_Aisformer_VRSP_Intersection from data.dataloader_COCOA import COCOA_Fusion_dataset, COCOA_VRSP
21,265
def load_dataset(config, args, mode): if mode=="train": if args.dataset=="KINS": train_dataset = Kins_Fusion_dataset(config, mode='train') test_dataset = Kins_Fusion_dataset(config, mode='test') elif args.dataset=="COCOA": train_dataset = COCOA_Fusion_dataset(config, mode='train') test_dataset = COCOA_Fusion_dataset(config, mode='test') elif args.dataset=="Fishbowl": train_dataset = FishBowl(config, mode='train') test_dataset = FishBowl(config, mode='test') elif args.dataset=="MOViD_A":
def load_dataset(config, args, mode): if mode=="train": if args.dataset=="KINS": train_dataset = Kins_Fusion_dataset(config, mode='train') test_dataset = Kins_Fusion_dataset(config, mode='test') elif args.dataset=="COCOA": train_dataset = COCOA_Fusion_dataset(config, mode='train') test_dataset = COCOA_Fusion_dataset(config, mode='test') elif args.dataset=="Fishbowl": train_dataset = FishBowl(config, mode='train') test_dataset = FishBowl(config, mode='test') elif args.dataset=="MOViD_A":
train_dataset = MOViD_A(config, mode='train')
1
2023-12-21 04:25:47+00:00
24k
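The c2f-seg row above selects train/test dataset classes from a string flag through an if/elif chain. A table-driven variant of the same dispatch is sketched below with placeholder classes; this is an alternative formulation for illustration, not the repository's code.

class ToyDataset:
    # Placeholder standing in for the real KINS / COCOA / Fishbowl / MOViD_A loaders.
    def __init__(self, config, mode):
        self.config, self.mode = config, mode


DATASETS = {"KINS": ToyDataset, "COCOA": ToyDataset,
            "Fishbowl": ToyDataset, "MOViD_A": ToyDataset}


def load_dataset(config, name):
    # Return a (train, test) pair for the requested dataset name.
    cls = DATASETS[name]
    return cls(config, mode="train"), cls(config, mode="test")


train_ds, test_ds = load_dataset(config={}, name="KINS")
print(type(train_ds).__name__, train_ds.mode, test_ds.mode)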
alipay/PainlessInferenceAcceleration
pia/lookahead/models/qwen/modeling_qwen.py
[ { "identifier": "LookaheadPreTrainedModel", "path": "pia/lookahead/common/pretrained_model.py", "snippet": "class LookaheadPreTrainedModel(PreTrainedModel):\n _batch_generation = False\n _stream_generation = False\n\n def __init__(self, config):\n super().__init__(config=config)\n\n d...
import importlib import math import torch import torch.nn.functional as F import torch.utils.checkpoint import flash_attn from typing import TYPE_CHECKING, Optional, Tuple, Union, List, Any, Generator from torch.cuda.amp import autocast from torch.nn import CrossEntropyLoss from transformers import PreTrainedTokenizer, GenerationConfig from transformers.generation.logits_process import LogitsProcessorList from transformers.modeling_outputs import ( BaseModelOutputWithPast, CausalLMOutputWithPast, ) from pia.lookahead.common.pretrained_model import LookaheadPreTrainedModel from transformers.utils import logging from einops import rearrange from torch import nn from .configuration_qwen import QWenConfig from .qwen_generation_utils import ( HistoryType, make_context, decode_tokens, get_stop_words_ids, StopWordsLogitsProcessor, ) from flash_attn.layers.rotary import apply_rotary_emb_func as __apply_rotary_emb_func from flash_attn.ops.rms_norm import rms_norm as __rms_norm from flash_attn.flash_attn_interface import flash_attn_unpadded_func as __flash_attn_unpadded_func from flash_attn.flash_attn_interface import flash_attn_varlen_func as __flash_attn_unpadded_func from flash_attn.flash_attn_interface import flash_attn_unpadded_func as __flash_attn_unpadded_func from transformers_stream_generator.main import NewGenerationMixin, StreamGenerationConfig from einops import rearrange from einops import rearrange
17,805
position_ids = position_ids[:, -1].unsqueeze(-1) else: position_ids = None if inputs_embeds is not None and past_key_values is None: model_inputs = {"inputs_embeds": inputs_embeds} else: model_inputs = {"input_ids": input_ids} model_inputs.update( { "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache"), "position_ids": position_ids, "attention_mask": attention_mask, "token_type_ids": token_type_ids, } ) return model_inputs def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, CausalLMOutputWithPast]: return_dict = ( return_dict if return_dict is not None else self.config.use_return_dict ) transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] lm_logits = self.lm_head(hidden_states) loss = None if labels is not None: labels = labels.to(lm_logits.device) shift_logits = lm_logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() loss_fct = CrossEntropyLoss() loss = loss_fct( shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1) ) if not return_dict: output = (lm_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return CausalLMOutputWithPast( loss=loss, logits=lm_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @staticmethod def _reorder_cache( past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor ) -> Tuple[Tuple[torch.Tensor]]: return tuple( tuple( past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past ) for layer_past in past_key_values ) def chat( self, tokenizer: PreTrainedTokenizer, query: str, history: Optional[HistoryType], system: str = "You are a helpful assistant.", append_history: bool = True, stream: Optional[bool] = _SENTINEL, stop_words_ids: Optional[List[List[int]]] = None, generation_config: Optional[GenerationConfig] = None, **kwargs, ) -> Tuple[str, HistoryType]: generation_config = generation_config if generation_config is not None else self.generation_config assert stream is _SENTINEL, _ERROR_STREAM_IN_CHAT assert generation_config.chat_format == 'chatml', _ERROR_BAD_CHAT_FORMAT if history is None: history = [] if stop_words_ids is None: stop_words_ids = [] max_window_size = kwargs.get('max_window_size', None) if max_window_size is None: max_window_size = generation_config.max_window_size
# Copyright (c) Alibaba Cloud. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. if TYPE_CHECKING: pass # from transformers.modeling_utils import PreTrainedModel try: except ImportError: rearrange = None SUPPORT_CUDA = torch.cuda.is_available() SUPPORT_BF16 = SUPPORT_CUDA and torch.cuda.is_bf16_supported() SUPPORT_FP16 = SUPPORT_CUDA and torch.cuda.get_device_capability(0)[0] >= 7 logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "qwen" _CONFIG_FOR_DOC = "QWenConfig" QWen_PRETRAINED_MODEL_ARCHIVE_LIST = ["qwen-7b"] _ERROR_BAD_CHAT_FORMAT = """\ We detect you are probably using the pretrained model (rather than chat model) for chatting, since the chat_format in generation_config is not "chatml". If you are directly using the model downloaded from Huggingface, please make sure you are using our "Qwen/Qwen-7B-Chat" Huggingface model (rather than "Qwen/Qwen-7B") when you call model.chat(). 我们检测到您可能在使用预训练模型(而非chat模型)进行多轮chat,因为您当前在generation_config指定的chat_format,并未设置为我们在对话中所支持的"chatml"格式。 如果您在直接使用我们从Huggingface提供的模型,请确保您在调用model.chat()时,使用的是"Qwen/Qwen-7B-Chat"模型(而非"Qwen/Qwen-7B"预训练模型)。 """ _SENTINEL = object() _ERROR_STREAM_IN_CHAT = """\ Pass argument `stream` to model.chat() is buggy, deprecated, and marked for removal. Please use model.chat_stream(...) instead of model.chat(..., stream=True). 向model.chat()传入参数stream的用法可能存在Bug,该用法已被废弃,将在未来被移除。请使用model.chat_stream(...)代替model.chat(..., stream=True)。 """ _ERROR_INPUT_CPU_QUERY_WITH_FLASH_ATTN_ACTIVATED = """\ We detect you have activated flash attention support, but running model computation on CPU. Please make sure that your input data has been placed on GPU. If you actually want to run CPU computation, please following the readme and set device_map="cpu" to disable flash attention when loading the model (calling AutoModelForCausalLM.from_pretrained). 
检测到您的模型已激活了flash attention支持,但正在执行CPU运算任务。如使用flash attention,请您确认模型输入已经传到GPU上。如果您确认要执行CPU运算,请您在载入模型(调用AutoModelForCausalLM.from_pretrained)时,按照readme说法,指定device_map="cpu"以禁用flash attention。 """ apply_rotary_emb_func = None rms_norm = None flash_attn_unpadded_func = None def _import_flash_attn(): global apply_rotary_emb_func, rms_norm, flash_attn_unpadded_func try: apply_rotary_emb_func = __apply_rotary_emb_func except ImportError: logger.warn( "Warning: import flash_attn rotary fail, please install FlashAttention rotary to get higher efficiency " "https://github.com/Dao-AILab/flash-attention/tree/main/csrc/rotary" ) try: rms_norm = __rms_norm except ImportError: logger.warn( "Warning: import flash_attn rms_norm fail, please install FlashAttention layer_norm to get higher efficiency " "https://github.com/Dao-AILab/flash-attention/tree/main/csrc/layer_norm" ) try: if not hasattr(flash_attn, '__version__'): else: if int(flash_attn.__version__.split(".")[0]) >= 2: else: flash_attn_unpadded_func = __flash_attn_unpadded_func except ImportError: logger.warn( "Warning: import flash_attn fail, please install FlashAttention to get higher efficiency " "https://github.com/Dao-AILab/flash-attention" ) class FlashSelfAttention(torch.nn.Module): def __init__( self, causal=False, softmax_scale=None, attention_dropout=0.0, ): super().__init__() assert flash_attn_unpadded_func is not None, ( "Please install FlashAttention first, " "e.g., with pip install flash-attn" ) assert ( rearrange is not None ), "Please install einops first, e.g., with pip install einops" self.causal = causal self.softmax_scale = softmax_scale self.dropout_p = attention_dropout def forward(self, q, k, v): assert all((i.dtype in [torch.float16, torch.bfloat16] for i in (q, k, v))) assert all((i.is_cuda for i in (q, k, v))) batch_size, seqlen_q = q.shape[0], q.shape[1] seqlen_k = k.shape[1] q, k, v = [rearrange(x, "b s ... 
-> (b s) ...") for x in [q, k, v]] cu_seqlens_q = torch.arange( 0, (batch_size + 1) * seqlen_q, step=seqlen_q, dtype=torch.int32, device=q.device, ) if self.training: assert seqlen_k == seqlen_q is_causal = self.causal cu_seqlens_k = cu_seqlens_q else: is_causal = seqlen_q == seqlen_k cu_seqlens_k = torch.arange( 0, (batch_size + 1) * seqlen_k, step=seqlen_k, dtype=torch.int32, device=q.device, ) self.dropout_p = 0 output = flash_attn_unpadded_func( q, k, v, cu_seqlens_q, cu_seqlens_k, seqlen_q, seqlen_k, self.dropout_p, softmax_scale=self.softmax_scale, causal=is_causal, ) new_shape = (batch_size, output.shape[0] // batch_size) + output.shape[1:] output = output.view(new_shape) return output class QWenAttention(nn.Module): def __init__(self, config): super().__init__() self.register_buffer("masked_bias", torch.tensor(-1e4), persistent=False) self.seq_length = config.seq_length self.hidden_size = config.hidden_size self.split_size = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.hidden_size // self.num_heads # self.use_flash_attn = config.use_flash_attn self.use_flash_attn = False self.scale_attn_weights = True self.projection_size = config.kv_channels * config.num_attention_heads assert self.projection_size % config.num_attention_heads == 0 self.hidden_size_per_attention_head = ( self.projection_size // config.num_attention_heads ) self.c_attn = nn.Linear(config.hidden_size, 3 * self.projection_size) self.c_proj = nn.Linear( config.hidden_size, self.projection_size, bias=not config.no_bias ) self.is_fp32 = not (config.bf16 or config.fp16) if ( self.use_flash_attn and flash_attn_unpadded_func is not None and not self.is_fp32 ): self.core_attention_flash = FlashSelfAttention( causal=True, attention_dropout=config.attn_dropout_prob ) self.bf16 = config.bf16 self.use_dynamic_ntk = config.use_dynamic_ntk self.use_logn_attn = config.use_logn_attn logn_list = [ math.log(i, self.seq_length) if i > self.seq_length else 1 for i in range(1, 32768) ] self.logn_tensor = torch.tensor(logn_list)[None, :, None, None] self.attn_dropout = nn.Dropout(config.attn_dropout_prob) def _attn(self, query, key, value, registered_causal_mask, attention_mask=None, head_mask=None): attn_weights = torch.matmul(query, key.transpose(-1, -2)) if self.scale_attn_weights: attn_weights = attn_weights / torch.full( [], value.size(-1) ** 0.5, dtype=attn_weights.dtype, device=attn_weights.device, ) query_length, key_length = query.size(-2), key.size(-2) causal_mask = registered_causal_mask[ :, :, key_length - query_length: key_length, :key_length ] mask_value = torch.finfo(attn_weights.dtype).min mask_value = torch.full([], mask_value, dtype=attn_weights.dtype).to( attn_weights.device ) attn_weights = torch.where( causal_mask, attn_weights.to(attn_weights.dtype), mask_value ) attn_weights = nn.functional.softmax(attn_weights, dim=-1) attn_weights = attn_weights.type(value.dtype) attn_weights = self.attn_dropout(attn_weights) if head_mask is not None: attn_weights = attn_weights * head_mask attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2) return attn_output, attn_weights def _upcast_and_reordered_attn( self, query, key, value, registered_causal_mask, attention_mask=None, head_mask=None ): bsz, num_heads, q_seq_len, dk = query.size() _, _, k_seq_len, _ = key.size() attn_weights = torch.empty( bsz * num_heads, q_seq_len, k_seq_len, dtype=torch.float32, device=query.device, ) scale_factor = 1.0 if self.scale_attn_weights: scale_factor /= 
float(value.size(-1)) ** 0.5 with autocast(enabled=False): q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape( -1, dk, k_seq_len ) attn_weights = torch.baddbmm( attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor ) attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len) query_length, key_length = query.size(-2), key.size(-2) causal_mask = registered_causal_mask[ :, :, key_length - query_length: key_length, :key_length ] mask_value = torch.finfo(attn_weights.dtype).min mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to( attn_weights.device ) attn_weights = torch.where(causal_mask, attn_weights, mask_value) if attention_mask is not None: attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1) if attn_weights.dtype != torch.float32: raise RuntimeError( "Error with upcasting, attn_weights does not have dtype torch.float32" ) attn_weights = attn_weights.type(value.dtype) attn_weights = self.attn_dropout(attn_weights) if head_mask is not None: attn_weights = attn_weights * head_mask attn_output = torch.matmul(attn_weights, value) return attn_output, attn_weights def _split_heads(self, tensor, num_heads, attn_head_size): new_shape = tensor.size()[:-1] + (num_heads, attn_head_size) tensor = tensor.view(new_shape) return tensor def _merge_heads(self, tensor, num_heads, attn_head_size): tensor = tensor.contiguous() new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,) return tensor.view(new_shape) def forward( self, hidden_states: Optional[Tuple[torch.FloatTensor]], rotary_pos_emb: Optional[List[torch.Tensor]] = None, registered_causal_mask: Optional[torch.Tensor] = None, layer_past: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, ): mixed_x_layer = self.c_attn(hidden_states) query, key, value = mixed_x_layer.split(self.split_size, dim=2) query = self._split_heads(query, self.num_heads, self.head_dim) key = self._split_heads(key, self.num_heads, self.head_dim) value = self._split_heads(value, self.num_heads, self.head_dim) if rotary_pos_emb is not None: cur_len = query.shape[1] rotary_pos_emb = [i[:, -cur_len:, :, :] for i in rotary_pos_emb] rotary_pos_emb = (rotary_pos_emb,) * 2 q_pos_emb, k_pos_emb = rotary_pos_emb # Slice the pos emb for current inference query = apply_rotary_pos_emb(query, q_pos_emb) key = apply_rotary_pos_emb(key, k_pos_emb) if layer_past is not None: past_key, past_value = layer_past[0], layer_past[1] key = torch.cat((past_key, key), dim=1) value = torch.cat((past_value, value), dim=1) if use_cache: present = (key, value) else: present = None if self.use_logn_attn and not self.training: if self.logn_tensor.device != query.device or self.logn_tensor.dtype != query.dtype: self.logn_tensor = self.logn_tensor.to(query.device).type_as(query) seq_start = key.size(1) - query.size(1) seq_end = key.size(1) logn_tensor = self.logn_tensor[:, seq_start:seq_end, :, :] query = query * logn_tensor.expand_as(query) if ( self.use_flash_attn and flash_attn_unpadded_func is not None and not self.is_fp32 and query.is_cuda ): q, k, v = query, key, value context_layer = self.core_attention_flash(q, k, v) # b s h d -> b s (h d) context_layer = context_layer.flatten(2, 3).contiguous() else: query = 
query.permute(0, 2, 1, 3) key = key.permute(0, 2, 1, 3) value = value.permute(0, 2, 1, 3) if ( registered_causal_mask is None and self.use_flash_attn and flash_attn_unpadded_func is not None and not self.is_fp32 and not query.is_cuda ): raise Exception(_ERROR_INPUT_CPU_QUERY_WITH_FLASH_ATTN_ACTIVATED) attn_output, attn_weight = self._attn( query, key, value, registered_causal_mask, attention_mask, head_mask ) context_layer = self._merge_heads( attn_output, self.num_heads, self.head_dim ) attn_output = self.c_proj(context_layer) outputs = (attn_output, present) if output_attentions: if ( self.use_flash_attn and flash_attn_unpadded_func is not None and not self.is_fp32 ): raise ValueError("Cannot output attentions while using flash-attn") else: outputs += (attn_weight,) return outputs class QWenMLP(nn.Module): def __init__(self, config): super().__init__() self.w1 = nn.Linear( config.hidden_size, config.intermediate_size // 2, bias=not config.no_bias ) self.w2 = nn.Linear( config.hidden_size, config.intermediate_size // 2, bias=not config.no_bias ) ff_dim_in = config.intermediate_size // 2 self.c_proj = nn.Linear(ff_dim_in, config.hidden_size, bias=not config.no_bias) def forward(self, hidden_states): a1 = self.w1(hidden_states) a2 = self.w2(hidden_states) intermediate_parallel = a1 * F.silu(a2) output = self.c_proj(intermediate_parallel) return output class QWenBlock(nn.Module): def __init__(self, config): super().__init__() hidden_size = config.hidden_size self.bf16 = config.bf16 self.ln_1 = RMSNorm( hidden_size, eps=config.layer_norm_epsilon, ) self.attn = QWenAttention(config) self.ln_2 = RMSNorm( hidden_size, eps=config.layer_norm_epsilon, ) self.mlp = QWenMLP(config) def forward( self, hidden_states: Optional[Tuple[torch.FloatTensor]], rotary_pos_emb: Optional[List[torch.Tensor]] = None, registered_causal_mask: Optional[torch.Tensor] = None, layer_past: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = False, output_attentions: Optional[bool] = False, ): layernorm_output = self.ln_1(hidden_states) attn_outputs = self.attn( layernorm_output, rotary_pos_emb, registered_causal_mask=registered_causal_mask, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, ) attn_output = attn_outputs[0] outputs = attn_outputs[1:] residual = hidden_states layernorm_input = attn_output + residual layernorm_output = self.ln_2(layernorm_input) residual = layernorm_input mlp_output = self.mlp(layernorm_output) hidden_states = residual + mlp_output if use_cache: outputs = (hidden_states,) + outputs else: outputs = (hidden_states,) + outputs[1:] return outputs class QWenPreTrainedModel(LookaheadPreTrainedModel): config_class = QWenConfig base_model_prefix = "transformer" is_parallelizable = False supports_gradient_checkpointing = True _no_split_modules = ["QWenBlock"] def __init__(self, *inputs, **kwargs): super().__init__(*inputs, **kwargs) def _init_weights(self, module): """Initialize the weights.""" if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: 
module.weight.data[module.padding_idx].zero_() elif isinstance(module, RMSNorm): module.weight.data.fill_(1.0) for name, p in module.named_parameters(): if name == "c_proj.weight": p.data.normal_( mean=0.0, std=( self.config.initializer_range / math.sqrt(2 * self.config.num_hidden_layers) ), ) def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, QWenModel): module.gradient_checkpointing = value class QWenModel(QWenPreTrainedModel): _keys_to_ignore_on_load_missing = ["attn.masked_bias"] def __init__(self, config): super().__init__(config) self.vocab_size = config.vocab_size self.num_hidden_layers = config.num_hidden_layers self.embed_dim = config.hidden_size self.gradient_checkpointing = False self.use_dynamic_ntk = config.use_dynamic_ntk self.seq_length = config.seq_length self.wte = nn.Embedding(self.vocab_size, self.embed_dim) self.drop = nn.Dropout(config.emb_dropout_prob) if config.rotary_pct == 1.0: self.rotary_ndims = None else: assert config.rotary_pct < 1 self.rotary_ndims = int( config.kv_channels * config.rotary_pct ) dim = ( self.rotary_ndims if self.rotary_ndims is not None else config.kv_channels ) self.rotary_emb = RotaryEmbedding(dim, base=config.rotary_emb_base) # self.use_flash_attn = config.use_flash_attn self.use_flash_attn = False self.is_fp32 = not (config.bf16 or config.fp16) if ( self.use_flash_attn and flash_attn_unpadded_func is not None and not self.is_fp32 ): self.registered_causal_mask = None else: max_positions = config.max_position_embeddings self.register_buffer( "registered_causal_mask", torch.tril( torch.ones((max_positions, max_positions), dtype=torch.bool) ).view(1, 1, max_positions, max_positions), persistent=False, ) self.h = nn.ModuleList( [ QWenBlock( config ) for i in range(config.num_hidden_layers) ] ) self.ln_f = RMSNorm( self.embed_dim, eps=config.layer_norm_epsilon, ) self.post_init() def get_input_embeddings(self): return self.wte def set_input_embeddings(self, new_embeddings): self.wte = new_embeddings def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ): output_attentions = ( output_attentions if output_attentions is not None else self.config.output_attentions ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = ( return_dict if return_dict is not None else self.config.use_return_dict ) if input_ids is not None and inputs_embeds is not None: raise ValueError( "You cannot specify both input_ids and inputs_embeds at the same time" ) elif input_ids is not None: input_shape = input_ids.size() # input_ids = input_ids.view(-1, input_shape[-1]) batch_size, seq_length = input_ids.shape # batch_size = input_ids.shape[0] elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] batch_size = inputs_embeds.shape[0] else: raise ValueError("You have to specify either 
input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if token_type_ids is not None: token_type_ids = token_type_ids.view(-1, input_shape[-1]) if position_ids is not None: position_ids = position_ids.view(-1, input_shape[-1]) if past_key_values is None: past_length = 0 past_key_values = tuple([None] * len(self.h)) else: past_length = past_key_values[0][0].size(-2) if inputs_embeds is None: inputs_embeds = self.wte(input_ids) # NOTE: adapt for lookahead if attention_mask is not None and len(attention_mask.shape) == 4: # with lookahead position_ids = torch.sum(attention_mask, dim=-1).squeeze(1) - 1 attention_mask = (1.0 - attention_mask.to(inputs_embeds.dtype)) * torch.finfo(inputs_embeds.dtype).min else: # without lookahead if position_ids is None: position_ids = torch.arange( past_length, input_shape[-1] + past_length, dtype=torch.long, device=device, ) position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1]) if attention_mask is not None: if batch_size <= 0: raise ValueError("batch_size has to be defined and > 0") attention_mask = attention_mask.view(batch_size, -1) attention_mask = attention_mask[:, None, None, :] attention_mask = attention_mask.to(dtype=self.dtype) attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min encoder_attention_mask = None head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) # if inputs_embeds is None: # inputs_embeds = self.wte(input_ids) hidden_states = inputs_embeds kv_seq_len = hidden_states.size()[1] if past_key_values[0] is not None: # past key values[0][0] shape: bs * seq_len * head_num * dim kv_seq_len += past_key_values[0][0].shape[1] if ( self.use_dynamic_ntk and kv_seq_len == hidden_states.size()[1] and not self.training ): context_value = math.log(kv_seq_len / self.seq_length, 2) + 1 ntk_alpha = 2 ** math.ceil(context_value) - 1 ntk_alpha = max(ntk_alpha, 1) else: ntk_alpha = self.rotary_emb._ntk_alpha_cached rotary_pos_emb = self.rotary_emb(kv_seq_len, ntk_alpha=ntk_alpha) for idx in range(len(rotary_pos_emb)): rotary_pos_emb[idx] = rotary_pos_emb[idx].to(hidden_states.device) hidden_states = self.drop(hidden_states) output_shape = input_shape + (hidden_states.size(-1),) if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
) use_cache = False presents = () if use_cache else None all_self_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: def create_custom_forward(module): def custom_forward(*inputs): # None for past_key_value return module(*inputs, use_cache, output_attentions) return custom_forward outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(block), hidden_states, rotary_pos_emb, self.registered_causal_mask, None, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask, ) else: outputs = block( hidden_states, layer_past=layer_past, rotary_pos_emb=rotary_pos_emb, registered_causal_mask=self.registered_causal_mask, attention_mask=attention_mask, head_mask=head_mask[i], encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = outputs[0] if use_cache is True: presents = presents + (outputs[1],) if output_attentions: all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],) hidden_states = self.ln_f(hidden_states) hidden_states = hidden_states.view(output_shape) # Add last hidden state if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, presents, all_hidden_states] if v is not None ) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_self_attentions, ) class QWenLMHeadModel(QWenPreTrainedModel): _keys_to_ignore_on_load_missing = [r"h\.\d+\.attn\.rotary_emb\.inv_freq"] _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.masked_bias"] def __init__(self, config): super().__init__(config) assert ( config.bf16 + config.fp16 + config.fp32 <= 1 ), "Only one of \"bf16\", \"fp16\", \"fp32\" can be true" autoset_precision = config.bf16 + config.fp16 + config.fp32 == 0 if autoset_precision: if SUPPORT_BF16: logger.warn( "The model is automatically converting to bf16 for faster inference. " "If you want to disable the automatic precision, please manually add bf16/fp16/fp32=True to \"AutoModelForCausalLM.from_pretrained\"." ) config.bf16 = True elif SUPPORT_FP16: logger.warn( "The model is automatically converting to fp16 for faster inference. " "If you want to disable the automatic precision, please manually add bf16/fp16/fp32=True to \"AutoModelForCausalLM.from_pretrained\"." 
) config.fp16 = True else: config.fp32 = True if config.bf16 and SUPPORT_CUDA and not SUPPORT_BF16: logger.warn( "Your device does NOT seem to support bf16, you can switch to fp16 or fp32 by by passing fp16/fp32=True in \"AutoModelForCausalLM.from_pretrained\".") if config.fp16 and SUPPORT_CUDA and not SUPPORT_FP16: logger.warn( "Your device does NOT support faster inference with fp16, please switch to fp32 which is likely to be faster") if config.fp32: if SUPPORT_BF16: logger.warn( "Your device support faster inference by passing bf16=True in \"AutoModelForCausalLM.from_pretrained\".") elif SUPPORT_FP16: logger.warn( "Your device support faster inference by passing fp16=True in \"AutoModelForCausalLM.from_pretrained\".") if config.use_flash_attn == "auto": if config.bf16 or config.fp16: logger.warn("Try importing flash-attention for faster inference...") config.use_flash_attn = True else: config.use_flash_attn = False if config.use_flash_attn and config.fp32: logger.warn("Flash attention will be disabled because it does NOT support fp32.") if config.use_flash_attn: _import_flash_attn() self.transformer = QWenModel(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) if config.bf16: self.transformer.bfloat16() self.lm_head.bfloat16() if config.fp16: self.transformer.half() self.lm_head.half() self.post_init() def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def prepare_inputs_for_generation( self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs ): token_type_ids = kwargs.get("token_type_ids", None) if past_key_values: input_ids = input_ids[:, -1].unsqueeze(-1) if token_type_ids is not None: token_type_ids = token_type_ids[:, -1].unsqueeze(-1) attention_mask = kwargs.get("attention_mask", None) position_ids = kwargs.get("position_ids", None) if attention_mask is not None and position_ids is None: position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -1].unsqueeze(-1) else: position_ids = None if inputs_embeds is not None and past_key_values is None: model_inputs = {"inputs_embeds": inputs_embeds} else: model_inputs = {"input_ids": input_ids} model_inputs.update( { "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache"), "position_ids": position_ids, "attention_mask": attention_mask, "token_type_ids": token_type_ids, } ) return model_inputs def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, CausalLMOutputWithPast]: return_dict = ( return_dict if return_dict is not None else self.config.use_return_dict ) transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, 
inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] lm_logits = self.lm_head(hidden_states) loss = None if labels is not None: labels = labels.to(lm_logits.device) shift_logits = lm_logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() loss_fct = CrossEntropyLoss() loss = loss_fct( shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1) ) if not return_dict: output = (lm_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return CausalLMOutputWithPast( loss=loss, logits=lm_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @staticmethod def _reorder_cache( past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor ) -> Tuple[Tuple[torch.Tensor]]: return tuple( tuple( past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past ) for layer_past in past_key_values ) def chat( self, tokenizer: PreTrainedTokenizer, query: str, history: Optional[HistoryType], system: str = "You are a helpful assistant.", append_history: bool = True, stream: Optional[bool] = _SENTINEL, stop_words_ids: Optional[List[List[int]]] = None, generation_config: Optional[GenerationConfig] = None, **kwargs, ) -> Tuple[str, HistoryType]: generation_config = generation_config if generation_config is not None else self.generation_config assert stream is _SENTINEL, _ERROR_STREAM_IN_CHAT assert generation_config.chat_format == 'chatml', _ERROR_BAD_CHAT_FORMAT if history is None: history = [] if stop_words_ids is None: stop_words_ids = [] max_window_size = kwargs.get('max_window_size', None) if max_window_size is None: max_window_size = generation_config.max_window_size
raw_text, context_tokens = make_context(
2
2023-12-19 13:11:38+00:00
24k
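The QWenModel.forward code in the row above derives a dynamic-NTK scaling factor from the ratio of the current key/value sequence length to the training context length before building rotary embeddings. A minimal standalone sketch of that calculation, assuming an illustrative helper name compute_ntk_alpha and a default seq_length of 2048 (neither is part of the original file):

import math

def compute_ntk_alpha(kv_seq_len: int, seq_length: int = 2048) -> float:
    # Mirrors the dynamic-NTK branch in QWenModel.forward: only contexts longer
    # than the training length (seq_length) raise alpha; shorter inputs keep 1.
    context_value = math.log(kv_seq_len / seq_length, 2) + 1
    ntk_alpha = 2 ** math.ceil(context_value) - 1
    return max(ntk_alpha, 1)

print(compute_ntk_alpha(2048))   # 1  (at or below the training context)
print(compute_ntk_alpha(4096))   # 3
print(compute_ntk_alpha(8192))   # 7

The resulting alpha is what the row's code passes to self.rotary_emb(kv_seq_len, ntk_alpha=...) so that rotary frequencies are stretched for longer-than-trained contexts.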
MingtaoGuo/AnimateAnyone_unofficial
aldm/aldm.py
[ { "identifier": "conv_nd", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def conv_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D convolution module.\n \"\"\"\n if dims == 1:\n return nn.Conv1d(*args, **kwargs)\n elif dims == 2:\n return nn.Conv2d(...
import einops import torch import torch as th import torch.nn as nn from ldm.modules.diffusionmodules.util import ( conv_nd, linear, zero_module, timestep_embedding, ) from einops import rearrange, repeat from torchvision.utils import make_grid from ldm.modules.attention import SpatialTransformer, SpatialTransformerPlus from ldm.modules.diffusionmodules.openaimodel import ResBlock, TimestepEmbedSequential, Downsample, AttentionBlock, Upsample, normalization, checkpoint, convert_module_to_f16, convert_module_to_f32 from ldm.models.diffusion.ddpm import LatentDiffusion from ldm.util import log_txt_as_img, exists, instantiate_from_config from ldm.models.diffusion.ddim import DDIMSampler from omegaconf.listconfig import ListConfig from omegaconf.listconfig import ListConfig
18,001
class ReferenceNet(nn.Module): """ The full UNet model with attention and timestep embedding. :param in_channels: channels in the input Tensor. :param model_channels: base channel count for the model. :param out_channels: channels in the output Tensor. :param num_res_blocks: number of residual blocks per downsample. :param attention_resolutions: a collection of downsample rates at which attention will take place. May be a set, list, or tuple. For example, if this contains 4, then at 4x downsampling, attention will be used. :param dropout: the dropout probability. :param channel_mult: channel multiplier for each level of the UNet. :param conv_resample: if True, use learned convolutions for upsampling and downsampling. :param dims: determines if the signal is 1D, 2D, or 3D. :param num_classes: if specified (as an int), then this model will be class-conditional with `num_classes` classes. :param use_checkpoint: use gradient checkpointing to reduce memory usage. :param num_heads: the number of attention heads in each attention layer. :param num_heads_channels: if specified, ignore num_heads and instead use a fixed channel width per attention head. :param num_heads_upsample: works with num_heads to set a different number of heads for upsampling. Deprecated. :param use_scale_shift_norm: use a FiLM-like conditioning mechanism. :param resblock_updown: use residual blocks for up/downsampling. :param use_new_attention_order: use a different attention pattern for potentially increased efficiency. """ def __init__( self, image_size, in_channels, model_channels, num_res_blocks, attention_resolutions, dropout=0, channel_mult=(1, 2, 4, 8), conv_resample=True, dims=2, num_classes=None, use_checkpoint=False, use_fp16=False, num_heads=-1, num_head_channels=-1, num_heads_upsample=-1, use_scale_shift_norm=False, resblock_updown=False, use_new_attention_order=False, use_spatial_transformer=False, # custom transformer support transformer_depth=1, # custom transformer support context_dim=None, # custom transformer support n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model legacy=True, disable_self_attentions=None, num_attention_blocks=None, disable_middle_self_attn=False, use_linear_in_transformer=False, ): super().__init__() if use_spatial_transformer: assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...' if context_dim is not None: assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...' 
if type(context_dim) == ListConfig: context_dim = list(context_dim) if num_heads_upsample == -1: num_heads_upsample = num_heads if num_heads == -1: assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set' if num_head_channels == -1: assert num_heads != -1, 'Either num_heads or num_head_channels has to be set' self.image_size = image_size self.in_channels = in_channels self.model_channels = model_channels if isinstance(num_res_blocks, int): self.num_res_blocks = len(channel_mult) * [num_res_blocks] else: if len(num_res_blocks) != len(channel_mult): raise ValueError("provide num_res_blocks either as an int (globally constant) or " "as a list/tuple (per-level) with the same length as channel_mult") self.num_res_blocks = num_res_blocks if disable_self_attentions is not None: # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not assert len(disable_self_attentions) == len(channel_mult) if num_attention_blocks is not None: assert len(num_attention_blocks) == len(self.num_res_blocks) assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks)))) print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. " f"This option has LESS priority than attention_resolutions {attention_resolutions}, " f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, " f"attention will still not be set.") self.attention_resolutions = attention_resolutions self.dropout = dropout self.channel_mult = channel_mult self.conv_resample = conv_resample self.num_classes = num_classes self.use_checkpoint = use_checkpoint self.dtype = th.float16 if use_fp16 else th.float32 self.num_heads = num_heads self.num_head_channels = num_head_channels self.num_heads_upsample = num_heads_upsample self.predict_codebook_ids = n_embed is not None time_embed_dim = model_channels * 4 self.time_embed = nn.Sequential( linear(model_channels, time_embed_dim), nn.SiLU(), linear(time_embed_dim, time_embed_dim), ) self.input_blocks = nn.ModuleList( [ TimestepEmbedSequential(
class ReferenceNet(nn.Module): """ The full UNet model with attention and timestep embedding. :param in_channels: channels in the input Tensor. :param model_channels: base channel count for the model. :param out_channels: channels in the output Tensor. :param num_res_blocks: number of residual blocks per downsample. :param attention_resolutions: a collection of downsample rates at which attention will take place. May be a set, list, or tuple. For example, if this contains 4, then at 4x downsampling, attention will be used. :param dropout: the dropout probability. :param channel_mult: channel multiplier for each level of the UNet. :param conv_resample: if True, use learned convolutions for upsampling and downsampling. :param dims: determines if the signal is 1D, 2D, or 3D. :param num_classes: if specified (as an int), then this model will be class-conditional with `num_classes` classes. :param use_checkpoint: use gradient checkpointing to reduce memory usage. :param num_heads: the number of attention heads in each attention layer. :param num_heads_channels: if specified, ignore num_heads and instead use a fixed channel width per attention head. :param num_heads_upsample: works with num_heads to set a different number of heads for upsampling. Deprecated. :param use_scale_shift_norm: use a FiLM-like conditioning mechanism. :param resblock_updown: use residual blocks for up/downsampling. :param use_new_attention_order: use a different attention pattern for potentially increased efficiency. """ def __init__( self, image_size, in_channels, model_channels, num_res_blocks, attention_resolutions, dropout=0, channel_mult=(1, 2, 4, 8), conv_resample=True, dims=2, num_classes=None, use_checkpoint=False, use_fp16=False, num_heads=-1, num_head_channels=-1, num_heads_upsample=-1, use_scale_shift_norm=False, resblock_updown=False, use_new_attention_order=False, use_spatial_transformer=False, # custom transformer support transformer_depth=1, # custom transformer support context_dim=None, # custom transformer support n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model legacy=True, disable_self_attentions=None, num_attention_blocks=None, disable_middle_self_attn=False, use_linear_in_transformer=False, ): super().__init__() if use_spatial_transformer: assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...' if context_dim is not None: assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...' 
if type(context_dim) == ListConfig: context_dim = list(context_dim) if num_heads_upsample == -1: num_heads_upsample = num_heads if num_heads == -1: assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set' if num_head_channels == -1: assert num_heads != -1, 'Either num_heads or num_head_channels has to be set' self.image_size = image_size self.in_channels = in_channels self.model_channels = model_channels if isinstance(num_res_blocks, int): self.num_res_blocks = len(channel_mult) * [num_res_blocks] else: if len(num_res_blocks) != len(channel_mult): raise ValueError("provide num_res_blocks either as an int (globally constant) or " "as a list/tuple (per-level) with the same length as channel_mult") self.num_res_blocks = num_res_blocks if disable_self_attentions is not None: # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not assert len(disable_self_attentions) == len(channel_mult) if num_attention_blocks is not None: assert len(num_attention_blocks) == len(self.num_res_blocks) assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks)))) print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. " f"This option has LESS priority than attention_resolutions {attention_resolutions}, " f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, " f"attention will still not be set.") self.attention_resolutions = attention_resolutions self.dropout = dropout self.channel_mult = channel_mult self.conv_resample = conv_resample self.num_classes = num_classes self.use_checkpoint = use_checkpoint self.dtype = th.float16 if use_fp16 else th.float32 self.num_heads = num_heads self.num_head_channels = num_head_channels self.num_heads_upsample = num_heads_upsample self.predict_codebook_ids = n_embed is not None time_embed_dim = model_channels * 4 self.time_embed = nn.Sequential( linear(model_channels, time_embed_dim), nn.SiLU(), linear(time_embed_dim, time_embed_dim), ) self.input_blocks = nn.ModuleList( [ TimestepEmbedSequential(
conv_nd(dims, in_channels, model_channels, 3, padding=1)
0
2023-12-16 03:31:33+00:00
24k
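The ReferenceNet constructor in the row above projects timestep features of width model_channels through a two-layer MLP of width model_channels * 4 (linear, SiLU, linear). A self-contained sketch of that pattern in plain PyTorch; the sinusoidal helper below is a generic stand-in rather than the project's own timestep_embedding util, and model_channels = 320 is an illustrative value:

import math
import torch
import torch.nn as nn

def sinusoidal_timestep_embedding(timesteps: torch.Tensor, dim: int, max_period: int = 10000) -> torch.Tensor:
    # Standard transformer-style sinusoidal features for integer diffusion timesteps.
    half = dim // 2
    freqs = torch.exp(-math.log(max_period) * torch.arange(half, dtype=torch.float32) / half)
    args = timesteps[:, None].float() * freqs[None]
    return torch.cat([torch.cos(args), torch.sin(args)], dim=-1)

model_channels = 320                       # illustrative base width, not from the original config
time_embed_dim = model_channels * 4
time_embed = nn.Sequential(                # mirrors ReferenceNet.time_embed
    nn.Linear(model_channels, time_embed_dim),
    nn.SiLU(),
    nn.Linear(time_embed_dim, time_embed_dim),
)

t = torch.randint(0, 1000, (8,))           # a batch of diffusion timesteps
emb = time_embed(sinusoidal_timestep_embedding(t, model_channels))
print(emb.shape)                           # torch.Size([8, 1280])

Every ResBlock in the UNet then receives this per-sample embedding as its conditioning signal.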
yasserben/CLOUDS
train_net.py
[ { "identifier": "add_maskformer2_config", "path": "clouds/config.py", "snippet": "def add_maskformer2_config(cfg):\n \"\"\"\n Add config for MASK_FORMER.\n \"\"\"\n # NOTE: configs from original maskformer\n # data config\n # select the dataset mapper\n cfg.INPUT.DATASET_MAPPER_NAME...
from shapely.errors import ShapelyDeprecationWarning from collections import OrderedDict from typing import Any, Dict, List, Set from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import get_cfg from detectron2.data import ( MetadataCatalog, build_detection_train_loader, build_detection_test_loader, ) from detectron2.engine import ( DefaultTrainer, default_argument_parser, default_setup, launch, ) from detectron2.modeling import build_model from detectron2.evaluation import ( CityscapesInstanceEvaluator, CityscapesSemSegEvaluator, COCOEvaluator, COCOPanopticEvaluator, DatasetEvaluators, LVISEvaluator, SemSegEvaluator, verify_results, inference_on_dataset, print_csv_format, DatasetEvaluator, ) from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler from detectron2.solver.build import maybe_add_gradient_clipping from detectron2.utils.logger import setup_logger from detectron2.engine import hooks from fvcore.nn.precise_bn import get_bn_modules from clouds import ( CityscapesSemSegEvaluator, ClassicalSemSegEvaluator, MapperTrain, MapperTest, add_maskformer2_config, add_clouds_config, add_wandb_config, add_prerocessing_training_set_config, PersoEvalHook, add_repeat_factors, ) from clouds.utils import setup_wandb, WandbWriter import warnings import copy import itertools import logging import os import ast import torch import detectron2.utils.comm as comm
14,555
evaluators (list[DatasetEvaluator] or None): if None, will call :meth:`build_evaluator`. Otherwise, must have the same length as ``cfg.DATASETS.TEST``. Returns: dict: a dict of result metrics """ logger = logging.getLogger(__name__) if isinstance(evaluators, DatasetEvaluator): evaluators = [evaluators] if evaluators is not None: assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format( len(cfg.DATASETS.TEST), len(evaluators) ) results = OrderedDict() for idx, dataset_name in enumerate(cfg.DATASETS.TEST): data_loader = cls.build_test_loader(cfg, dataset_name) # When evaluators are passed in as arguments, # implicitly assume that evaluators can be created before data_loader. if evaluators is not None: evaluator = evaluators[idx] else: try: evaluator = cls.build_evaluator( cfg, dataset_name, output_folder=output_folder ) except NotImplementedError: logger.warn( "No evaluator found. Use `DefaultTrainer.test(evaluators=)`, " "or implement its `build_evaluator` method." ) results[dataset_name] = {} continue results_i = inference_on_dataset(model, data_loader, evaluator) results[dataset_name] = results_i if comm.is_main_process(): assert isinstance( results_i, dict ), "Evaluator must return a dict on the main process. Got {} instead.".format( results_i ) logger.info( "Evaluation results for {} in csv format:".format(dataset_name) ) print_csv_format(results_i) if len(results) == 1: results = list(results.values())[0] return results def build_hooks(self): """ Build a list of default hooks, including timing, evaluation, checkpointing, lr scheduling, precise BN, writing events. Returns: list[HookBase]: """ cfg = self.cfg.clone() cfg.defrost() cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN ret = [ hooks.IterationTimer(), hooks.LRScheduler(), hooks.PreciseBN( # Run at the same freq as (but before) evaluation. cfg.TEST.EVAL_PERIOD, self.model, # Build a new data loader to not affect training self.build_train_loader(cfg), cfg.TEST.PRECISE_BN.NUM_ITER, ) if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model) else None, ] # Do PreciseBN before checkpointer, because it updates the model and need to # be saved by checkpointer. # This is not always the best: if checkpointing has a different frequency, # some checkpoints may have more precise statistics than others. if comm.is_main_process(): ret.append( hooks.PeriodicCheckpointer(self.checkpointer, cfg.TEST.EVAL_PERIOD * 5) ) def test_and_save_results(): self._last_eval_results = self.test(self.cfg, self.model) return self._last_eval_results # Do evaluation after checkpointer, because then if it fails, # we can use the saved checkpoint to debug. # ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results)) ret.append(PersoEvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results)) if comm.is_main_process(): # Here the default print/log frequency of each writer is used. # run writers in the end, so that evaluation metrics are written ret.append(hooks.PeriodicWriter(self.build_writers(), period=20)) return ret def setup(args): """ Create configs and perform basic setups. """ cfg = get_cfg() # for poly lr schedule add_deeplab_config(cfg) add_maskformer2_config(cfg) add_clouds_config(cfg) add_wandb_config(cfg) add_prerocessing_training_set_config(cfg) cfg.merge_from_file(args.config_file) cfg.merge_from_list(args.opts) add_repeat_factors(cfg) cfg.freeze() default_setup(cfg, args) if not args.eval_only:
""" Copyright 2023 Telecom Paris, Yasser BENIGMIM. All rights reserved. Licensed under the Apache License, Version 2.0 Reference: https://github.com/facebookresearch/Mask2Former/blob/main/train_net.py CLOUDS Training Script. This script is a simplified version of the training script in detectron2/tools. """ try: # ignore ShapelyDeprecationWarning from fvcore warnings.filterwarnings("ignore", category=ShapelyDeprecationWarning) except: pass class Trainer(DefaultTrainer): """ Extension of the Trainer class adapted to CLOUDS. """ def build_writers(self): writers = super().build_writers() # use wandb writer instead. writers[-1] = WandbWriter() return writers @classmethod def build_model(cls, cfg): """ Returns: torch.nn.Module: It now calls :func:`detectron2.modeling.build_model`. Overwrite it if you'd like a different model. """ model = build_model(cfg) # logger = logging.getLogger(__name__) # logger.info("Model:\n{}".format(model)) return model # @classmethod # def build_model(cls, cfg): # """ # Returns: # torch.nn.Module: # # It now calls :func:`detectron2.modeling.build_model`. # Overwrite it if you'd like a different model. # """ # model = build_model(cfg) # # logger = logging.getLogger(__name__) # # logger.info("Model:\n{}".format(model)) # return model @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): """ Create evaluator(s) for a given dataset. This uses the special metadata "evaluator_type" associated with each builtin dataset. For your own dataset, you can simply create an evaluator manually in your script and do not have to worry about the hacky if-else logic here. """ if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") else: output_folder = os.path.join(cfg.OUTPUT_DIR, output_folder, "inference") evaluator_list = [] evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type # semantic segmentation if ( evaluator_type == "bdd_sem_seg" or evaluator_type == "mapillary_sem_seg" or evaluator_type == "acdc_sem_seg" ): evaluator_list.append( ClassicalSemSegEvaluator( dataset_name, distributed=True, output_dir=output_folder, save_pl=cfg.MODEL.SAVE_PSEUDO_LABELS, ) ) # Cityscapes if evaluator_type == "cityscapes_sem_seg": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." # return CityscapesSemSegEvaluator(dataset_name) if cfg.MODEL.SAVE_PSEUDO_LABELS: return CityscapesSemSegEvaluator( dataset_name, save_pl=True, output_dir=output_folder ) else: return CityscapesSemSegEvaluator(dataset_name) if len(evaluator_list) == 0: raise NotImplementedError( "no Evaluator for the dataset {} with the type {}".format( dataset_name, evaluator_type ) ) elif len(evaluator_list) == 1: return evaluator_list[0] return DatasetEvaluators(evaluator_list) @classmethod def build_train_loader(cls, cfg): # Semantic segmentation dataset mapper mapper = MapperTrain(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) @classmethod def build_test_loader(cls, cfg, dataset_name): mapper = MapperTest(cfg, False) return build_detection_test_loader( cfg, dataset_name, batch_size=1, mapper=mapper ) @classmethod def build_lr_scheduler(cls, cfg, optimizer): """ It now calls :func:`detectron2.solver.build_lr_scheduler`. Overwrite it if you'd like a different scheduler. 
""" return build_lr_scheduler(cfg, optimizer) @classmethod def build_optimizer(cls, cfg, model): weight_decay_norm = cfg.SOLVER.WEIGHT_DECAY_NORM weight_decay_embed = cfg.SOLVER.WEIGHT_DECAY_EMBED defaults = {} defaults["lr"] = cfg.SOLVER.BASE_LR defaults["weight_decay"] = cfg.SOLVER.WEIGHT_DECAY norm_module_types = ( torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d, torch.nn.SyncBatchNorm, # NaiveSyncBatchNorm inherits from BatchNorm2d torch.nn.GroupNorm, torch.nn.InstanceNorm1d, torch.nn.InstanceNorm2d, torch.nn.InstanceNorm3d, torch.nn.LayerNorm, torch.nn.LocalResponseNorm, ) params: List[Dict[str, Any]] = [] memo: Set[torch.nn.parameter.Parameter] = set() for module_name, module in model.named_modules(): for module_param_name, value in module.named_parameters(recurse=False): if not value.requires_grad: continue if cfg.MODEL.CLOUDS.OVERWRITING: if any( ignored_module in module_name for ignored_module in ["sem_seg_head_ema.", "sam.sam."] ): continue # Avoid duplicating parameters if value in memo: continue memo.add(value) hyperparams = copy.copy(defaults) if "backbone" in module_name: hyperparams["lr"] = ( hyperparams["lr"] * cfg.SOLVER.BACKBONE_MULTIPLIER ) if ( "relative_position_bias_table" in module_param_name or "absolute_pos_embed" in module_param_name ): print(module_param_name) hyperparams["weight_decay"] = 0.0 if isinstance(module, norm_module_types): hyperparams["weight_decay"] = weight_decay_norm if isinstance(module, torch.nn.Embedding): hyperparams["weight_decay"] = weight_decay_embed params.append({"params": [value], **hyperparams}) def maybe_add_full_model_gradient_clipping(optim): # detectron2 doesn't have full model gradient clipping now clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE enable = ( cfg.SOLVER.CLIP_GRADIENTS.ENABLED and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model" and clip_norm_val > 0.0 ) class FullModelGradientClippingOptimizer(optim): def step(self, closure=None): all_params = itertools.chain( *[x["params"] for x in self.param_groups] ) torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val) super().step(closure=closure) return FullModelGradientClippingOptimizer if enable else optim optimizer_type = cfg.SOLVER.OPTIMIZER if optimizer_type == "SGD": optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)( params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM ) elif optimizer_type == "ADAMW": optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)( params, cfg.SOLVER.BASE_LR ) else: raise NotImplementedError(f"no optimizer type {optimizer_type}") if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model": optimizer = maybe_add_gradient_clipping(cfg, optimizer) return optimizer @classmethod def test(cls, cfg, model, output_folder=None, evaluators=None): """ Evaluate the given model. The given model is expected to already contain weights to evaluate. Args: cfg (CfgNode): model (nn.Module): evaluators (list[DatasetEvaluator] or None): if None, will call :meth:`build_evaluator`. Otherwise, must have the same length as ``cfg.DATASETS.TEST``. 
Returns: dict: a dict of result metrics """ logger = logging.getLogger(__name__) if isinstance(evaluators, DatasetEvaluator): evaluators = [evaluators] if evaluators is not None: assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format( len(cfg.DATASETS.TEST), len(evaluators) ) results = OrderedDict() for idx, dataset_name in enumerate(cfg.DATASETS.TEST): data_loader = cls.build_test_loader(cfg, dataset_name) # When evaluators are passed in as arguments, # implicitly assume that evaluators can be created before data_loader. if evaluators is not None: evaluator = evaluators[idx] else: try: evaluator = cls.build_evaluator( cfg, dataset_name, output_folder=output_folder ) except NotImplementedError: logger.warn( "No evaluator found. Use `DefaultTrainer.test(evaluators=)`, " "or implement its `build_evaluator` method." ) results[dataset_name] = {} continue results_i = inference_on_dataset(model, data_loader, evaluator) results[dataset_name] = results_i if comm.is_main_process(): assert isinstance( results_i, dict ), "Evaluator must return a dict on the main process. Got {} instead.".format( results_i ) logger.info( "Evaluation results for {} in csv format:".format(dataset_name) ) print_csv_format(results_i) if len(results) == 1: results = list(results.values())[0] return results def build_hooks(self): """ Build a list of default hooks, including timing, evaluation, checkpointing, lr scheduling, precise BN, writing events. Returns: list[HookBase]: """ cfg = self.cfg.clone() cfg.defrost() cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN ret = [ hooks.IterationTimer(), hooks.LRScheduler(), hooks.PreciseBN( # Run at the same freq as (but before) evaluation. cfg.TEST.EVAL_PERIOD, self.model, # Build a new data loader to not affect training self.build_train_loader(cfg), cfg.TEST.PRECISE_BN.NUM_ITER, ) if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model) else None, ] # Do PreciseBN before checkpointer, because it updates the model and need to # be saved by checkpointer. # This is not always the best: if checkpointing has a different frequency, # some checkpoints may have more precise statistics than others. if comm.is_main_process(): ret.append( hooks.PeriodicCheckpointer(self.checkpointer, cfg.TEST.EVAL_PERIOD * 5) ) def test_and_save_results(): self._last_eval_results = self.test(self.cfg, self.model) return self._last_eval_results # Do evaluation after checkpointer, because then if it fails, # we can use the saved checkpoint to debug. # ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results)) ret.append(PersoEvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results)) if comm.is_main_process(): # Here the default print/log frequency of each writer is used. # run writers in the end, so that evaluation metrics are written ret.append(hooks.PeriodicWriter(self.build_writers(), period=20)) return ret def setup(args): """ Create configs and perform basic setups. """ cfg = get_cfg() # for poly lr schedule add_deeplab_config(cfg) add_maskformer2_config(cfg) add_clouds_config(cfg) add_wandb_config(cfg) add_prerocessing_training_set_config(cfg) cfg.merge_from_file(args.config_file) cfg.merge_from_list(args.opts) add_repeat_factors(cfg) cfg.freeze() default_setup(cfg, args) if not args.eval_only:
setup_wandb(cfg, args)
11
2023-12-15 15:40:58+00:00
24k
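Trainer.build_optimizer in the row above wraps the chosen optimizer class so that one global gradient-norm clip runs across every parameter group before each step ("full_model" clipping, which detectron2's built-in clipping does not cover). A condensed sketch of that wrapper in plain PyTorch; the factory name, clip value, and toy model are illustrative:

import itertools
import torch

def with_full_model_grad_clipping(optim_cls, clip_norm_val: float):
    # Returns a subclass of optim_cls that clips the global grad norm before stepping,
    # matching the FullModelGradientClippingOptimizer pattern in Trainer.build_optimizer.
    class FullModelGradientClippingOptimizer(optim_cls):
        def step(self, closure=None):
            all_params = itertools.chain(*[g["params"] for g in self.param_groups])
            torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val)
            super().step(closure=closure)
    return FullModelGradientClippingOptimizer

model = torch.nn.Linear(16, 4)                                    # toy model, illustrative
opt = with_full_model_grad_clipping(torch.optim.AdamW, 1.0)(model.parameters(), lr=1e-4)
loss = model(torch.randn(2, 16)).sum()
loss.backward()
opt.step()                                                        # global grad norm clipped to <= 1.0 first

In the original code the wrapper is only applied when CLIP_GRADIENTS.CLIP_TYPE == "full_model" and the clip value is positive; otherwise detectron2's maybe_add_gradient_clipping handles per-parameter clipping.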
Ruiyuan-Zhang/CCS
multi_part_assembly/utils/wx_transformer_utilities/transformer_layer.py
[ { "identifier": "LayerNorm", "path": "multi_part_assembly/utils/wx_transformer_utilities/layer_norm.py", "snippet": "def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False):\n if not export and torch.cuda.is_available() and has_fused_layernorm:\n return FusedLayerNorm(...
from typing import Dict, List, Optional from .layer_norm import LayerNorm from .multihead_attention import MultiheadAttention from .relational_memory import RelationalMemory from .group_linear_layer import GroupLinearLayer from .basic_mha import MemoryAttention from .quant_noise import quant_noise from .fairseq_dropout import FairseqDropout from torch import Tensor import torch import torch.nn as nn import multi_part_assembly.utils.wx_transformer_utilities.fairseq_utils as utils import random import torch.nn.functional as F
15,532
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. #from fairseq.modules.shared_group_linear_layer import SharedGroupLinearLayer class TransformerEncoderLayerVanilla(nn.Module): """Encoder layer block. In the original paper each operation (multi-head attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.encoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments """ def __init__(self, args, out_proj = None): super().__init__() self.embed_dim = args.encoder_embed_dim self.self_attn = self.build_self_attention(self.embed_dim, args) self.self_attn_layer_norm = LayerNorm(self.embed_dim, eps=1e-5) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, "activation_fn", "relu") ) self.activation_dropout = getattr(args, "activation_dropout", 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, "relu_dropout", 0) self.normalize_before = args.encoder_normalize_before self.fc1 = self.build_fc1(self.embed_dim, args.encoder_ffn_embed_dim) self.fc2 = self.build_fc2(args.encoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim, eps=1e-5) if out_proj is not None: self.final_linear = nn.Linear(args.encoder_embed_dim, out_proj) else: self.final_linear = None def build_fc1(self, input_dim, output_dim): return nn.Linear(input_dim, output_dim) def build_fc2(self, input_dim, output_dim): return nn.Linear(input_dim, output_dim) def build_self_attention(self, embed_dim, args):
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. #from fairseq.modules.shared_group_linear_layer import SharedGroupLinearLayer class TransformerEncoderLayerVanilla(nn.Module): """Encoder layer block. In the original paper each operation (multi-head attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.encoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments """ def __init__(self, args, out_proj = None): super().__init__() self.embed_dim = args.encoder_embed_dim self.self_attn = self.build_self_attention(self.embed_dim, args) self.self_attn_layer_norm = LayerNorm(self.embed_dim, eps=1e-5) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, "activation_fn", "relu") ) self.activation_dropout = getattr(args, "activation_dropout", 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, "relu_dropout", 0) self.normalize_before = args.encoder_normalize_before self.fc1 = self.build_fc1(self.embed_dim, args.encoder_ffn_embed_dim) self.fc2 = self.build_fc2(args.encoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim, eps=1e-5) if out_proj is not None: self.final_linear = nn.Linear(args.encoder_embed_dim, out_proj) else: self.final_linear = None def build_fc1(self, input_dim, output_dim): return nn.Linear(input_dim, output_dim) def build_fc2(self, input_dim, output_dim): return nn.Linear(input_dim, output_dim) def build_self_attention(self, embed_dim, args):
return MultiheadAttention(
1
2023-12-15 13:13:01+00:00
24k
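The docstring in the record above contrasts post-norm (original paper) and pre-norm (tensor2tensor) ordering, toggled by *args.encoder_normalize_before*. A minimal sketch of that residual wrapper, written independently of fairseq (the helper name and signature are illustrative, not part of the repository):

import torch
import torch.nn.functional as F

def residual_sublayer(x, layer_norm, sublayer, dropout_p, normalize_before):
    # pre-norm (tensor2tensor style): layernorm -> sublayer -> dropout -> add residual
    # post-norm (original paper):     sublayer -> dropout -> add residual -> layernorm
    residual = x
    if normalize_before:
        x = layer_norm(x)
    x = sublayer(x)
    x = F.dropout(x, p=dropout_p, training=False)
    x = residual + x
    if not normalize_before:
        x = layer_norm(x)
    return x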
m-abr/FCPCodebase
world/World.py
[ { "identifier": "Logger", "path": "logs/Logger.py", "snippet": "class Logger():\n _folder = None\n\n def __init__(self, is_enabled:bool, topic:str) -> None:\n self.no_of_entries = 0 \n self.enabled = is_enabled\n self.topic = topic\n\n def write(self, msg:str, timestamp:boo...
from collections import deque from cpp.ball_predictor import ball_predictor from cpp.localization import localization from logs.Logger import Logger from math import atan2, pi from math_ops.Matrix_4x4 import Matrix_4x4 from world.commons.Draw import Draw from world.commons.Other_Robot import Other_Robot from world.Robot import Robot import numpy as np
19,012
class World(): STEPTIME = 0.02 # Fixed step time STEPTIME_MS = 20 # Fixed step time in milliseconds VISUALSTEP = 0.04 # Fixed visual step time VISUALSTEP_MS = 40 # Fixed visual step time in milliseconds # play modes in our favor M_OUR_KICKOFF = 0 M_OUR_KICK_IN = 1 M_OUR_CORNER_KICK = 2 M_OUR_GOAL_KICK = 3 M_OUR_FREE_KICK = 4 M_OUR_PASS = 5 M_OUR_DIR_FREE_KICK = 6 M_OUR_GOAL = 7 M_OUR_OFFSIDE = 8 # play modes in their favor M_THEIR_KICKOFF = 9 M_THEIR_KICK_IN = 10 M_THEIR_CORNER_KICK = 11 M_THEIR_GOAL_KICK = 12 M_THEIR_FREE_KICK = 13 M_THEIR_PASS = 14 M_THEIR_DIR_FREE_KICK = 15 M_THEIR_GOAL = 16 M_THEIR_OFFSIDE = 17 # neutral play modes M_BEFORE_KICKOFF = 18 M_GAME_OVER = 19 M_PLAY_ON = 20 # play mode groups MG_OUR_KICK = 0 MG_THEIR_KICK = 1 MG_ACTIVE_BEAM = 2 MG_PASSIVE_BEAM = 3 MG_OTHER = 4 # play on, game over FLAGS_CORNERS_POS = ((-15,-10,0), (-15,+10,0), (+15,-10,0), (+15,+10,0)) FLAGS_POSTS_POS = ((-15,-1.05,0.8),(-15,+1.05,0.8),(+15,-1.05,0.8),(+15,+1.05,0.8)) def __init__(self,robot_type:int, team_name:str, unum:int, apply_play_mode_correction:bool, enable_draw:bool, logger:Logger, host:str) -> None: self.team_name = team_name # Name of our team self.team_name_opponent : str = None # Name of opponent team self.apply_play_mode_correction = apply_play_mode_correction # True to adjust ball position according to play mode self.step = 0 # Total number of received simulation steps (always in sync with self.time_local_ms) self.time_server = 0.0 # Time, in seconds, as indicated by the server (this time is NOT reliable, use only for synchronization between agents) self.time_local_ms = 0 # Reliable simulation time in milliseconds, use this when possible (it is incremented 20ms for every TCP message) self.time_game = 0.0 # Game time, in seconds, as indicated by the server self.goals_scored = 0 # Goals score by our team self.goals_conceded = 0 # Goals conceded by our team self.team_side_is_left : bool = None # True if our team plays on the left side (this value is later changed by the world parser) self.play_mode = None # Play mode of the soccer game, provided by the server self.play_mode_group = None # Certain play modes share characteristics, so it makes sense to group them self.flags_corners : dict = None # corner flags, key=(x,y,z), always assume we play on the left side self.flags_posts : dict = None # goal posts, key=(x,y,z), always assume we play on the left side self.ball_rel_head_sph_pos = np.zeros(3) # Ball position relative to head (spherical coordinates) (m, deg, deg) self.ball_rel_head_cart_pos = np.zeros(3) # Ball position relative to head (cartesian coordinates) (m) self.ball_rel_torso_cart_pos = np.zeros(3) # Ball position relative to torso (cartesian coordinates) (m) self.ball_rel_torso_cart_pos_history = deque(maxlen=20) # Ball position relative to torso history (queue with up to 20 old positions at intervals of 0.04s, where index 0 is the previous position) self.ball_abs_pos = np.zeros(3) # Ball absolute position (up to date if self.ball_is_visible and self.robot.loc_is_up_to_date) (m) self.ball_abs_pos_history = deque(maxlen=20) # Ball absolute position history (queue with up to 20 old positions at intervals of 0.04s, where index 0 is the previous position) self.ball_abs_pos_last_update = 0 # World.time_local_ms when self.ball_abs_pos was last updated by vision or radio self.ball_abs_vel = np.zeros(3) # Ball velocity vector based on the last 2 known values of self.ball_abs_pos (m/s) (Warning: noisy if ball is distant, use instead get_ball_abs_vel) self.ball_abs_speed = 0 # 
Ball scalar speed based on the last 2 known values of self.ball_abs_pos (m/s) (Warning: noisy if ball is distant, use instead ||get_ball_abs_vel||) self.ball_is_visible = False # True if the last server message contained vision information related to the ball self.is_ball_abs_pos_from_vision = False # True if ball_abs_pos originated from vision, False if it originated from radio self.ball_last_seen = 0 # World.time_local_ms when ball was last seen (note: may be different from self.ball_abs_pos_last_update) self.ball_cheat_abs_pos = np.zeros(3) # Absolute ball position provided by the server as cheat (m) self.ball_cheat_abs_vel = np.zeros(3) # Absolute velocity vector based on the last 2 values of self.ball_cheat_abs_pos (m/s) self.ball_2d_pred_pos = np.zeros((1,2)) # prediction of current and future 2D ball positions* self.ball_2d_pred_vel = np.zeros((1,2)) # prediction of current and future 2D ball velocities* self.ball_2d_pred_spd = np.zeros(1) # prediction of current and future 2D ball linear speeds* # *at intervals of 0.02 s until ball comes to a stop or gets out of bounds (according to prediction) self.lines = np.zeros((30,6)) # Position of visible lines, relative to head, start_pos+end_pos (spherical coordinates) (m, deg, deg, m, deg, deg) self.line_count = 0 # Number of visible lines self.vision_last_update = 0 # World.time_local_ms when last vision update was received self.vision_is_up_to_date = False # True if the last server message contained vision information self.teammates = [Other_Robot(i, True ) for i in range(1,12)] # List of teammates, ordered by unum self.opponents = [Other_Robot(i, False) for i in range(1,12)] # List of opponents, ordered by unum self.teammates[unum-1].is_self = True # This teammate is self
class World(): STEPTIME = 0.02 # Fixed step time STEPTIME_MS = 20 # Fixed step time in milliseconds VISUALSTEP = 0.04 # Fixed visual step time VISUALSTEP_MS = 40 # Fixed visual step time in milliseconds # play modes in our favor M_OUR_KICKOFF = 0 M_OUR_KICK_IN = 1 M_OUR_CORNER_KICK = 2 M_OUR_GOAL_KICK = 3 M_OUR_FREE_KICK = 4 M_OUR_PASS = 5 M_OUR_DIR_FREE_KICK = 6 M_OUR_GOAL = 7 M_OUR_OFFSIDE = 8 # play modes in their favor M_THEIR_KICKOFF = 9 M_THEIR_KICK_IN = 10 M_THEIR_CORNER_KICK = 11 M_THEIR_GOAL_KICK = 12 M_THEIR_FREE_KICK = 13 M_THEIR_PASS = 14 M_THEIR_DIR_FREE_KICK = 15 M_THEIR_GOAL = 16 M_THEIR_OFFSIDE = 17 # neutral play modes M_BEFORE_KICKOFF = 18 M_GAME_OVER = 19 M_PLAY_ON = 20 # play mode groups MG_OUR_KICK = 0 MG_THEIR_KICK = 1 MG_ACTIVE_BEAM = 2 MG_PASSIVE_BEAM = 3 MG_OTHER = 4 # play on, game over FLAGS_CORNERS_POS = ((-15,-10,0), (-15,+10,0), (+15,-10,0), (+15,+10,0)) FLAGS_POSTS_POS = ((-15,-1.05,0.8),(-15,+1.05,0.8),(+15,-1.05,0.8),(+15,+1.05,0.8)) def __init__(self,robot_type:int, team_name:str, unum:int, apply_play_mode_correction:bool, enable_draw:bool, logger:Logger, host:str) -> None: self.team_name = team_name # Name of our team self.team_name_opponent : str = None # Name of opponent team self.apply_play_mode_correction = apply_play_mode_correction # True to adjust ball position according to play mode self.step = 0 # Total number of received simulation steps (always in sync with self.time_local_ms) self.time_server = 0.0 # Time, in seconds, as indicated by the server (this time is NOT reliable, use only for synchronization between agents) self.time_local_ms = 0 # Reliable simulation time in milliseconds, use this when possible (it is incremented 20ms for every TCP message) self.time_game = 0.0 # Game time, in seconds, as indicated by the server self.goals_scored = 0 # Goals score by our team self.goals_conceded = 0 # Goals conceded by our team self.team_side_is_left : bool = None # True if our team plays on the left side (this value is later changed by the world parser) self.play_mode = None # Play mode of the soccer game, provided by the server self.play_mode_group = None # Certain play modes share characteristics, so it makes sense to group them self.flags_corners : dict = None # corner flags, key=(x,y,z), always assume we play on the left side self.flags_posts : dict = None # goal posts, key=(x,y,z), always assume we play on the left side self.ball_rel_head_sph_pos = np.zeros(3) # Ball position relative to head (spherical coordinates) (m, deg, deg) self.ball_rel_head_cart_pos = np.zeros(3) # Ball position relative to head (cartesian coordinates) (m) self.ball_rel_torso_cart_pos = np.zeros(3) # Ball position relative to torso (cartesian coordinates) (m) self.ball_rel_torso_cart_pos_history = deque(maxlen=20) # Ball position relative to torso history (queue with up to 20 old positions at intervals of 0.04s, where index 0 is the previous position) self.ball_abs_pos = np.zeros(3) # Ball absolute position (up to date if self.ball_is_visible and self.robot.loc_is_up_to_date) (m) self.ball_abs_pos_history = deque(maxlen=20) # Ball absolute position history (queue with up to 20 old positions at intervals of 0.04s, where index 0 is the previous position) self.ball_abs_pos_last_update = 0 # World.time_local_ms when self.ball_abs_pos was last updated by vision or radio self.ball_abs_vel = np.zeros(3) # Ball velocity vector based on the last 2 known values of self.ball_abs_pos (m/s) (Warning: noisy if ball is distant, use instead get_ball_abs_vel) self.ball_abs_speed = 0 # 
Ball scalar speed based on the last 2 known values of self.ball_abs_pos (m/s) (Warning: noisy if ball is distant, use instead ||get_ball_abs_vel||) self.ball_is_visible = False # True if the last server message contained vision information related to the ball self.is_ball_abs_pos_from_vision = False # True if ball_abs_pos originated from vision, False if it originated from radio self.ball_last_seen = 0 # World.time_local_ms when ball was last seen (note: may be different from self.ball_abs_pos_last_update) self.ball_cheat_abs_pos = np.zeros(3) # Absolute ball position provided by the server as cheat (m) self.ball_cheat_abs_vel = np.zeros(3) # Absolute velocity vector based on the last 2 values of self.ball_cheat_abs_pos (m/s) self.ball_2d_pred_pos = np.zeros((1,2)) # prediction of current and future 2D ball positions* self.ball_2d_pred_vel = np.zeros((1,2)) # prediction of current and future 2D ball velocities* self.ball_2d_pred_spd = np.zeros(1) # prediction of current and future 2D ball linear speeds* # *at intervals of 0.02 s until ball comes to a stop or gets out of bounds (according to prediction) self.lines = np.zeros((30,6)) # Position of visible lines, relative to head, start_pos+end_pos (spherical coordinates) (m, deg, deg, m, deg, deg) self.line_count = 0 # Number of visible lines self.vision_last_update = 0 # World.time_local_ms when last vision update was received self.vision_is_up_to_date = False # True if the last server message contained vision information self.teammates = [Other_Robot(i, True ) for i in range(1,12)] # List of teammates, ordered by unum self.opponents = [Other_Robot(i, False) for i in range(1,12)] # List of opponents, ordered by unum self.teammates[unum-1].is_self = True # This teammate is self
self.draw = Draw(enable_draw, unum, host, 32769) # Draw object for current player
2
2023-12-16 23:40:23+00:00
24k
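As the comments in World.__init__ note, ball_abs_vel is a finite difference of the last two known absolute ball positions, which is why it is noisy when the ball is distant. A rough standalone sketch of that estimate (the helper below is hypothetical, not part of FCPCodebase):

import numpy as np

VISUALSTEP = 0.04  # seconds between vision updates, as in World.VISUALSTEP

def estimate_ball_abs_vel(pos_now, pos_prev, dt: float = VISUALSTEP) -> np.ndarray:
    # simple finite difference; localization noise is amplified by 1/dt
    return (np.asarray(pos_now) - np.asarray(pos_prev)) / dt

# e.g. two consecutive absolute ball positions 0.04 s apart
vel = estimate_ball_abs_vel([1.00, 0.50, 0.04], [0.92, 0.48, 0.04])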
Sam-Izdat/tinycio
src/tinycio/lut.py
[ { "identifier": "ColorSpace", "path": "src/tinycio/colorspace.py", "snippet": "class ColorSpace:\n \"\"\"\n Color space conversion. Applies OETFs and EOTFs as needed but omits tonemapping. Cylindrical transformations are \n treated as distinct color spaces. Example:\n\n .. highlight:: python...
import typing import os import torch import torch.optim as optim import torch.nn as nn import torch.nn.functional as F from typing import Union from enum import IntEnum from contextlib import nullcontext from .colorspace import ColorSpace from .fsio.lutfile import load_lut, save_lut, _infer_lut_file_format, _generate_linear_cube_lut from .fsio.format import LUTFormat from .util.colorutil import srgb_luminance from .util.miscutil import trilinear_interpolation from .loss import feature_moments_calculation
17,698
from __future__ import annotations


class LookupTable:
    """
    Color lookup table. Example:

    .. highlight:: python
    .. code-block:: python

        lut = LookupTable.get_negative()
        im_negative = lut.apply(im)

    :param size: Size of the LUT.
    :param lattice: Lattice as tensor (defaults to linear).
    :param lut_format: Format of the LUT.
    """
    size = 32
    lattice = None
from __future__ import annotations


class LookupTable:
    """
    Color lookup table. Example:

    .. highlight:: python
    .. code-block:: python

        lut = LookupTable.get_negative()
        im_negative = lut.apply(im)

    :param size: Size of the LUT.
    :param lattice: Lattice as tensor (defaults to linear).
    :param lut_format: Format of the LUT.
    """
    size = 32
    lattice = None
lut_format= LUTFormat.UNKNOWN
5
2023-12-15 15:39:08+00:00
24k
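The LookupTable above stores an S x S x S x 3 lattice and applies it with trilinear interpolation. A much simpler nearest-neighbour sketch of the same idea (the function and the [r, g, b] lattice ordering are assumptions for illustration, not tinycio's API):

import torch

def apply_lut_nearest(im: torch.Tensor, lattice: torch.Tensor) -> torch.Tensor:
    # im: (3, H, W) RGB in [0, 1]; lattice: (S, S, S, 3) lookup lattice
    size = lattice.shape[0]
    idx = (im.clamp(0.0, 1.0) * (size - 1)).round().long()
    out = lattice[idx[0], idx[1], idx[2]]   # advanced indexing -> (H, W, 3)
    return out.permute(2, 0, 1)             # back to (3, H, W)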
quocanh34/magic-animate-modified
magicanimate/pipelines/pipeline_animation.py
[ { "identifier": "UNet3DConditionModel", "path": "magicanimate/models/unet_controlnet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,...
import inspect, math import numpy as np import torch import torch.distributed as dist from typing import Callable, List, Optional, Union from dataclasses import dataclass from PIL import Image from tqdm import tqdm from diffusers.utils import is_accelerate_available from packaging import version from transformers import CLIPTextModel, CLIPTokenizer from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL from diffusers.pipeline_utils import DiffusionPipeline from diffusers.schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from diffusers.utils import deprecate, logging, BaseOutput from einops import rearrange from magicanimate.models.unet_controlnet import UNet3DConditionModel from magicanimate.models.multicontrolnet import ControlNetProcessor #fix from magicanimate.models.mutual_self_attention import ReferenceAttentionControl from magicanimate.pipelines.context import ( get_context_scheduler, get_total_steps ) from magicanimate.utils.util import get_tensor_interpolation_method from accelerate import cpu_offload
15,775
verbose=False ): """ Inverse sampling for DDIM Inversion """ if verbose: print("timestep: ", timestep) next_step = timestep timestep = min(timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps, 999) alpha_prod_t = self.scheduler.alphas_cumprod[timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod alpha_prod_t_next = self.scheduler.alphas_cumprod[next_step] beta_prod_t = 1 - alpha_prod_t pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5 pred_dir = (1 - alpha_prod_t_next)**0.5 * model_output x_next = alpha_prod_t_next**0.5 * pred_x0 + pred_dir return x_next, pred_x0 @torch.no_grad() def images2latents(self, images, dtype): """ Convert RGB image to VAE latents """ device = self._execution_device images = torch.from_numpy(images).float().to(dtype) / 127.5 - 1 images = rearrange(images, "f h w c -> f c h w").to(device) latents = [] for frame_idx in range(images.shape[0]): latents.append(self.vae.encode(images[frame_idx:frame_idx+1])['latent_dist'].mean * 0.18215) latents = torch.cat(latents) return latents @torch.no_grad() def invert( self, image: torch.Tensor, prompt, num_inference_steps=20, num_actual_inference_steps=10, eta=0.0, return_intermediates=False, **kwargs): """ Adapted from: https://github.com/Yujun-Shi/DragDiffusion/blob/main/drag_pipeline.py#L440 invert a real image into noise map with determinisc DDIM inversion """ device = self._execution_device batch_size = image.shape[0] if isinstance(prompt, list): if batch_size == 1: image = image.expand(len(prompt), -1, -1, -1) elif isinstance(prompt, str): if batch_size > 1: prompt = [prompt] * batch_size # text embeddings text_input = self.tokenizer( prompt, padding="max_length", max_length=77, return_tensors="pt" ) text_embeddings = self.text_encoder(text_input.input_ids.to(device))[0] print("input text embeddings :", text_embeddings.shape) # define initial latents latents = self.images2latents(image) print("latents shape: ", latents.shape) # interative sampling self.scheduler.set_timesteps(num_inference_steps) print("Valid timesteps: ", reversed(self.scheduler.timesteps)) latents_list = [latents] pred_x0_list = [latents] for i, t in enumerate(tqdm(reversed(self.scheduler.timesteps), desc="DDIM Inversion")): if num_actual_inference_steps is not None and i >= num_actual_inference_steps: continue model_inputs = latents # predict the noise # NOTE: the u-net here is UNet3D, therefore the model_inputs need to be of shape (b c f h w) model_inputs = rearrange(model_inputs, "f c h w -> 1 c f h w") noise_pred = self.unet(model_inputs, t, encoder_hidden_states=text_embeddings).sample noise_pred = rearrange(noise_pred, "b c f h w -> (b f) c h w") # compute the previous noise sample x_t-1 -> x_t latents, pred_x0 = self.next_step(noise_pred, t, latents) latents_list.append(latents) pred_x0_list.append(pred_x0) if return_intermediates: # return the intermediate laters during inversion return latents, latents_list return latents def interpolate_latents(self, latents: torch.Tensor, interpolation_factor:int, device ): if interpolation_factor < 2: return latents new_latents = torch.zeros( (latents.shape[0],latents.shape[1],((latents.shape[2]-1) * interpolation_factor)+1, latents.shape[3],latents.shape[4]), device=latents.device, dtype=latents.dtype, ) org_video_length = latents.shape[2] rate = [i/interpolation_factor for i in range(interpolation_factor)][1:] new_index = 0 v0 = None v1 = None for i0,i1 in zip( range( org_video_length ),range( org_video_length )[1:] ): v0 = 
latents[:,:,i0,:,:] v1 = latents[:,:,i1,:,:] new_latents[:,:,new_index,:,:] = v0 new_index += 1 for f in rate:
# ************************************************************************* # This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo- # difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B- # ytedance Inc.. # ************************************************************************* # Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TODO: 1. support multi-controlnet 2. [DONE] support DDIM inversion 3. support Prompt-to-prompt """ # from magicanimate.models.controlnet import ControlNetModel logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class AnimationPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] class AnimationPipeline(DiffusionPipeline): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet3DConditionModel, # controlnet: ControlNetModel, # processors: List[ControlNetProcessor], scheduler: Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ], ): super().__init__() if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." " `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( version.parse(unet.config._diffusers_version).base_version ) < version.parse("0.9.0.dev0") is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, # controlnet1=processors[0], scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) def enable_vae_slicing(self): self.vae.enable_slicing() def disable_vae_slicing(self): self.vae.disable_slicing() def enable_sequential_cpu_offload(self, gpu_id=0): if is_accelerate_available(): else: raise ImportError("Please install accelerate via `pip install accelerate`") device = torch.device(f"cuda:{gpu_id}") for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]: if cpu_offloaded_model is not None: cpu_offload(cpu_offloaded_model, device) @property def _execution_device(self): if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"): return self.device for module in self.unet.modules(): if ( hasattr(module, "_hf_hook") and hasattr(module._hf_hook, "execution_device") and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return self.device def _encode_prompt(self, prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt): batch_size = len(prompt) if isinstance(prompt, list) else 1 text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" 
{self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None text_embeddings = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, ) text_embeddings = text_embeddings[0] # duplicate text embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = text_embeddings.shape text_embeddings = text_embeddings.repeat(1, num_videos_per_prompt, 1) text_embeddings = text_embeddings.view(bs_embed * num_videos_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None uncond_embeddings = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) uncond_embeddings = uncond_embeddings[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = uncond_embeddings.shape[1] uncond_embeddings = uncond_embeddings.repeat(1, num_videos_per_prompt, 1) uncond_embeddings = uncond_embeddings.view(batch_size * num_videos_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) return text_embeddings def decode_latents(self, latents, rank, decoder_consistency=None): video_length = latents.shape[2] latents = 1 / 0.18215 * latents latents = rearrange(latents, "b c f h w -> (b f) c h w") # video = self.vae.decode(latents).sample video = [] for frame_idx in tqdm(range(latents.shape[0]), disable=(rank!=0)): if decoder_consistency is not None: video.append(decoder_consistency(latents[frame_idx:frame_idx+1])) else: video.append(self.vae.decode(latents[frame_idx:frame_idx+1]).sample) video = torch.cat(video) video = rearrange(video, "(b f) c h w -> b c f h w", f=video_length) video = (video / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16 video = video.cpu().float().numpy() return video def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, callback_steps): if not isinstance(prompt, str) and not isinstance(prompt, list): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) def prepare_latents(self, batch_size, num_channels_latents, video_length, height, width, dtype, device, generator, latents=None, clip_length=16): shape = (batch_size, num_channels_latents, clip_length, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: rand_device = "cpu" if device.type == "mps" else device if isinstance(generator, list): latents = [ torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype) for i in range(batch_size) ] latents = torch.cat(latents, dim=0).to(device) else: latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype).to(device) latents = latents.repeat(1, 1, video_length//clip_length, 1, 1) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def prepare_condition(self, condition1, condition2, num_videos_per_prompt, device, dtype, do_classifier_free_guidance): # Prepare first condition condition1 = torch.from_numpy(condition1.copy()).to(device=device, dtype=dtype) / 255.0 condition1 = torch.stack([condition1 for _ in range(num_videos_per_prompt)], dim=0) condition1 = rearrange(condition1, 'b f h w c -> (b f) c h w').clone() # Prepare second condition condition2 = torch.from_numpy(condition2.copy()).to(device=device, dtype=dtype) / 255.0 condition2 = torch.stack([condition2 for _ in range(num_videos_per_prompt)], dim=0) condition2 = rearrange(condition2, 'b f h w c -> (b f) c h w').clone() # Here, we're averaging the two conditions combined_condition = (condition1*8+condition2*2)/10 if do_classifier_free_guidance: combined_condition = torch.cat([combined_condition] * 2) #combined_condition = torch.from_numpy(combined_condition.copy()).to(device=device, dtype=dtype) return combined_condition def next_step( self, model_output: torch.FloatTensor, timestep: int, x: torch.FloatTensor, eta=0., verbose=False ): """ Inverse sampling for DDIM Inversion """ if verbose: print("timestep: ", timestep) next_step = timestep 
timestep = min(timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps, 999) alpha_prod_t = self.scheduler.alphas_cumprod[timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod alpha_prod_t_next = self.scheduler.alphas_cumprod[next_step] beta_prod_t = 1 - alpha_prod_t pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5 pred_dir = (1 - alpha_prod_t_next)**0.5 * model_output x_next = alpha_prod_t_next**0.5 * pred_x0 + pred_dir return x_next, pred_x0 @torch.no_grad() def images2latents(self, images, dtype): """ Convert RGB image to VAE latents """ device = self._execution_device images = torch.from_numpy(images).float().to(dtype) / 127.5 - 1 images = rearrange(images, "f h w c -> f c h w").to(device) latents = [] for frame_idx in range(images.shape[0]): latents.append(self.vae.encode(images[frame_idx:frame_idx+1])['latent_dist'].mean * 0.18215) latents = torch.cat(latents) return latents @torch.no_grad() def invert( self, image: torch.Tensor, prompt, num_inference_steps=20, num_actual_inference_steps=10, eta=0.0, return_intermediates=False, **kwargs): """ Adapted from: https://github.com/Yujun-Shi/DragDiffusion/blob/main/drag_pipeline.py#L440 invert a real image into noise map with determinisc DDIM inversion """ device = self._execution_device batch_size = image.shape[0] if isinstance(prompt, list): if batch_size == 1: image = image.expand(len(prompt), -1, -1, -1) elif isinstance(prompt, str): if batch_size > 1: prompt = [prompt] * batch_size # text embeddings text_input = self.tokenizer( prompt, padding="max_length", max_length=77, return_tensors="pt" ) text_embeddings = self.text_encoder(text_input.input_ids.to(device))[0] print("input text embeddings :", text_embeddings.shape) # define initial latents latents = self.images2latents(image) print("latents shape: ", latents.shape) # interative sampling self.scheduler.set_timesteps(num_inference_steps) print("Valid timesteps: ", reversed(self.scheduler.timesteps)) latents_list = [latents] pred_x0_list = [latents] for i, t in enumerate(tqdm(reversed(self.scheduler.timesteps), desc="DDIM Inversion")): if num_actual_inference_steps is not None and i >= num_actual_inference_steps: continue model_inputs = latents # predict the noise # NOTE: the u-net here is UNet3D, therefore the model_inputs need to be of shape (b c f h w) model_inputs = rearrange(model_inputs, "f c h w -> 1 c f h w") noise_pred = self.unet(model_inputs, t, encoder_hidden_states=text_embeddings).sample noise_pred = rearrange(noise_pred, "b c f h w -> (b f) c h w") # compute the previous noise sample x_t-1 -> x_t latents, pred_x0 = self.next_step(noise_pred, t, latents) latents_list.append(latents) pred_x0_list.append(pred_x0) if return_intermediates: # return the intermediate laters during inversion return latents, latents_list return latents def interpolate_latents(self, latents: torch.Tensor, interpolation_factor:int, device ): if interpolation_factor < 2: return latents new_latents = torch.zeros( (latents.shape[0],latents.shape[1],((latents.shape[2]-1) * interpolation_factor)+1, latents.shape[3],latents.shape[4]), device=latents.device, dtype=latents.dtype, ) org_video_length = latents.shape[2] rate = [i/interpolation_factor for i in range(interpolation_factor)][1:] new_index = 0 v0 = None v1 = None for i0,i1 in zip( range( org_video_length ),range( org_video_length )[1:] ): v0 = latents[:,:,i0,:,:] v1 = latents[:,:,i1,:,:] new_latents[:,:,new_index,:,:] = v0 new_index += 1 for f in rate:
v = get_tensor_interpolation_method()(v0.to(device=device),v1.to(device=device),f)
5
2023-12-15 01:22:37+00:00
24k
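The next_step method in the pipeline above performs deterministic DDIM inversion: it recovers a clean-sample estimate from the current latent and the predicted noise, then re-noises it toward the next (noisier) timestep. Stripped of scheduler bookkeeping, the update is roughly as follows (a sketch with hedged names, not the diffusers API):

import torch

def ddim_inversion_step(x_t, noise_pred, alpha_prod_t, alpha_prod_t_next):
    # clean-sample estimate from x_t and the predicted noise
    pred_x0 = (x_t - (1 - alpha_prod_t) ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
    # direction pointing toward the next, noisier latent
    pred_dir = (1 - alpha_prod_t_next) ** 0.5 * noise_pred
    return alpha_prod_t_next ** 0.5 * pred_x0 + pred_dir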
Azure-Samples/functions-python-web-crawler
.venv/Lib/site-packages/urllib3/connection.py
[ { "identifier": "HTTPHeaderDict", "path": ".venv/Lib/site-packages/urllib3/_collections.py", "snippet": "class HTTPHeaderDict(typing.MutableMapping[str, str]):\n \"\"\"\n :param headers:\n An iterable of field-value pairs. Must not contain multiple field names\n when compared case-in...
import datetime import logging import os import re import socket import sys import typing import warnings import ssl from http.client import HTTPConnection as _HTTPConnection from http.client import HTTPException as HTTPException # noqa: F401 from http.client import ResponseNotReady from socket import timeout as SocketTimeout from typing import Literal from .response import HTTPResponse from .util.ssl_ import _TYPE_PEER_CERT_RET_DICT from .util.ssltransport import SSLTransport from ._collections import HTTPHeaderDict from .util.response import assert_header_parsing from .util.timeout import _DEFAULT_TIMEOUT, _TYPE_TIMEOUT, Timeout from .util.util import to_str from .util.wait import wait_for_read from ._base_connection import _TYPE_BODY from ._base_connection import ProxyConfig as ProxyConfig from ._base_connection import _ResponseOptions as _ResponseOptions from ._version import __version__ from .exceptions import ( ConnectTimeoutError, HeaderParsingError, NameResolutionError, NewConnectionError, ProxyError, SystemTimeWarning, ) from .util import SKIP_HEADER, SKIPPABLE_HEADERS, connection, ssl_ from .util.request import body_to_chunks from .util.ssl_ import assert_fingerprint as _assert_fingerprint from .util.ssl_ import ( create_urllib3_context, is_ipaddress, resolve_cert_reqs, resolve_ssl_version, ssl_wrap_socket, ) from .util.ssl_match_hostname import CertificateError, match_hostname from .util.url import Url from .response import HTTPResponse
15,841
( f"System time is way off (before {RECENT_DATE}). This will probably " "lead to SSL verification errors" ), SystemTimeWarning, ) sock_and_verified = _ssl_wrap_socket_and_match_hostname( sock=sock, cert_reqs=self.cert_reqs, ssl_version=self.ssl_version, ssl_minimum_version=self.ssl_minimum_version, ssl_maximum_version=self.ssl_maximum_version, ca_certs=self.ca_certs, ca_cert_dir=self.ca_cert_dir, ca_cert_data=self.ca_cert_data, cert_file=self.cert_file, key_file=self.key_file, key_password=self.key_password, server_hostname=server_hostname, ssl_context=self.ssl_context, tls_in_tls=tls_in_tls, assert_hostname=self.assert_hostname, assert_fingerprint=self.assert_fingerprint, ) self.sock = sock_and_verified.socket self.is_verified = sock_and_verified.is_verified # If there's a proxy to be connected to we are fully connected. # This is set twice (once above and here) due to forwarding proxies # not using tunnelling. self._has_connected_to_proxy = bool(self.proxy) def _connect_tls_proxy(self, hostname: str, sock: socket.socket) -> ssl.SSLSocket: """ Establish a TLS connection to the proxy using the provided SSL context. """ # `_connect_tls_proxy` is called when self._tunnel_host is truthy. proxy_config = typing.cast(ProxyConfig, self.proxy_config) ssl_context = proxy_config.ssl_context sock_and_verified = _ssl_wrap_socket_and_match_hostname( sock, cert_reqs=self.cert_reqs, ssl_version=self.ssl_version, ssl_minimum_version=self.ssl_minimum_version, ssl_maximum_version=self.ssl_maximum_version, ca_certs=self.ca_certs, ca_cert_dir=self.ca_cert_dir, ca_cert_data=self.ca_cert_data, server_hostname=hostname, ssl_context=ssl_context, assert_hostname=proxy_config.assert_hostname, assert_fingerprint=proxy_config.assert_fingerprint, # Features that aren't implemented for proxies yet: cert_file=None, key_file=None, key_password=None, tls_in_tls=False, ) self.proxy_is_verified = sock_and_verified.is_verified return sock_and_verified.socket # type: ignore[return-value] class _WrappedAndVerifiedSocket(typing.NamedTuple): """ Wrapped socket and whether the connection is verified after the TLS handshake """ socket: ssl.SSLSocket | SSLTransport is_verified: bool def _ssl_wrap_socket_and_match_hostname( sock: socket.socket, *, cert_reqs: None | str | int, ssl_version: None | str | int, ssl_minimum_version: int | None, ssl_maximum_version: int | None, cert_file: str | None, key_file: str | None, key_password: str | None, ca_certs: str | None, ca_cert_dir: str | None, ca_cert_data: None | str | bytes, assert_hostname: None | str | Literal[False], assert_fingerprint: str | None, server_hostname: str | None, ssl_context: ssl.SSLContext | None, tls_in_tls: bool = False, ) -> _WrappedAndVerifiedSocket: """Logic for constructing an SSLContext from all TLS parameters, passing that down into ssl_wrap_socket, and then doing certificate verification either via hostname or fingerprint. This function exists to guarantee that both proxies and targets have the same behavior when connecting via TLS. 
""" default_ssl_context = False if ssl_context is None: default_ssl_context = True context = create_urllib3_context( ssl_version=resolve_ssl_version(ssl_version), ssl_minimum_version=ssl_minimum_version, ssl_maximum_version=ssl_maximum_version, cert_reqs=resolve_cert_reqs(cert_reqs), ) else: context = ssl_context context.verify_mode = resolve_cert_reqs(cert_reqs) # In some cases, we want to verify hostnames ourselves if ( # `ssl` can't verify fingerprints or alternate hostnames assert_fingerprint or assert_hostname # assert_hostname can be set to False to disable hostname checking or assert_hostname is False # We still support OpenSSL 1.0.2, which prevents us from verifying # hostnames easily: https://github.com/pyca/pyopenssl/pull/933
from __future__ import annotations if typing.TYPE_CHECKING: try: # Compiled with SSL? BaseSSLError = ssl.SSLError except (ImportError, AttributeError): ssl = None # type: ignore[assignment] class BaseSSLError(BaseException): # type: ignore[no-redef] pass # Not a no-op, we're adding this to the namespace so it can be imported. ConnectionError = ConnectionError BrokenPipeError = BrokenPipeError log = logging.getLogger(__name__) port_by_scheme = {"http": 80, "https": 443} # When it comes time to update this value as a part of regular maintenance # (ie test_recent_date is failing) update it to ~6 months before the current date. RECENT_DATE = datetime.date(2022, 1, 1) _CONTAINS_CONTROL_CHAR_RE = re.compile(r"[^-!#$%&'*+.^_`|~0-9a-zA-Z]") _HAS_SYS_AUDIT = hasattr(sys, "audit") class HTTPConnection(_HTTPConnection): """ Based on :class:`http.client.HTTPConnection` but provides an extra constructor backwards-compatibility layer between older and newer Pythons. Additional keyword parameters are used to configure attributes of the connection. Accepted parameters include: - ``source_address``: Set the source address for the current connection. - ``socket_options``: Set specific options on the underlying socket. If not specified, then defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy. For example, if you wish to enable TCP Keep Alive in addition to the defaults, you might pass: .. code-block:: python HTTPConnection.default_socket_options + [ (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1), ] Or you may want to disable the defaults by passing an empty list (e.g., ``[]``). """ default_port: typing.ClassVar[int] = port_by_scheme["http"] # type: ignore[misc] #: Disable Nagle's algorithm by default. #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]`` default_socket_options: typing.ClassVar[connection._TYPE_SOCKET_OPTIONS] = [ (socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) ] #: Whether this connection verifies the host's certificate. is_verified: bool = False #: Whether this proxy connection verified the proxy host's certificate. # If no proxy is currently connected to the value will be ``None``. proxy_is_verified: bool | None = None blocksize: int source_address: tuple[str, int] | None socket_options: connection._TYPE_SOCKET_OPTIONS | None _has_connected_to_proxy: bool _response_options: _ResponseOptions | None _tunnel_host: str | None _tunnel_port: int | None _tunnel_scheme: str | None def __init__( self, host: str, port: int | None = None, *, timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT, source_address: tuple[str, int] | None = None, blocksize: int = 16384, socket_options: None | (connection._TYPE_SOCKET_OPTIONS) = default_socket_options, proxy: Url | None = None, proxy_config: ProxyConfig | None = None, ) -> None: super().__init__( host=host, port=port, timeout=Timeout.resolve_default_timeout(timeout), source_address=source_address, blocksize=blocksize, ) self.socket_options = socket_options self.proxy = proxy self.proxy_config = proxy_config self._has_connected_to_proxy = False self._response_options = None self._tunnel_host: str | None = None self._tunnel_port: int | None = None self._tunnel_scheme: str | None = None # https://github.com/python/mypy/issues/4125 # Mypy treats this as LSP violation, which is considered a bug. # If `host` is made a property it violates LSP, because a writeable attribute is overridden with a read-only one. 
# However, there is also a `host` setter so LSP is not violated. # Potentially, a `@host.deleter` might be needed depending on how this issue will be fixed. @property def host(self) -> str: """ Getter method to remove any trailing dots that indicate the hostname is an FQDN. In general, SSL certificates don't include the trailing dot indicating a fully-qualified domain name, and thus, they don't validate properly when checked against a domain name that includes the dot. In addition, some servers may not expect to receive the trailing dot when provided. However, the hostname with trailing dot is critical to DNS resolution; doing a lookup with the trailing dot will properly only resolve the appropriate FQDN, whereas a lookup without a trailing dot will search the system's search domain list. Thus, it's important to keep the original host around for use only in those cases where it's appropriate (i.e., when doing DNS lookup to establish the actual TCP connection across which we're going to send HTTP requests). """ return self._dns_host.rstrip(".") @host.setter def host(self, value: str) -> None: """ Setter for the `host` property. We assume that only urllib3 uses the _dns_host attribute; httplib itself only uses `host`, and it seems reasonable that other libraries follow suit. """ self._dns_host = value def _new_conn(self) -> socket.socket: """Establish a socket connection and set nodelay settings on it. :return: New socket connection. """ try: sock = connection.create_connection( (self._dns_host, self.port), self.timeout, source_address=self.source_address, socket_options=self.socket_options, ) except socket.gaierror as e: raise NameResolutionError(self.host, self, e) from e except SocketTimeout as e: raise ConnectTimeoutError( self, f"Connection to {self.host} timed out. (connect timeout={self.timeout})", ) from e except OSError as e: raise NewConnectionError( self, f"Failed to establish a new connection: {e}" ) from e # Audit hooks are only available in Python 3.8+ if _HAS_SYS_AUDIT: sys.audit("http.client.connect", self, self.host, self.port) return sock def set_tunnel( self, host: str, port: int | None = None, headers: typing.Mapping[str, str] | None = None, scheme: str = "http", ) -> None: if scheme not in ("http", "https"): raise ValueError( f"Invalid proxy scheme for tunneling: {scheme!r}, must be either 'http' or 'https'" ) super().set_tunnel(host, port=port, headers=headers) self._tunnel_scheme = scheme def connect(self) -> None: self.sock = self._new_conn() if self._tunnel_host: # If we're tunneling it means we're connected to our proxy. self._has_connected_to_proxy = True # TODO: Fix tunnel so it doesn't depend on self.sock state. self._tunnel() # type: ignore[attr-defined] # If there's a proxy to be connected to we are fully connected. # This is set twice (once above and here) due to forwarding proxies # not using tunnelling. self._has_connected_to_proxy = bool(self.proxy) @property def is_closed(self) -> bool: return self.sock is None @property def is_connected(self) -> bool: if self.sock is None: return False return not wait_for_read(self.sock, timeout=0.0) @property def has_connected_to_proxy(self) -> bool: return self._has_connected_to_proxy def close(self) -> None: try: super().close() finally: # Reset all stateful properties so connection # can be re-used without leaking prior configs. 
self.sock = None self.is_verified = False self.proxy_is_verified = None self._has_connected_to_proxy = False self._response_options = None self._tunnel_host = None self._tunnel_port = None self._tunnel_scheme = None def putrequest( self, method: str, url: str, skip_host: bool = False, skip_accept_encoding: bool = False, ) -> None: """""" # Empty docstring because the indentation of CPython's implementation # is broken but we don't want this method in our documentation. match = _CONTAINS_CONTROL_CHAR_RE.search(method) if match: raise ValueError( f"Method cannot contain non-token characters {method!r} (found at least {match.group()!r})" ) return super().putrequest( method, url, skip_host=skip_host, skip_accept_encoding=skip_accept_encoding ) def putheader(self, header: str, *values: str) -> None: """""" if not any(isinstance(v, str) and v == SKIP_HEADER for v in values): super().putheader(header, *values) elif to_str(header.lower()) not in SKIPPABLE_HEADERS: skippable_headers = "', '".join( [str.title(header) for header in sorted(SKIPPABLE_HEADERS)] ) raise ValueError( f"urllib3.util.SKIP_HEADER only supports '{skippable_headers}'" ) # `request` method's signature intentionally violates LSP. # urllib3's API is different from `http.client.HTTPConnection` and the subclassing is only incidental. def request( # type: ignore[override] self, method: str, url: str, body: _TYPE_BODY | None = None, headers: typing.Mapping[str, str] | None = None, *, chunked: bool = False, preload_content: bool = True, decode_content: bool = True, enforce_content_length: bool = True, ) -> None: # Update the inner socket's timeout value to send the request. # This only triggers if the connection is re-used. if self.sock is not None: self.sock.settimeout(self.timeout) # Store these values to be fed into the HTTPResponse # object later. TODO: Remove this in favor of a real # HTTP lifecycle mechanism. # We have to store these before we call .request() # because sometimes we can still salvage a response # off the wire even if we aren't able to completely # send the request body. self._response_options = _ResponseOptions( request_method=method, request_url=url, preload_content=preload_content, decode_content=decode_content, enforce_content_length=enforce_content_length, ) if headers is None: headers = {} header_keys = frozenset(to_str(k.lower()) for k in headers) skip_accept_encoding = "accept-encoding" in header_keys skip_host = "host" in header_keys self.putrequest( method, url, skip_accept_encoding=skip_accept_encoding, skip_host=skip_host ) # Transform the body into an iterable of sendall()-able chunks # and detect if an explicit Content-Length is doable. chunks_and_cl = body_to_chunks(body, method=method, blocksize=self.blocksize) chunks = chunks_and_cl.chunks content_length = chunks_and_cl.content_length # When chunked is explicit set to 'True' we respect that. if chunked: if "transfer-encoding" not in header_keys: self.putheader("Transfer-Encoding", "chunked") else: # Detect whether a framing mechanism is already in use. If so # we respect that value, otherwise we pick chunked vs content-length # depending on the type of 'body'. if "content-length" in header_keys: chunked = False elif "transfer-encoding" in header_keys: chunked = True # Otherwise we go off the recommendation of 'body_to_chunks()'. 
else: chunked = False if content_length is None: if chunks is not None: chunked = True self.putheader("Transfer-Encoding", "chunked") else: self.putheader("Content-Length", str(content_length)) # Now that framing headers are out of the way we send all the other headers. if "user-agent" not in header_keys: self.putheader("User-Agent", _get_default_user_agent()) for header, value in headers.items(): self.putheader(header, value) self.endheaders() # If we're given a body we start sending that in chunks. if chunks is not None: for chunk in chunks: # Sending empty chunks isn't allowed for TE: chunked # as it indicates the end of the body. if not chunk: continue if isinstance(chunk, str): chunk = chunk.encode("utf-8") if chunked: self.send(b"%x\r\n%b\r\n" % (len(chunk), chunk)) else: self.send(chunk) # Regardless of whether we have a body or not, if we're in # chunked mode we want to send an explicit empty chunk. if chunked: self.send(b"0\r\n\r\n") def request_chunked( self, method: str, url: str, body: _TYPE_BODY | None = None, headers: typing.Mapping[str, str] | None = None, ) -> None: """ Alternative to the common request method, which sends the body with chunked encoding and not as one block """ warnings.warn( "HTTPConnection.request_chunked() is deprecated and will be removed " "in urllib3 v2.1.0. Instead use HTTPConnection.request(..., chunked=True).", category=DeprecationWarning, stacklevel=2, ) self.request(method, url, body=body, headers=headers, chunked=True) def getresponse( # type: ignore[override] self, ) -> HTTPResponse: """ Get the response from the server. If the HTTPConnection is in the correct state, returns an instance of HTTPResponse or of whatever object is returned by the response_class variable. If a request has not been sent or if a previous response has not be handled, ResponseNotReady is raised. If the HTTP response indicates that the connection should be closed, then it will be closed before the response is returned. When the connection is closed, the underlying socket is closed. """ # Raise the same error as http.client.HTTPConnection if self._response_options is None: raise ResponseNotReady() # Reset this attribute for being used again. resp_options = self._response_options self._response_options = None # Since the connection's timeout value may have been updated # we need to set the timeout on the socket. self.sock.settimeout(self.timeout) # This is needed here to avoid circular import errors # Get the response from http.client.HTTPConnection httplib_response = super().getresponse() try: assert_header_parsing(httplib_response.msg) except (HeaderParsingError, TypeError) as hpe: log.warning( "Failed to parse headers (url=%s): %s", _url_from_connection(self, resp_options.request_url), hpe, exc_info=True, ) headers = HTTPHeaderDict(httplib_response.msg.items()) response = HTTPResponse( body=httplib_response, headers=headers, status=httplib_response.status, version=httplib_response.version, reason=httplib_response.reason, preload_content=resp_options.preload_content, decode_content=resp_options.decode_content, original_response=httplib_response, enforce_content_length=resp_options.enforce_content_length, request_method=resp_options.request_method, request_url=resp_options.request_url, ) return response class HTTPSConnection(HTTPConnection): """ Many of the parameters to this constructor are passed to the underlying SSL socket by means of :py:func:`urllib3.util.ssl_wrap_socket`. 
""" default_port = port_by_scheme["https"] # type: ignore[misc] cert_reqs: int | str | None = None ca_certs: str | None = None ca_cert_dir: str | None = None ca_cert_data: None | str | bytes = None ssl_version: int | str | None = None ssl_minimum_version: int | None = None ssl_maximum_version: int | None = None assert_fingerprint: str | None = None def __init__( self, host: str, port: int | None = None, *, timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT, source_address: tuple[str, int] | None = None, blocksize: int = 16384, socket_options: None | (connection._TYPE_SOCKET_OPTIONS) = HTTPConnection.default_socket_options, proxy: Url | None = None, proxy_config: ProxyConfig | None = None, cert_reqs: int | str | None = None, assert_hostname: None | str | Literal[False] = None, assert_fingerprint: str | None = None, server_hostname: str | None = None, ssl_context: ssl.SSLContext | None = None, ca_certs: str | None = None, ca_cert_dir: str | None = None, ca_cert_data: None | str | bytes = None, ssl_minimum_version: int | None = None, ssl_maximum_version: int | None = None, ssl_version: int | str | None = None, # Deprecated cert_file: str | None = None, key_file: str | None = None, key_password: str | None = None, ) -> None: super().__init__( host, port=port, timeout=timeout, source_address=source_address, blocksize=blocksize, socket_options=socket_options, proxy=proxy, proxy_config=proxy_config, ) self.key_file = key_file self.cert_file = cert_file self.key_password = key_password self.ssl_context = ssl_context self.server_hostname = server_hostname self.assert_hostname = assert_hostname self.assert_fingerprint = assert_fingerprint self.ssl_version = ssl_version self.ssl_minimum_version = ssl_minimum_version self.ssl_maximum_version = ssl_maximum_version self.ca_certs = ca_certs and os.path.expanduser(ca_certs) self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir) self.ca_cert_data = ca_cert_data # cert_reqs depends on ssl_context so calculate last. if cert_reqs is None: if self.ssl_context is not None: cert_reqs = self.ssl_context.verify_mode else: cert_reqs = resolve_cert_reqs(None) self.cert_reqs = cert_reqs def set_cert( self, key_file: str | None = None, cert_file: str | None = None, cert_reqs: int | str | None = None, key_password: str | None = None, ca_certs: str | None = None, assert_hostname: None | str | Literal[False] = None, assert_fingerprint: str | None = None, ca_cert_dir: str | None = None, ca_cert_data: None | str | bytes = None, ) -> None: """ This method should only be called once, before the connection is used. """ warnings.warn( "HTTPSConnection.set_cert() is deprecated and will be removed " "in urllib3 v2.1.0. Instead provide the parameters to the " "HTTPSConnection constructor.", category=DeprecationWarning, stacklevel=2, ) # If cert_reqs is not provided we'll assume CERT_REQUIRED unless we also # have an SSLContext object in which case we'll use its verify_mode. 
if cert_reqs is None: if self.ssl_context is not None: cert_reqs = self.ssl_context.verify_mode else: cert_reqs = resolve_cert_reqs(None) self.key_file = key_file self.cert_file = cert_file self.cert_reqs = cert_reqs self.key_password = key_password self.assert_hostname = assert_hostname self.assert_fingerprint = assert_fingerprint self.ca_certs = ca_certs and os.path.expanduser(ca_certs) self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir) self.ca_cert_data = ca_cert_data def connect(self) -> None: sock: socket.socket | ssl.SSLSocket self.sock = sock = self._new_conn() server_hostname: str = self.host tls_in_tls = False # Do we need to establish a tunnel? if self._tunnel_host is not None: # We're tunneling to an HTTPS origin so need to do TLS-in-TLS. if self._tunnel_scheme == "https": self.sock = sock = self._connect_tls_proxy(self.host, sock) tls_in_tls = True # If we're tunneling it means we're connected to our proxy. self._has_connected_to_proxy = True self._tunnel() # type: ignore[attr-defined] # Override the host with the one we're requesting data from. server_hostname = self._tunnel_host if self.server_hostname is not None: server_hostname = self.server_hostname is_time_off = datetime.date.today() < RECENT_DATE if is_time_off: warnings.warn( ( f"System time is way off (before {RECENT_DATE}). This will probably " "lead to SSL verification errors" ), SystemTimeWarning, ) sock_and_verified = _ssl_wrap_socket_and_match_hostname( sock=sock, cert_reqs=self.cert_reqs, ssl_version=self.ssl_version, ssl_minimum_version=self.ssl_minimum_version, ssl_maximum_version=self.ssl_maximum_version, ca_certs=self.ca_certs, ca_cert_dir=self.ca_cert_dir, ca_cert_data=self.ca_cert_data, cert_file=self.cert_file, key_file=self.key_file, key_password=self.key_password, server_hostname=server_hostname, ssl_context=self.ssl_context, tls_in_tls=tls_in_tls, assert_hostname=self.assert_hostname, assert_fingerprint=self.assert_fingerprint, ) self.sock = sock_and_verified.socket self.is_verified = sock_and_verified.is_verified # If there's a proxy to be connected to we are fully connected. # This is set twice (once above and here) due to forwarding proxies # not using tunnelling. self._has_connected_to_proxy = bool(self.proxy) def _connect_tls_proxy(self, hostname: str, sock: socket.socket) -> ssl.SSLSocket: """ Establish a TLS connection to the proxy using the provided SSL context. """ # `_connect_tls_proxy` is called when self._tunnel_host is truthy. 
proxy_config = typing.cast(ProxyConfig, self.proxy_config) ssl_context = proxy_config.ssl_context sock_and_verified = _ssl_wrap_socket_and_match_hostname( sock, cert_reqs=self.cert_reqs, ssl_version=self.ssl_version, ssl_minimum_version=self.ssl_minimum_version, ssl_maximum_version=self.ssl_maximum_version, ca_certs=self.ca_certs, ca_cert_dir=self.ca_cert_dir, ca_cert_data=self.ca_cert_data, server_hostname=hostname, ssl_context=ssl_context, assert_hostname=proxy_config.assert_hostname, assert_fingerprint=proxy_config.assert_fingerprint, # Features that aren't implemented for proxies yet: cert_file=None, key_file=None, key_password=None, tls_in_tls=False, ) self.proxy_is_verified = sock_and_verified.is_verified return sock_and_verified.socket # type: ignore[return-value] class _WrappedAndVerifiedSocket(typing.NamedTuple): """ Wrapped socket and whether the connection is verified after the TLS handshake """ socket: ssl.SSLSocket | SSLTransport is_verified: bool def _ssl_wrap_socket_and_match_hostname( sock: socket.socket, *, cert_reqs: None | str | int, ssl_version: None | str | int, ssl_minimum_version: int | None, ssl_maximum_version: int | None, cert_file: str | None, key_file: str | None, key_password: str | None, ca_certs: str | None, ca_cert_dir: str | None, ca_cert_data: None | str | bytes, assert_hostname: None | str | Literal[False], assert_fingerprint: str | None, server_hostname: str | None, ssl_context: ssl.SSLContext | None, tls_in_tls: bool = False, ) -> _WrappedAndVerifiedSocket: """Logic for constructing an SSLContext from all TLS parameters, passing that down into ssl_wrap_socket, and then doing certificate verification either via hostname or fingerprint. This function exists to guarantee that both proxies and targets have the same behavior when connecting via TLS. """ default_ssl_context = False if ssl_context is None: default_ssl_context = True context = create_urllib3_context( ssl_version=resolve_ssl_version(ssl_version), ssl_minimum_version=ssl_minimum_version, ssl_maximum_version=ssl_maximum_version, cert_reqs=resolve_cert_reqs(cert_reqs), ) else: context = ssl_context context.verify_mode = resolve_cert_reqs(cert_reqs) # In some cases, we want to verify hostnames ourselves if ( # `ssl` can't verify fingerprints or alternate hostnames assert_fingerprint or assert_hostname # assert_hostname can be set to False to disable hostname checking or assert_hostname is False # We still support OpenSSL 1.0.2, which prevents us from verifying # hostnames easily: https://github.com/pyca/pyopenssl/pull/933
or ssl_.IS_PYOPENSSL
18
2023-12-16 04:12:01+00:00
24k
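The deprecation warning in `request_chunked()` above points callers at `HTTPConnection.request(..., chunked=True)`. A minimal sketch of that replacement path, assuming urllib3 v2.x and a reachable HTTP server at the hypothetical host `example.com`:

from urllib3.connection import HTTPConnection

conn = HTTPConnection("example.com", port=80)

def body_chunks():
    # Each yielded chunk is framed as "<hex length>\r\n<chunk>\r\n" by request().
    yield b"first chunk"
    yield b"second chunk"

# chunked=True takes the Transfer-Encoding: chunked branch shown in request() above,
# ending with the explicit empty "0\r\n\r\n" terminator chunk.
conn.request("POST", "/upload", body=body_chunks(), chunked=True)
response = conn.getresponse()
print(response.status)
conn.close()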
YaoFANGUK/video-subtitle-remover
backend/scenedetect/scene_manager.py
[ { "identifier": "SimpleTableCell", "path": "backend/scenedetect/_thirdparty/simpletable.py", "snippet": "class SimpleTableCell(object):\n \"\"\"A table class to create table cells.\n\n Example:\n cell = SimpleTableCell('Hello, world!')\n \"\"\"\n\n def __init__(self, text, header=False):\...
import csv import threading import queue import logging import math import sys import cv2 import numpy as np from enum import Enum from typing import Iterable, List, Tuple, Optional, Dict, Callable, Union, TextIO from backend.scenedetect._thirdparty.simpletable import (SimpleTableCell, SimpleTableImage, SimpleTableRow, SimpleTable, HTMLPage) from backend.scenedetect.platform import (tqdm, get_and_create_path, get_cv2_imwrite_params, Template) from backend.scenedetect.frame_timecode import FrameTimecode from backend.scenedetect.video_stream import VideoStream from backend.scenedetect.scene_detector import SceneDetector, SparseSceneDetector from backend.scenedetect.stats_manager import StatsManager, FrameMetricRegistered
14,698
'%d' % (i + 1), '%d' % (start.get_frames() + 1), start.get_timecode(), '%.3f' % start.get_seconds(), '%d' % end.get_frames(), end.get_timecode(), '%.3f' % end.get_seconds(), '%d' % duration.get_frames(), duration.get_timecode(), '%.3f' % duration.get_seconds() ]) if image_filenames: for image in image_filenames[i]: row.add_cell( SimpleTableCell( SimpleTableImage(image, width=image_width, height=image_height))) if i == 0: scene_table = SimpleTable(rows=[row], header_row=header_row, css_class=css_class) else: scene_table.add_row(row=row) # Write html file page = HTMLPage() page.add_table(timecode_table) page.add_table(scene_table) page.css = css page.save(output_html_filename) # # TODO(v1.0): Refactor to take a SceneList object; consider moving this and save scene list # to a better spot, or just move them to scene_list.py. # def save_images(scene_list: List[Tuple[FrameTimecode, FrameTimecode]], video: VideoStream, num_images: int = 3, frame_margin: int = 1, image_extension: str = 'jpg', encoder_param: int = 95, image_name_template: str = '$VIDEO_NAME-Scene-$SCENE_NUMBER-$IMAGE_NUMBER', output_dir: Optional[str] = None, show_progress: Optional[bool] = False, scale: Optional[float] = None, height: Optional[int] = None, width: Optional[int] = None, interpolation: Interpolation = Interpolation.CUBIC, video_manager=None) -> Dict[int, List[str]]: """Save a set number of images from each scene, given a list of scenes and the associated video/frame source. Arguments: scene_list: A list of scenes (pairs of FrameTimecode objects) returned from calling a SceneManager's detect_scenes() method. video: A VideoStream object corresponding to the scene list. Note that the video will be closed/re-opened and seeked through. num_images: Number of images to generate for each scene. Minimum is 1. frame_margin: Number of frames to pad each scene around the beginning and end (e.g. moves the first/last image into the scene by N frames). Can set to 0, but will result in some video files failing to extract the very last frame. image_extension: Type of image to save (must be one of 'jpg', 'png', or 'webp'). encoder_param: Quality/compression efficiency, based on type of image: 'jpg' / 'webp': Quality 0-100, higher is better quality. 100 is lossless for webp. 'png': Compression from 1-9, where 9 achieves best filesize but is slower to encode. image_name_template: Template to use when creating the images on disk. Can use the macros $VIDEO_NAME, $SCENE_NUMBER, and $IMAGE_NUMBER. The image extension is applied automatically as per the argument image_extension. output_dir: Directory to output the images into. If not set, the output is created in the working directory. show_progress: If True, shows a progress bar if tqdm is installed. scale: Optional factor by which to rescale saved images. A scaling factor of 1 would not result in rescaling. A value < 1 results in a smaller saved image, while a value > 1 results in an image larger than the original. This value is ignored if either the height or width values are specified. height: Optional value for the height of the saved images. Specifying both the height and width will resize images to an exact size, regardless of aspect ratio. Specifying only height will rescale the image to that number of pixels in height while preserving the aspect ratio. width: Optional value for the width of the saved images. Specifying both the width and height will resize images to an exact size, regardless of aspect ratio. 
Specifying only width will rescale the image to that number of pixels wide while preserving the aspect ratio. interpolation: Type of interpolation to use when resizing images. video_manager: [DEPRECATED] DO NOT USE. For backwards compatibility only. Returns: Dictionary of the format { scene_num : [image_paths] }, where scene_num is the number of the scene in scene_list (starting from 1), and image_paths is a list of the paths to the newly saved/created images. Raises: ValueError: Raised if any arguments are invalid or out of range (e.g. if num_images is negative). """ # TODO(v0.7): Add DeprecationWarning that `video_manager` will be removed in v0.8. if video_manager is not None: logger.error('`video_manager` argument is deprecated, use `video` instead.') video = video_manager if not scene_list: return {} if num_images <= 0 or frame_margin < 0: raise ValueError() # TODO: Validate that encoder_param is within the proper range. # Should be between 0 and 100 (inclusive) for jpg/webp, and 1-9 for png. imwrite_param = [get_cv2_imwrite_params()[image_extension], encoder_param ] if encoder_param is not None else [] video.reset() # Setup flags and init progress bar if available. completed = True logger.info('Generating output images (%d per scene)...', num_images) progress_bar = None if show_progress: progress_bar = tqdm(total=len(scene_list) * num_images, unit='images', dynamic_ncols=True)
# -*- coding: utf-8 -*- # # PySceneDetect: Python-Based Video Scene Detector # ------------------------------------------------------------------- # [ Site: https://scenedetect.com ] # [ Docs: https://scenedetect.com/docs/ ] # [ Github: https://github.com/Breakthrough/PySceneDetect/ ] # # Copyright (C) 2014-2023 Brandon Castellano <http://www.bcastell.com>. # PySceneDetect is licensed under the BSD 3-Clause License; see the # included LICENSE file, or visit one of the above pages for details. # """``scenedetect.scene_manager`` Module This module implements :class:`SceneManager`, coordinates running a :mod:`SceneDetector <scenedetect.detectors>` over the frames of a video (:mod:`VideoStream <scenedetect.video_stream>`). Video decoding is done in a separate thread to improve performance. This module also contains other helper functions (e.g. :func:`save_images`) which can be used to process the resulting scene list. =============================================================== Usage =============================================================== The following example shows basic usage of a :class:`SceneManager`: .. code:: python from scenedetect import open_video, SceneManager, ContentDetector video = open_video(video_path) scene_manager = SceneManager() scene_manager.add_detector(ContentDetector()) # Detect all scenes in video from current position to end. scene_manager.detect_scenes(video) # `get_scene_list` returns a list of start/end timecode pairs # for each scene that was found. scenes = scene_manager.get_scene_list() An optional callback can also be invoked on each detected scene, for example: .. code:: python from scenedetect import open_video, SceneManager, ContentDetector # Callback to invoke on the first frame of every new scene detection. def on_new_scene(frame_img: numpy.ndarray, frame_num: int): print("New scene found at frame %d." % frame_num) video = open_video(test_video_file) scene_manager = SceneManager() scene_manager.add_detector(ContentDetector()) scene_manager.detect_scenes(video=video, callback=on_new_scene) To use a `SceneManager` with a webcam/device or existing `cv2.VideoCapture` device, use the :class:`VideoCaptureAdapter <scenedetect.backends.opencv.VideoCaptureAdapter>` instead of `open_video`. ======================================================================= Storing Per-Frame Statistics ======================================================================= `SceneManager` can use an optional :class:`StatsManager <scenedetect.stats_manager.StatsManager>` to save frame statistics to disk: .. code:: python from scenedetect import open_video, ContentDetector, SceneManager, StatsManager video = open_video(test_video_file) scene_manager = SceneManager(stats_manager=StatsManager()) scene_manager.add_detector(ContentDetector()) scene_manager.detect_scenes(video=video) scene_list = scene_manager.get_scene_list() print_scenes(scene_list=scene_list) # Save per-frame statistics to disk. scene_manager.stats_manager.save_to_csv(csv_file=STATS_FILE_PATH) The statsfile can be used to find a better threshold for certain inputs, or perform statistical analysis of the video. """ logger = logging.getLogger('pyscenedetect') # TODO: This value can and should be tuned for performance improvements as much as possible, # until accuracy falls, on a large enough dataset. This has yet to be done, but the current # value doesn't seem to have caused any issues at least. 
DEFAULT_MIN_WIDTH: int = 256 """The default minimum width a frame will be downscaled to when calculating a downscale factor.""" MAX_FRAME_QUEUE_LENGTH: int = 4 """Maximum number of decoded frames which can be buffered while waiting to be processed.""" PROGRESS_BAR_DESCRIPTION = 'Detected: %d | Progress' """Template to use for progress bar.""" class Interpolation(Enum): """Interpolation method used for image resizing. Based on constants defined in OpenCV.""" NEAREST = cv2.INTER_NEAREST """Nearest neighbor interpolation.""" LINEAR = cv2.INTER_LINEAR """Bilinear interpolation.""" CUBIC = cv2.INTER_CUBIC """Bicubic interpolation.""" AREA = cv2.INTER_AREA """Pixel area relation resampling. Provides moire'-free downscaling.""" LANCZOS4 = cv2.INTER_LANCZOS4 """Lanczos interpolation over 8x8 neighborhood.""" def compute_downscale_factor(frame_width: int, effective_width: int = DEFAULT_MIN_WIDTH) -> int: """Get the optimal default downscale factor based on a video's resolution (currently only the width in pixels is considered). The resulting effective width of the video will be between frame_width and 1.5 * frame_width pixels (e.g. if frame_width is 200, the range of effective widths will be between 200 and 300). Arguments: frame_width: Actual width of the video frame in pixels. effective_width: Desired minimum width in pixels. Returns: int: The default downscale factor to use to achieve at least the target effective_width. """ assert not (frame_width < 1 or effective_width < 1) if frame_width < effective_width: return 1 return frame_width // effective_width def get_scenes_from_cuts( cut_list: Iterable[FrameTimecode], start_pos: Union[int, FrameTimecode], end_pos: Union[int, FrameTimecode], base_timecode: Optional[FrameTimecode] = None, ) -> List[Tuple[FrameTimecode, FrameTimecode]]: """Returns a list of tuples of start/end FrameTimecodes for each scene based on a list of detected scene cuts/breaks. This function is called when using the :meth:`SceneManager.get_scene_list` method. The scene list is generated from a cutting list (:meth:`SceneManager.get_cut_list`), noting that each scene is contiguous, starting from the first to last frame of the input. If `cut_list` is empty, the resulting scene will span from `start_pos` to `end_pos`. Arguments: cut_list: List of FrameTimecode objects where scene cuts/breaks occur. base_timecode: The base_timecode of which all FrameTimecodes in the cut_list are based on. num_frames: The number of frames, or FrameTimecode representing duration, of the video that was processed (used to generate last scene's end time). start_frame: The start frame or FrameTimecode of the cut list. Used to generate the first scene's start time. base_timecode: [DEPRECATED] DO NOT USE. For backwards compatibility only. Returns: List of tuples in the form (start_time, end_time), where both start_time and end_time are FrameTimecode objects representing the exact time/frame where each scene occupies based on the input cut_list. """ # TODO(v0.7): Use the warnings module to turn this into a warning. if base_timecode is not None: logger.error('`base_timecode` argument is deprecated has no effect.') # Scene list, where scenes are tuples of (Start FrameTimecode, End FrameTimecode). scene_list = [] if not cut_list: scene_list.append((start_pos, end_pos)) return scene_list # Initialize last_cut to the first frame we processed,as it will be # the start timecode for the first scene in the list. 
last_cut = start_pos for cut in cut_list: scene_list.append((last_cut, cut)) last_cut = cut # Last scene is from last cut to end of video. scene_list.append((last_cut, end_pos)) return scene_list def write_scene_list(output_csv_file: TextIO, scene_list: Iterable[Tuple[FrameTimecode, FrameTimecode]], include_cut_list: bool = True, cut_list: Optional[Iterable[FrameTimecode]] = None) -> None: """Writes the given list of scenes to an output file handle in CSV format. Arguments: output_csv_file: Handle to open file in write mode. scene_list: List of pairs of FrameTimecodes denoting each scene's start/end FrameTimecode. include_cut_list: Bool indicating if the first row should include the timecodes where each scene starts. Should be set to False if RFC 4180 compliant CSV output is required. cut_list: Optional list of FrameTimecode objects denoting the cut list (i.e. the frames in the video that need to be split to generate individual scenes). If not specified, the cut list is generated using the start times of each scene following the first one. """ csv_writer = csv.writer(output_csv_file, lineterminator='\n') # If required, output the cutting list as the first row (i.e. before the header row). if include_cut_list: csv_writer.writerow( ["Timecode List:"] + cut_list if cut_list else [start.get_timecode() for start, _ in scene_list[1:]]) csv_writer.writerow([ "Scene Number", "Start Frame", "Start Timecode", "Start Time (seconds)", "End Frame", "End Timecode", "End Time (seconds)", "Length (frames)", "Length (timecode)", "Length (seconds)" ]) for i, (start, end) in enumerate(scene_list): duration = end - start csv_writer.writerow([ '%d' % (i + 1), '%d' % (start.get_frames() + 1), start.get_timecode(), '%.3f' % start.get_seconds(), '%d' % end.get_frames(), end.get_timecode(), '%.3f' % end.get_seconds(), '%d' % duration.get_frames(), duration.get_timecode(), '%.3f' % duration.get_seconds() ]) def write_scene_list_html(output_html_filename, scene_list, cut_list=None, css=None, css_class='mytable', image_filenames=None, image_width=None, image_height=None): """Writes the given list of scenes to an output file handle in html format. Arguments: output_html_filename: filename of output html file scene_list: List of pairs of FrameTimecodes denoting each scene's start/end FrameTimecode. cut_list: Optional list of FrameTimecode objects denoting the cut list (i.e. the frames in the video that need to be split to generate individual scenes). If not passed, the start times of each scene (besides the 0th scene) is used instead. css: String containing all the css information for the resulting html page. 
css_class: String containing the named css class image_filenames: dict where key i contains a list with n elements (filenames of the n saved images from that scene) image_width: Optional desired width of images in table in pixels image_height: Optional desired height of images in table in pixels """ if not css: css = """ table.mytable { font-family: times; font-size:12px; color:#000000; border-width: 1px; border-color: #eeeeee; border-collapse: collapse; background-color: #ffffff; width=100%; max-width:550px; table-layout:fixed; } table.mytable th { border-width: 1px; padding: 8px; border-style: solid; border-color: #eeeeee; background-color: #e6eed6; color:#000000; } table.mytable td { border-width: 1px; padding: 8px; border-style: solid; border-color: #eeeeee; } #code { display:inline; font-family: courier; color: #3d9400; } #string { display:inline; font-weight: bold; } """ # Output Timecode list timecode_table = SimpleTable( [["Timecode List:"] + (cut_list if cut_list else [start.get_timecode() for start, _ in scene_list[1:]])], css_class=css_class) # Output list of scenes header_row = [ "Scene Number", "Start Frame", "Start Timecode", "Start Time (seconds)", "End Frame", "End Timecode", "End Time (seconds)", "Length (frames)", "Length (timecode)", "Length (seconds)" ] for i, (start, end) in enumerate(scene_list): duration = end - start row = SimpleTableRow([ '%d' % (i + 1), '%d' % (start.get_frames() + 1), start.get_timecode(), '%.3f' % start.get_seconds(), '%d' % end.get_frames(), end.get_timecode(), '%.3f' % end.get_seconds(), '%d' % duration.get_frames(), duration.get_timecode(), '%.3f' % duration.get_seconds() ]) if image_filenames: for image in image_filenames[i]: row.add_cell( SimpleTableCell( SimpleTableImage(image, width=image_width, height=image_height))) if i == 0: scene_table = SimpleTable(rows=[row], header_row=header_row, css_class=css_class) else: scene_table.add_row(row=row) # Write html file page = HTMLPage() page.add_table(timecode_table) page.add_table(scene_table) page.css = css page.save(output_html_filename) # # TODO(v1.0): Refactor to take a SceneList object; consider moving this and save scene list # to a better spot, or just move them to scene_list.py. # def save_images(scene_list: List[Tuple[FrameTimecode, FrameTimecode]], video: VideoStream, num_images: int = 3, frame_margin: int = 1, image_extension: str = 'jpg', encoder_param: int = 95, image_name_template: str = '$VIDEO_NAME-Scene-$SCENE_NUMBER-$IMAGE_NUMBER', output_dir: Optional[str] = None, show_progress: Optional[bool] = False, scale: Optional[float] = None, height: Optional[int] = None, width: Optional[int] = None, interpolation: Interpolation = Interpolation.CUBIC, video_manager=None) -> Dict[int, List[str]]: """Save a set number of images from each scene, given a list of scenes and the associated video/frame source. Arguments: scene_list: A list of scenes (pairs of FrameTimecode objects) returned from calling a SceneManager's detect_scenes() method. video: A VideoStream object corresponding to the scene list. Note that the video will be closed/re-opened and seeked through. num_images: Number of images to generate for each scene. Minimum is 1. frame_margin: Number of frames to pad each scene around the beginning and end (e.g. moves the first/last image into the scene by N frames). Can set to 0, but will result in some video files failing to extract the very last frame. image_extension: Type of image to save (must be one of 'jpg', 'png', or 'webp'). 
encoder_param: Quality/compression efficiency, based on type of image: 'jpg' / 'webp': Quality 0-100, higher is better quality. 100 is lossless for webp. 'png': Compression from 1-9, where 9 achieves best filesize but is slower to encode. image_name_template: Template to use when creating the images on disk. Can use the macros $VIDEO_NAME, $SCENE_NUMBER, and $IMAGE_NUMBER. The image extension is applied automatically as per the argument image_extension. output_dir: Directory to output the images into. If not set, the output is created in the working directory. show_progress: If True, shows a progress bar if tqdm is installed. scale: Optional factor by which to rescale saved images. A scaling factor of 1 would not result in rescaling. A value < 1 results in a smaller saved image, while a value > 1 results in an image larger than the original. This value is ignored if either the height or width values are specified. height: Optional value for the height of the saved images. Specifying both the height and width will resize images to an exact size, regardless of aspect ratio. Specifying only height will rescale the image to that number of pixels in height while preserving the aspect ratio. width: Optional value for the width of the saved images. Specifying both the width and height will resize images to an exact size, regardless of aspect ratio. Specifying only width will rescale the image to that number of pixels wide while preserving the aspect ratio. interpolation: Type of interpolation to use when resizing images. video_manager: [DEPRECATED] DO NOT USE. For backwards compatibility only. Returns: Dictionary of the format { scene_num : [image_paths] }, where scene_num is the number of the scene in scene_list (starting from 1), and image_paths is a list of the paths to the newly saved/created images. Raises: ValueError: Raised if any arguments are invalid or out of range (e.g. if num_images is negative). """ # TODO(v0.7): Add DeprecationWarning that `video_manager` will be removed in v0.8. if video_manager is not None: logger.error('`video_manager` argument is deprecated, use `video` instead.') video = video_manager if not scene_list: return {} if num_images <= 0 or frame_margin < 0: raise ValueError() # TODO: Validate that encoder_param is within the proper range. # Should be between 0 and 100 (inclusive) for jpg/webp, and 1-9 for png. imwrite_param = [get_cv2_imwrite_params()[image_extension], encoder_param ] if encoder_param is not None else [] video.reset() # Setup flags and init progress bar if available. completed = True logger.info('Generating output images (%d per scene)...', num_images) progress_bar = None if show_progress: progress_bar = tqdm(total=len(scene_list) * num_images, unit='images', dynamic_ncols=True)
filename_template = Template(image_name_template)
5
2023-10-25 02:50:01+00:00
24k
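The `write_scene_list()` and `save_images()` helpers defined above are documented but not exercised here. A short usage sketch, assuming the upstream `scenedetect` package (rather than the vendored `backend.scenedetect` copy) and a hypothetical input file `video.mp4`:

from scenedetect import open_video, SceneManager, ContentDetector
from scenedetect.scene_manager import save_images, write_scene_list

video = open_video("video.mp4")
scene_manager = SceneManager()
scene_manager.add_detector(ContentDetector())
scene_manager.detect_scenes(video)
scenes = scene_manager.get_scene_list()

# Three JPEG stills per detected scene, written under ./scene_images.
save_images(scenes, video, num_images=3, image_extension="jpg", output_dir="scene_images")

# One CSV row per scene, using the column layout produced by write_scene_list() above.
with open("scenes.csv", "w") as csv_file:
    write_scene_list(csv_file, scenes)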
EulerSearch/embedding_studio
plugins/default_fine_tuning_method.py
[ { "identifier": "settings", "path": "embedding_studio/core/config.py", "snippet": "class Settings(BaseSettings):\n API_V1_STR: str = \"/api/v1\"\n SECRET_KEY: str = secrets.token_urlsafe(32)\n ACCESS_TOKEN_EXPIRE_MINUTES: int = 60 * 24 * 8\n BACKEND_CORS_ORIGINS: List[AnyHttpUrl] = []\n F...
from typing import List from sentence_transformers import SentenceTransformer from embedding_studio.core.config import settings from embedding_studio.core.plugin import FineTuningMethod from embedding_studio.embeddings.data.clickstream.parsers.s3_parser import ( AWSS3ClickstreamParser, ) from embedding_studio.embeddings.data.clickstream.search_event import ( DummyEventType, SearchResult, ) from embedding_studio.embeddings.data.clickstream.splitter import ( ClickstreamSessionsSplitter, ) from embedding_studio.embeddings.data.clickstream.text_query_item import ( TextQueryItem, ) from embedding_studio.embeddings.data.clickstream.text_query_retriever import ( TextQueryRetriever, ) from embedding_studio.embeddings.data.loaders.s3.s3_loader import ( AWSS3DataLoader, ) from embedding_studio.embeddings.data.storages.producers.clip import ( CLIPItemStorageProducer, ) from embedding_studio.embeddings.data.utils.fields_normalizer import ( DatasetFieldsNormalizer, ) from embedding_studio.embeddings.losses.prob_cosine_margin_ranking_loss import ( CosineProbMarginRankingLoss, ) from embedding_studio.embeddings.models.text_to_image.clip import ( TextToImageCLIPModel, ) from embedding_studio.models.clickstream.sessions import SessionWithEvents from embedding_studio.models.plugin import FineTuningBuilder, PluginMeta from embedding_studio.workers.fine_tuning.data.prepare_data import prepare_data from embedding_studio.workers.fine_tuning.experiments.experiments_tracker import ( ExperimentsManager, ) from embedding_studio.workers.fine_tuning.experiments.finetuning_settings import ( FineTuningSettings, ) from embedding_studio.workers.fine_tuning.experiments.initial_params.clip import ( INITIAL_PARAMS, ) from embedding_studio.workers.fine_tuning.experiments.metrics_accumulator import ( MetricsAccumulator, )
16,533
class DefaultFineTuningMethod(FineTuningMethod): meta = PluginMeta( name="Default Fine Tuning Method", version="0.0.1", description="A default fine-tuning plugin", ) def __init__(self): # uncomment and pass your credentials to use your own s3 bucket # creds = { # "role_arn": "arn:aws:iam::123456789012:role/some_data" # "aws_access_key_id": "TESTACCESSKEIDTEST11", # "aws_secret_access_key": "QWERTY1232qdsadfasfg5349BBdf30ekp23odk03", # } # self.data_loader = AWSS3DataLoader(**creds) # with empty creds, use anonymous session creds = { } self.data_loader = AWSS3DataLoader(**creds) self.retriever = TextQueryRetriever() self.parser = AWSS3ClickstreamParser(
class DefaultFineTuningMethod(FineTuningMethod): meta = PluginMeta( name="Default Fine Tuning Method", version="0.0.1", description="A default fine-tuning plugin", ) def __init__(self): # uncomment and pass your credentials to use your own s3 bucket # creds = { # "role_arn": "arn:aws:iam::123456789012:role/some_data" # "aws_access_key_id": "TESTACCESSKEIDTEST11", # "aws_secret_access_key": "QWERTY1232qdsadfasfg5349BBdf30ekp23odk03", # } # self.data_loader = AWSS3DataLoader(**creds) # with empty creds, use anonymous session creds = { } self.data_loader = AWSS3DataLoader(**creds) self.retriever = TextQueryRetriever() self.parser = AWSS3ClickstreamParser(
TextQueryItem, SearchResult, DummyEventType
4
2023-10-31 00:33:13+00:00
24k
facebookresearch/minimax
src/minimax/runners/xp_runner.py
[ { "identifier": "EvalRunner", "path": "src/minimax/runners/eval_runner.py", "snippet": "class EvalRunner:\n def __init__(\n self,\n pop,\n env_names,\n env_kwargs=None,\n n_episodes=10,\n agent_idxs='*',\n render_mode=None):\n\n self.pop = pop\n...
import copy import time import numpy as np import jax import minimax.envs as envs import minimax.models as models import minimax.agents as agents from functools import partial from collections import defaultdict from jax.sharding import Mesh, PartitionSpec as P from jax.experimental import mesh_utils from jax.experimental.shard_map import shard_map from .eval_runner import EvalRunner from .dr_runner import DRRunner from .paired_runner import PAIREDRunner from .plr_runner import PLRRunner from minimax.util.rl import UEDScore, PopPLRManager
16,361
""" Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. """ class RunnerInfo: def __init__( self, runner_cls, is_ued=False): self.runner_cls = runner_cls self.is_ued = is_ued RUNNER_INFO = { 'dr': RunnerInfo(
""" Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. """ class RunnerInfo: def __init__( self, runner_cls, is_ued=False): self.runner_cls = runner_cls self.is_ued = is_ued RUNNER_INFO = { 'dr': RunnerInfo(
runner_cls=DRRunner,
1
2023-10-28 12:12:01+00:00
24k
nv-tlabs/vid2player3d
poselib/poselib/skeleton/tests/test_skeleton.py
[ { "identifier": "SkeletonTree", "path": "poselib/poselib/skeleton/skeleton3d.py", "snippet": "class SkeletonTree(Serializable):\n \"\"\"\n A skeleton tree gives a complete description of a rigid skeleton. It describes a tree structure\n over a list of nodes with their names indicated by strings...
from ...core import * from ..skeleton3d import SkeletonTree, SkeletonState, SkeletonMotion from ...visualization.common import ( plot_skeleton_state, plot_skeleton_motion_interactive, ) from ...visualization.plt_plotter import Matplotlib3DPlotter from ...visualization.skeleton_plotter_tasks import ( Draw3DSkeletonMotion, Draw3DSkeletonState, ) import numpy as np import torch
18,606
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. def test_skel_tree(): skel_tree = SkeletonTree.from_mjcf( "/home/serfcx/DL_Animation/rl_mimic/data/skeletons/humanoid_mimic_mod_2_noind.xml", backend="pytorch", ) skel_tree_rec = SkeletonTree.from_dict(skel_tree.to_dict(), backend="pytorch") # assert skel_tree.to_str() == skel_tree_rec.to_str() print(skel_tree.node_names) print(skel_tree.local_translation) print(skel_tree.parent_indices) skel_state = SkeletonState.zero_pose(skeleton_tree=skel_tree) plot_skeleton_state(task_name="draw_skeleton", skeleton_state=skel_state) skel_state = skel_state.drop_nodes_by_names(["right_hip", "left_hip"]) plot_skeleton_state(task_name="draw_skeleton", skeleton_state=skel_state) def test_skel_motion(): skel_motion = SkeletonMotion.from_file( "/tmp/tmp.npy", backend="pytorch", load_context=True ) plot_skeleton_motion_interactive(skel_motion) def test_grad(): source_motion = SkeletonMotion.from_file( "c:\\Users\\bmatusch\\carbmimic\\data\\motions\\JogFlatTerrain_01_ase.npy", backend="pytorch", device="cuda:0", ) source_tpose = SkeletonState.from_file( "c:\\Users\\bmatusch\\carbmimic\\data\\skeletons\\fox_tpose.npy", backend="pytorch", device="cuda:0", ) target_tpose = SkeletonState.from_file( "c:\\Users\\bmatusch\\carbmimic\\data\\skeletons\\flex_tpose.npy", backend="pytorch", device="cuda:0", ) target_skeleton_tree = target_tpose.skeleton_tree joint_mapping = { "upArm_r": "right_shoulder", "upArm_l": "left_shoulder", "loArm_r": "right_elbow", "loArm_l": "left_elbow", "upLeg_r": "right_hip", "upLeg_l": "left_hip", "loLeg_r": "right_knee", "loLeg_l": "left_knee", "foot_r": "right_ankle", "foot_l": "left_ankle", "hips": "pelvis", "neckA": "neck", "spineA": "abdomen", } rotation_to_target_skeleton = quat_from_angle_axis( angle=torch.tensor(90.0).float(), axis=torch.tensor([1, 0, 0]).float(), degree=True, ) target_motion = source_motion.retarget_to( joint_mapping=joint_mapping, source_tpose_local_rotation=source_tpose.local_rotation, source_tpose_root_translation=source_tpose.root_translation, target_skeleton_tree=target_skeleton_tree, target_tpose_local_rotation=target_tpose.local_rotation, target_tpose_root_translation=target_tpose.root_translation, rotation_to_target_skeleton=rotation_to_target_skeleton, scale_to_target_skeleton=0.01, ) target_state = SkeletonState( target_motion.tensor[800, :], target_motion.skeleton_tree, target_motion.is_local, ) skeleton_tree = target_state.skeleton_tree root_translation = target_state.root_translation global_translation = target_state.global_translation q = np.zeros((len(skeleton_tree), 4), dtype=np.float32) q[..., 3] = 1.0 q = torch.from_numpy(q) max_its = 10000
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. def test_skel_tree(): skel_tree = SkeletonTree.from_mjcf( "/home/serfcx/DL_Animation/rl_mimic/data/skeletons/humanoid_mimic_mod_2_noind.xml", backend="pytorch", ) skel_tree_rec = SkeletonTree.from_dict(skel_tree.to_dict(), backend="pytorch") # assert skel_tree.to_str() == skel_tree_rec.to_str() print(skel_tree.node_names) print(skel_tree.local_translation) print(skel_tree.parent_indices) skel_state = SkeletonState.zero_pose(skeleton_tree=skel_tree) plot_skeleton_state(task_name="draw_skeleton", skeleton_state=skel_state) skel_state = skel_state.drop_nodes_by_names(["right_hip", "left_hip"]) plot_skeleton_state(task_name="draw_skeleton", skeleton_state=skel_state) def test_skel_motion(): skel_motion = SkeletonMotion.from_file( "/tmp/tmp.npy", backend="pytorch", load_context=True ) plot_skeleton_motion_interactive(skel_motion) def test_grad(): source_motion = SkeletonMotion.from_file( "c:\\Users\\bmatusch\\carbmimic\\data\\motions\\JogFlatTerrain_01_ase.npy", backend="pytorch", device="cuda:0", ) source_tpose = SkeletonState.from_file( "c:\\Users\\bmatusch\\carbmimic\\data\\skeletons\\fox_tpose.npy", backend="pytorch", device="cuda:0", ) target_tpose = SkeletonState.from_file( "c:\\Users\\bmatusch\\carbmimic\\data\\skeletons\\flex_tpose.npy", backend="pytorch", device="cuda:0", ) target_skeleton_tree = target_tpose.skeleton_tree joint_mapping = { "upArm_r": "right_shoulder", "upArm_l": "left_shoulder", "loArm_r": "right_elbow", "loArm_l": "left_elbow", "upLeg_r": "right_hip", "upLeg_l": "left_hip", "loLeg_r": "right_knee", "loLeg_l": "left_knee", "foot_r": "right_ankle", "foot_l": "left_ankle", "hips": "pelvis", "neckA": "neck", "spineA": "abdomen", } rotation_to_target_skeleton = quat_from_angle_axis( angle=torch.tensor(90.0).float(), axis=torch.tensor([1, 0, 0]).float(), degree=True, ) target_motion = source_motion.retarget_to( joint_mapping=joint_mapping, source_tpose_local_rotation=source_tpose.local_rotation, source_tpose_root_translation=source_tpose.root_translation, target_skeleton_tree=target_skeleton_tree, target_tpose_local_rotation=target_tpose.local_rotation, target_tpose_root_translation=target_tpose.root_translation, rotation_to_target_skeleton=rotation_to_target_skeleton, scale_to_target_skeleton=0.01, ) target_state = SkeletonState( target_motion.tensor[800, :], target_motion.skeleton_tree, target_motion.is_local, ) skeleton_tree = target_state.skeleton_tree root_translation = target_state.root_translation global_translation = target_state.global_translation q = np.zeros((len(skeleton_tree), 4), dtype=np.float32) q[..., 3] = 1.0 q = torch.from_numpy(q) max_its = 10000
task = Draw3DSkeletonState(task_name="", skeleton_state=target_state)
7
2023-10-30 20:43:43+00:00
24k
masked-spacetime-hashing/msth
MSTH/SpaceTimeHashing/permute_field.py
[ { "identifier": "RaySamples", "path": "nerfstudio/cameras/rays.py", "snippet": "class RaySamples(TensorDataclass):\n \"\"\"Samples along a ray\"\"\"\n\n frustums: Frustums\n \"\"\"Frustums along ray.\"\"\"\n camera_indices: Optional[TensorType[\"bs\":..., 1]] = None\n \"\"\"Camera index.\...
import torch import numpy as np import tinycudann as tcnn from typing import * from nerfacc import ContractionType, contract from torch.nn.parameter import Parameter from torchtyping import TensorType from dataclasses import dataclass, field from nerfstudio.cameras.rays import RaySamples, Frustums, RayBundle from nerfstudio.data.scene_box import SceneBox from nerfstudio.field_components.activations import trunc_exp from nerfstudio.field_components.embedding import Embedding from nerfstudio.field_components.field_heads import FieldHeadNames from nerfstudio.fields.base_field import Field from nerfstudio.model_components.ray_samplers import ( ProposalNetworkSampler, UniformSampler, ) from nerfstudio.model_components.renderers import ( AccumulationRenderer, DepthRenderer, NormalsRenderer, RGBRenderer, ) from nerfstudio.field_components.spatial_distortions import ( SceneContraction, SpatialDistortion, ) from nerfstudio.model_components.losses import ( MSELoss, distortion_loss, interlevel_loss, orientation_loss, pred_normal_loss, ) from torchmetrics import PeakSignalNoiseRatio from torchmetrics.functional import structural_similarity_index_measure from torchmetrics.image.lpip import LearnedPerceptualImagePatchSimilarity from nerfstudio.field_components.spatial_distortions import SceneContraction from nerfstudio.fields.density_fields import HashMLPDensityField from nerfstudio.model_components.scene_colliders import NearFarCollider from nerfstudio.model_components.shaders import NormalsShader from nerfstudio.models.base_model import Model, ModelConfig from nerfstudio.utils import colormaps from nerfstudio.cameras.rays import RayBundle from nerfstudio.engine.callbacks import ( TrainingCallback, TrainingCallbackAttributes, TrainingCallbackLocation, ) from MSTH.SpaceTimeHashing.ray_samplers import ProposalNetworkSamplerSpatial, spacetime, spacetime_concat from rich.console import Console
17,600
CONSOLE = Console(width=120) class SpaceTimeDensityFieldWithPermutation(Field): def __init__( self, aabb: TensorType, num_layers: int = 2, hidden_dim: int = 64, spatial_distortion: Optional[SpatialDistortion] = None, use_linear: bool = False, num_levels: int = 8, max_res: int = 1024, base_res: int = 16, log2_hashmap_size: int = 18, features_per_level: int = 2, ) -> None: super().__init__() self.register_buffer("aabb", aabb) self.spatial_distortion = spatial_distortion self.use_linear = use_linear growth_factor = np.exp((np.log(max_res) - np.log(base_res)) / (num_levels - 1)) self.register_buffer("max_res", torch.tensor(max_res)) self.register_buffer("num_levels", torch.tensor(num_levels)) self.register_buffer("log2_hashmap_size", torch.tensor(log2_hashmap_size)) config = { "encoding": { "otype": "HashGrid", "n_levels": num_levels, "n_features_per_level": features_per_level, "log2_hashmap_size": log2_hashmap_size, "base_resolution": base_res, "per_level_scale": growth_factor, }, "network": { "otype": "FullyFusedMLP", "activation": "ReLU", "output_activation": "None", "n_neurons": hidden_dim, "n_hidden_layers": num_layers - 1, }, } if not self.use_linear: self.mlp_base = tcnn.NetworkWithInputEncoding( n_input_dims=4, n_output_dims=1, encoding_config=config["encoding"], network_config=config["network"], ) self.spatial_mlp_base = tcnn.NetworkWithInputEncoding( n_input_dims=3, n_output_dims=1, encoding_config=config["encoding"], network_config=config["network"], ) self.xyzt = tcnn.NetworkWithInputEncoding( n_input_dims=4, n_output_dims=1, encoding_config=config["encoding"], network_config=config["network"], ) self.xtyz = tcnn.NetworkWithInputEncoding( n_input_dims=4, n_output_dims=1, encoding_config=config["encoding"], network_config=config["network"], ) self.xytz = tcnn.NetworkWithInputEncoding( n_input_dims=4, n_output_dims=1, encoding_config=config["encoding"], network_config=config["network"], ) self.txyz = tcnn.NetworkWithInputEncoding( n_input_dims=4, n_output_dims=1, encoding_config=config["encoding"], network_config=config["network"], ) else: self.encoding = tcnn.Encoding(n_input_dims=4, encoding_config=config["encoding"]) self.linear = torch.nn.Linear(self.encoding.n_output_dims, 1)
CONSOLE = Console(width=120) class SpaceTimeDensityFieldWithPermutation(Field): def __init__( self, aabb: TensorType, num_layers: int = 2, hidden_dim: int = 64, spatial_distortion: Optional[SpatialDistortion] = None, use_linear: bool = False, num_levels: int = 8, max_res: int = 1024, base_res: int = 16, log2_hashmap_size: int = 18, features_per_level: int = 2, ) -> None: super().__init__() self.register_buffer("aabb", aabb) self.spatial_distortion = spatial_distortion self.use_linear = use_linear growth_factor = np.exp((np.log(max_res) - np.log(base_res)) / (num_levels - 1)) self.register_buffer("max_res", torch.tensor(max_res)) self.register_buffer("num_levels", torch.tensor(num_levels)) self.register_buffer("log2_hashmap_size", torch.tensor(log2_hashmap_size)) config = { "encoding": { "otype": "HashGrid", "n_levels": num_levels, "n_features_per_level": features_per_level, "log2_hashmap_size": log2_hashmap_size, "base_resolution": base_res, "per_level_scale": growth_factor, }, "network": { "otype": "FullyFusedMLP", "activation": "ReLU", "output_activation": "None", "n_neurons": hidden_dim, "n_hidden_layers": num_layers - 1, }, } if not self.use_linear: self.mlp_base = tcnn.NetworkWithInputEncoding( n_input_dims=4, n_output_dims=1, encoding_config=config["encoding"], network_config=config["network"], ) self.spatial_mlp_base = tcnn.NetworkWithInputEncoding( n_input_dims=3, n_output_dims=1, encoding_config=config["encoding"], network_config=config["network"], ) self.xyzt = tcnn.NetworkWithInputEncoding( n_input_dims=4, n_output_dims=1, encoding_config=config["encoding"], network_config=config["network"], ) self.xtyz = tcnn.NetworkWithInputEncoding( n_input_dims=4, n_output_dims=1, encoding_config=config["encoding"], network_config=config["network"], ) self.xytz = tcnn.NetworkWithInputEncoding( n_input_dims=4, n_output_dims=1, encoding_config=config["encoding"], network_config=config["network"], ) self.txyz = tcnn.NetworkWithInputEncoding( n_input_dims=4, n_output_dims=1, encoding_config=config["encoding"], network_config=config["network"], ) else: self.encoding = tcnn.Encoding(n_input_dims=4, encoding_config=config["encoding"]) self.linear = torch.nn.Linear(self.encoding.n_output_dims, 1)
def get_density(self, ray_samples: RaySamples) -> Tuple[TensorType, None]:
0
2023-10-26 04:39:15+00:00
24k
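The HashGrid encoding configured above spaces its level resolutions geometrically via `growth_factor`. A small worked example of what the defaults in the signature (base_res=16, max_res=1024, num_levels=8) imply, assuming tcnn's usual per-level scaling:

import numpy as np

base_res, max_res, num_levels = 16, 1024, 8
growth_factor = np.exp((np.log(max_res) - np.log(base_res)) / (num_levels - 1))
# Equivalent closed form: (max_res / base_res) ** (1 / (num_levels - 1)) ~= 1.811
resolutions = [base_res * growth_factor ** level for level in range(num_levels)]
print([round(r) for r in resolutions])  # roughly [16, 29, 52, 95, 172, 312, 565, 1024]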
chenruduan/OAReactDiff
oa_reactdiff/trainer/pl_trainer.py
[ { "identifier": "ProcessedQM9", "path": "oa_reactdiff/dataset/qm9.py", "snippet": "class ProcessedQM9(BaseQM9):\n def __init__(\n self,\n npz_path,\n center=True,\n pad_fragments=2,\n device=\"cpu\",\n zero_charge=False,\n remove_h=False,\n **kw...
from typing import Dict, List, Optional, Tuple from pathlib import Path from torch import nn from torch.utils.data import DataLoader from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts, StepLR from pytorch_lightning import LightningModule from torchmetrics.classification import ( BinaryAccuracy, BinaryAUROC, BinaryF1Score, BinaryPrecision, BinaryCohenKappa, ) from torchmetrics import PearsonCorrCoef, SpearmanCorrCoef, MeanAbsoluteError from oa_reactdiff.dataset import ( ProcessedQM9, ProcessedDoubleQM9, ProcessedTripleQM9, ProcessedTS1x, ) from oa_reactdiff.dynamics import EGNNDynamics, Confidence from oa_reactdiff.diffusion._schedule import DiffSchedule, PredefinedNoiseSchedule from oa_reactdiff.diffusion._normalizer import Normalizer, FEATURE_MAPPING from oa_reactdiff.diffusion.en_diffusion import EnVariationalDiffusion from oa_reactdiff.trainer._metrics import average_over_batch_metrics, pretty_print from oa_reactdiff.analyze.rmsd import batch_rmsd import torch import copy import torch.nn.functional as F import numpy as np import pandas as pd import oa_reactdiff.utils.training_tools as utils
20,037
PROCESS_FUNC = { "QM9": ProcessedQM9, "DoubleQM9": ProcessedDoubleQM9, "TripleQM9": ProcessedTripleQM9, "TS1x": ProcessedTS1x, } FILE_TYPE = { "QM9": ".npz", "DoubleQM9": ".npz", "TripleQM9": ".npz", "TS1x": ".pkl", } LR_SCHEDULER = { "cos": CosineAnnealingWarmRestarts, "step": StepLR, } class DDPMModule(LightningModule): def __init__( self, model_config: Dict, optimizer_config: Dict, training_config: Dict, node_nfs: List[int] = [9] * 3, edge_nf: int = 4, condition_nf: int = 3, fragment_names: List[str] = ["inorg_node", "org_edge", "org_node"], pos_dim: int = 3, update_pocket_coords: bool = True, condition_time: bool = True, edge_cutoff: Optional[float] = None, norm_values: Tuple = (1.0, 1.0, 1.0), norm_biases: Tuple = (0.0, 0.0, 0.0), noise_schedule: str = "polynomial_2", timesteps: int = 1000, precision: float = 1e-5, loss_type: str = "l2", pos_only: bool = False, process_type: Optional[str] = None, model: nn.Module = None, enforce_same_encoding: Optional[List] = None, scales: List[float] = [1.0, 1.0, 1.0], eval_epochs: int = 20, source: Optional[Dict] = None, fixed_idx: Optional[List] = None, ) -> None: super().__init__() egnn_dynamics = EGNNDynamics( model_config=model_config, node_nfs=node_nfs, edge_nf=edge_nf, condition_nf=condition_nf, fragment_names=fragment_names, pos_dim=pos_dim, update_pocket_coords=update_pocket_coords, condition_time=condition_time, edge_cutoff=edge_cutoff, model=model, enforce_same_encoding=enforce_same_encoding, source=source, ) normalizer = Normalizer( norm_values=norm_values, norm_biases=norm_biases, pos_dim=pos_dim, ) gamma_module = PredefinedNoiseSchedule( noise_schedule=noise_schedule, timesteps=timesteps, precision=precision, )
PROCESS_FUNC = { "QM9": ProcessedQM9, "DoubleQM9": ProcessedDoubleQM9, "TripleQM9": ProcessedTripleQM9, "TS1x": ProcessedTS1x, } FILE_TYPE = { "QM9": ".npz", "DoubleQM9": ".npz", "TripleQM9": ".npz", "TS1x": ".pkl", } LR_SCHEDULER = { "cos": CosineAnnealingWarmRestarts, "step": StepLR, } class DDPMModule(LightningModule): def __init__( self, model_config: Dict, optimizer_config: Dict, training_config: Dict, node_nfs: List[int] = [9] * 3, edge_nf: int = 4, condition_nf: int = 3, fragment_names: List[str] = ["inorg_node", "org_edge", "org_node"], pos_dim: int = 3, update_pocket_coords: bool = True, condition_time: bool = True, edge_cutoff: Optional[float] = None, norm_values: Tuple = (1.0, 1.0, 1.0), norm_biases: Tuple = (0.0, 0.0, 0.0), noise_schedule: str = "polynomial_2", timesteps: int = 1000, precision: float = 1e-5, loss_type: str = "l2", pos_only: bool = False, process_type: Optional[str] = None, model: nn.Module = None, enforce_same_encoding: Optional[List] = None, scales: List[float] = [1.0, 1.0, 1.0], eval_epochs: int = 20, source: Optional[Dict] = None, fixed_idx: Optional[List] = None, ) -> None: super().__init__() egnn_dynamics = EGNNDynamics( model_config=model_config, node_nfs=node_nfs, edge_nf=edge_nf, condition_nf=condition_nf, fragment_names=fragment_names, pos_dim=pos_dim, update_pocket_coords=update_pocket_coords, condition_time=condition_time, edge_cutoff=edge_cutoff, model=model, enforce_same_encoding=enforce_same_encoding, source=source, ) normalizer = Normalizer( norm_values=norm_values, norm_biases=norm_biases, pos_dim=pos_dim, ) gamma_module = PredefinedNoiseSchedule( noise_schedule=noise_schedule, timesteps=timesteps, precision=precision, )
schedule = DiffSchedule(gamma_module=gamma_module, norm_values=norm_values)
6
2023-10-30 02:53:38+00:00
24k
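The `LR_SCHEDULER` mapping above selects a torch scheduler class by a string key. A hedged sketch of how such a lookup is typically consumed; the model, optimizer, and `optimizer_config` keys below are illustrative, not the module's actual ones:

import torch
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts, StepLR

LR_SCHEDULER = {"cos": CosineAnnealingWarmRestarts, "step": StepLR}

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

# Hypothetical config: pick the scheduler class by key, then pass its kwargs through.
optimizer_config = {"lr_schedule_type": "cos", "lr_schedule_config": {"T_0": 10}}
scheduler = LR_SCHEDULER[optimizer_config["lr_schedule_type"]](
    optimizer, **optimizer_config["lr_schedule_config"]
)

for _ in range(3):
    optimizer.step()
    scheduler.step()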
nv-tlabs/pacer
poselib/poselib/skeleton/tests/test_skeleton.py
[ { "identifier": "SkeletonTree", "path": "poselib/poselib/skeleton/skeleton3d.py", "snippet": "class SkeletonTree(Serializable):\n \"\"\"\n A skeleton tree gives a complete description of a rigid skeleton. It describes a tree structure\n over a list of nodes with their names indicated by strings...
from ...core import * from ..skeleton3d import SkeletonTree, SkeletonState, SkeletonMotion from ...visualization.common import ( plot_skeleton_state, plot_skeleton_motion_interactive, ) from ...visualization.plt_plotter import Matplotlib3DPlotter from ...visualization.skeleton_plotter_tasks import ( Draw3DSkeletonMotion, Draw3DSkeletonState, ) import numpy as np import torch
18,248
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. def test_skel_tree(): skel_tree = SkeletonTree.from_mjcf( "/home/serfcx/DL_Animation/rl_mimic/data/skeletons/humanoid_mimic_mod_2_noind.xml", backend="pytorch", ) skel_tree_rec = SkeletonTree.from_dict(skel_tree.to_dict(), backend="pytorch") # assert skel_tree.to_str() == skel_tree_rec.to_str() print(skel_tree.node_names) print(skel_tree.local_translation) print(skel_tree.parent_indices) skel_state = SkeletonState.zero_pose(skeleton_tree=skel_tree) plot_skeleton_state(task_name="draw_skeleton", skeleton_state=skel_state) skel_state = skel_state.drop_nodes_by_names(["right_hip", "left_hip"]) plot_skeleton_state(task_name="draw_skeleton", skeleton_state=skel_state) def test_skel_motion(): skel_motion = SkeletonMotion.from_file( "/tmp/tmp.npy", backend="pytorch", load_context=True )
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. def test_skel_tree(): skel_tree = SkeletonTree.from_mjcf( "/home/serfcx/DL_Animation/rl_mimic/data/skeletons/humanoid_mimic_mod_2_noind.xml", backend="pytorch", ) skel_tree_rec = SkeletonTree.from_dict(skel_tree.to_dict(), backend="pytorch") # assert skel_tree.to_str() == skel_tree_rec.to_str() print(skel_tree.node_names) print(skel_tree.local_translation) print(skel_tree.parent_indices) skel_state = SkeletonState.zero_pose(skeleton_tree=skel_tree) plot_skeleton_state(task_name="draw_skeleton", skeleton_state=skel_state) skel_state = skel_state.drop_nodes_by_names(["right_hip", "left_hip"]) plot_skeleton_state(task_name="draw_skeleton", skeleton_state=skel_state) def test_skel_motion(): skel_motion = SkeletonMotion.from_file( "/tmp/tmp.npy", backend="pytorch", load_context=True )
plot_skeleton_motion_interactive(skel_motion)
4
2023-10-31 20:47:12+00:00
24k
Improbable-AI/dexenv
dexenv/envs/dclaw_multiobjs.py
[ { "identifier": "DClawBase", "path": "dexenv/envs/dclaw_base.py", "snippet": "class DClawBase(VecTask):\n\n def __init__(self, cfg, sim_device, rl_device, graphics_device_id):\n\n self.cfg = cfg\n headless = self.cfg.headless\n self.randomize = self.cfg[\"task\"][\"randomize\"]\n...
import numpy as np import torch import dexenv from gym.utils import seeding from isaacgym import gymapi from loguru import logger from tqdm import tqdm from dexenv.envs.dclaw_base import DClawBase from dexenv.utils.common import chunker_list from dexenv.utils.common import get_all_files_with_name from dexenv.utils.common import load_from_pickle from dexenv.utils.isaac_utils import load_a_goal_object_asset from dexenv.utils.isaac_utils import load_an_object_asset from dexenv.utils.isaac_utils import load_obj_texture
14,864
self.gym.create_asset_force_sensor(dclaw_asset, ft_handle, sensor_pose) dclaw_start_pose = self.get_dclaw_start_pose() object_start_pose = self.get_object_start_pose(dclaw_start_pose) goal_start_pose = self.get_goal_object_start_pose(object_start_pose=object_start_pose) self.dclaws = [] self.envs = [] self.object_init_state = [] self.hand_start_states = [] self.hand_indices = [] self.fingertip_indices = [] self.object_indices = [] self.object_cat_indices = [] self.goal_object_indices = [] self.render_camera_handles = [] if self.cfg.rgb_render: render_cam_pose, render_cam_params = self.get_visual_render_camera_setup() self.fingertip_handles = [self.gym.find_asset_rigid_body_index(dclaw_asset, name) for name in self.fingertips] dclaw_rb_count = self.gym.get_asset_rigid_body_count(dclaw_asset) object_rb_count = self.gym.get_asset_rigid_body_count(object_assets[0]) self.object_rb_handles = list(range(dclaw_rb_count, dclaw_rb_count + object_rb_count)) self.object_handles = [] num_object_assets = len(object_assets) env_obj_ids = [] for i in range(self.num_envs): # create env instance obj_asset_id = i % num_object_assets env_obj_ids.append(object_ids[obj_asset_id]) env_ptr = self.gym.create_env( self.sim, lower, upper, num_per_row ) if self.aggregate_mode >= 1: # compute aggregate size obj_num_bodies = self.gym.get_asset_rigid_body_count(object_assets[obj_asset_id]) obj_num_shapes = self.gym.get_asset_rigid_shape_count(object_assets[obj_asset_id]) max_agg_bodies = self.num_dclaw_bodies + obj_num_bodies * 2 + 1 max_agg_shapes = self.num_dclaw_shapes + obj_num_shapes * 2 + 1 self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True) self.create_hand_actor(env_ptr=env_ptr, dclaw_asset=dclaw_asset, dclaw_start_pose=dclaw_start_pose, dclaw_dof_props=dclaw_dof_props, env_id=i) # add object object_handle = self.gym.create_actor(env_ptr, object_assets[obj_asset_id], object_start_pose, "object", i, 0, 1) self.object_handles.append(object_handle) self.object_init_state.append([object_start_pose.p.x, object_start_pose.p.y, object_start_pose.p.z, object_start_pose.r.x, object_start_pose.r.y, object_start_pose.r.z, object_start_pose.r.w, 0, 0, 0, 0, 0, 0]) object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM) self.object_indices.append(object_idx) self.object_cat_indices.append(object_cat_ids[obj_asset_id]) # add goal object goal_handle = self.gym.create_actor(env_ptr, goal_assets[obj_asset_id], goal_start_pose, "goal_object", i + self.num_envs, 0, 2) goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM) self.goal_object_indices.append(goal_object_idx) if self.cfg.obj.load_texture: self.gym.set_rigid_body_texture(env_ptr, object_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, object_textures[obj_asset_id] ) self.gym.set_rigid_body_texture(env_ptr, goal_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, object_textures[obj_asset_id] ) else: color = np.array([179, 193, 134]) / 255.0 self.gym.set_rigid_body_color( env_ptr, object_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(*color)) self.gym.set_rigid_body_color( env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(*color)) table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, "table", i, 0) self.gym.set_rigid_body_color(env_ptr, table_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(180 / 255., 180 / 255., 180 / 255.)) if self.cfg.rgb_render: render_camera_handle = self.create_camera(render_cam_pose, env_ptr, render_cam_params) self.render_camera_handles.append(render_camera_handle[0]) if 
self.aggregate_mode > 0: self.gym.end_aggregate(env_ptr) self.envs.append(env_ptr) object_rb_props = self.gym.get_actor_rigid_body_properties(env_ptr, object_handle) self.object_rb_masses = [prop.mass for prop in object_rb_props] self.setup_torch_states() self.env_obj_ids = torch.LongTensor(env_obj_ids).to(self.device).view(-1, 1) self.object_cat_indices = torch.LongTensor(self.object_cat_indices).to(self.device).view(-1, 1) def parse_obj_dataset(self, dataset): asset_root = dexenv.LIB_PATH.joinpath('assets') split_dataset_name = dataset.split(':') if len(split_dataset_name) == 1: dataset_path = asset_root.joinpath(dataset, 'train') else: target_object = split_dataset_name[1] dataset_path = asset_root.joinpath(split_dataset_name[0], 'train', target_object) logger.warning(f'Dataset path:{dataset_path}')
class DclawMultiObjs(DClawBase): def __init__(self, cfg, sim_device, rl_device, graphics_device_id): self.set_random_gen() self.object_urdfs, self.dataset_path, self.obj_name_to_cat_id = self.parse_obj_dataset(cfg.obj.dataset) self.num_objects = len(self.object_urdfs) logger.info(f'Object urdf root path:{self.dataset_path}.') logger.info(f'Number of available objects:{self.num_objects}.') super().__init__(cfg=cfg, sim_device=sim_device, rl_device=rl_device, graphics_device_id=graphics_device_id) def set_random_gen(self, seed=12345): self.np_random, seed = seeding.np_random(seed) def _create_envs(self, num_envs, spacing, num_per_row): lower = gymapi.Vec3(-spacing, -spacing, 0.0) upper = gymapi.Vec3(spacing, spacing, spacing) asset_root = dexenv.LIB_PATH.joinpath('assets', 'dclaw').as_posix() dclaw_asset, dclaw_dof_props = self.get_dclaw_asset(asset_root=asset_root) # load manipulated object and goal assets table_asset = self.get_table_asset() table_pose = self.get_table_pose() object_assets, goal_assets, object_ids, object_textures, object_ptds, object_cat_ids = self.load_object_asset() # create fingertip force sensors, if needed if self.obs_type == "full_state": sensor_pose = gymapi.Transform() for ft_handle in self.fingertip_handles: self.gym.create_asset_force_sensor(dclaw_asset, ft_handle, sensor_pose) dclaw_start_pose = self.get_dclaw_start_pose() object_start_pose = self.get_object_start_pose(dclaw_start_pose) goal_start_pose = self.get_goal_object_start_pose(object_start_pose=object_start_pose) self.dclaws = [] self.envs = [] self.object_init_state = [] self.hand_start_states = [] self.hand_indices = [] self.fingertip_indices = [] self.object_indices = [] self.object_cat_indices = [] self.goal_object_indices = [] self.render_camera_handles = [] if self.cfg.rgb_render: render_cam_pose, render_cam_params = self.get_visual_render_camera_setup() self.fingertip_handles = [self.gym.find_asset_rigid_body_index(dclaw_asset, name) for name in self.fingertips] dclaw_rb_count = self.gym.get_asset_rigid_body_count(dclaw_asset) object_rb_count = self.gym.get_asset_rigid_body_count(object_assets[0]) self.object_rb_handles = list(range(dclaw_rb_count, dclaw_rb_count + object_rb_count)) self.object_handles = [] num_object_assets = len(object_assets) env_obj_ids = [] for i in range(self.num_envs): # create env instance obj_asset_id = i % num_object_assets env_obj_ids.append(object_ids[obj_asset_id]) env_ptr = self.gym.create_env( self.sim, lower, upper, num_per_row ) if self.aggregate_mode >= 1: # compute aggregate size obj_num_bodies = self.gym.get_asset_rigid_body_count(object_assets[obj_asset_id]) obj_num_shapes = self.gym.get_asset_rigid_shape_count(object_assets[obj_asset_id]) max_agg_bodies = self.num_dclaw_bodies + obj_num_bodies * 2 + 1 max_agg_shapes = self.num_dclaw_shapes + obj_num_shapes * 2 + 1 self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True) self.create_hand_actor(env_ptr=env_ptr, dclaw_asset=dclaw_asset, dclaw_start_pose=dclaw_start_pose, dclaw_dof_props=dclaw_dof_props, env_id=i) # add object object_handle = self.gym.create_actor(env_ptr, object_assets[obj_asset_id], object_start_pose, "object", i, 0, 1) self.object_handles.append(object_handle) self.object_init_state.append([object_start_pose.p.x, object_start_pose.p.y, object_start_pose.p.z, object_start_pose.r.x, object_start_pose.r.y, object_start_pose.r.z, object_start_pose.r.w, 0, 0, 0, 0, 0, 0]) object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM) 
self.object_indices.append(object_idx) self.object_cat_indices.append(object_cat_ids[obj_asset_id]) # add goal object goal_handle = self.gym.create_actor(env_ptr, goal_assets[obj_asset_id], goal_start_pose, "goal_object", i + self.num_envs, 0, 2) goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM) self.goal_object_indices.append(goal_object_idx) if self.cfg.obj.load_texture: self.gym.set_rigid_body_texture(env_ptr, object_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, object_textures[obj_asset_id] ) self.gym.set_rigid_body_texture(env_ptr, goal_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, object_textures[obj_asset_id] ) else: color = np.array([179, 193, 134]) / 255.0 self.gym.set_rigid_body_color( env_ptr, object_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(*color)) self.gym.set_rigid_body_color( env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(*color)) table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, "table", i, 0) self.gym.set_rigid_body_color(env_ptr, table_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(180 / 255., 180 / 255., 180 / 255.)) if self.cfg.rgb_render: render_camera_handle = self.create_camera(render_cam_pose, env_ptr, render_cam_params) self.render_camera_handles.append(render_camera_handle[0]) if self.aggregate_mode > 0: self.gym.end_aggregate(env_ptr) self.envs.append(env_ptr) object_rb_props = self.gym.get_actor_rigid_body_properties(env_ptr, object_handle) self.object_rb_masses = [prop.mass for prop in object_rb_props] self.setup_torch_states() self.env_obj_ids = torch.LongTensor(env_obj_ids).to(self.device).view(-1, 1) self.object_cat_indices = torch.LongTensor(self.object_cat_indices).to(self.device).view(-1, 1) def parse_obj_dataset(self, dataset): asset_root = dexenv.LIB_PATH.joinpath('assets') split_dataset_name = dataset.split(':') if len(split_dataset_name) == 1: dataset_path = asset_root.joinpath(dataset, 'train') else: target_object = split_dataset_name[1] dataset_path = asset_root.joinpath(split_dataset_name[0], 'train', target_object) logger.warning(f'Dataset path:{dataset_path}')
urdf_files = get_all_files_with_name(dataset_path, name='model.urdf')
2
2023-10-25 17:22:41+00:00
24k
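The `_create_envs` loop in the DclawMultiObjs snippet above spreads the loaded object assets over the environments with a simple round-robin (`obj_asset_id = i % num_object_assets`) and stores one object id per environment for later tensor indexing. As a minimal standalone sketch of just that assignment pattern (the env count and ids below are made up; the real values come from the task config and the object loader):

import torch

num_envs = 8               # hypothetical env count
object_ids = [11, 27, 42]  # hypothetical asset ids returned by the object loader
num_object_assets = len(object_ids)

env_obj_ids = []
for i in range(num_envs):
    obj_asset_id = i % num_object_assets   # round-robin over the available assets
    env_obj_ids.append(object_ids[obj_asset_id])

# same reshape as in _create_envs: a (num_envs, 1) column of object ids
env_obj_ids = torch.LongTensor(env_obj_ids).view(-1, 1)
print(env_obj_ids.squeeze(1).tolist())   # [11, 27, 42, 11, 27, 42, 11, 27]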
ai-safety-foundation/sparse_autoencoder
sparse_autoencoder/activation_resampler/tests/test_activation_resampler.py
[ { "identifier": "ActivationResampler", "path": "sparse_autoencoder/activation_resampler/activation_resampler.py", "snippet": "class ActivationResampler:\n \"\"\"Activation resampler.\n\n Collates the number of times each neuron fires over a set number of learned activation vectors,\n and then p...
from jaxtyping import Float, Int64
from torch import Tensor
from torch.nn import Parameter
from sparse_autoencoder.activation_resampler.activation_resampler import ActivationResampler
from sparse_autoencoder.activation_store.base_store import ActivationStore
from sparse_autoencoder.activation_store.tensor_store import TensorActivationStore
from sparse_autoencoder.autoencoder.model import SparseAutoencoder, SparseAutoencoderConfig
from sparse_autoencoder.loss.decoded_activations_l2 import L2ReconstructionLoss
from sparse_autoencoder.loss.learned_activations_l1 import LearnedActivationsL1Loss
from sparse_autoencoder.loss.reducer import LossReducer
from sparse_autoencoder.tensor_types import Axis
import pytest
import torch
16,481
"""Tests for the resample_neurons module.""" DEFAULT_N_ACTIVATIONS_STORE: int = 100 DEFAULT_N_INPUT_FEATURES: int = 3 DEFAULT_N_LEARNED_FEATURES: int = 5 DEFAULT_N_COMPONENTS: int = 2 @pytest.fixture() def full_activation_store() -> ActivationStore: """Create a dummy activation store, pre-populated with data.""" store = TensorActivationStore( max_items=DEFAULT_N_ACTIVATIONS_STORE, n_components=DEFAULT_N_COMPONENTS, n_neurons=DEFAULT_N_INPUT_FEATURES, ) store.fill_with_test_data( batch_size=DEFAULT_N_ACTIVATIONS_STORE, input_features=DEFAULT_N_INPUT_FEATURES, n_batches=1, n_components=DEFAULT_N_COMPONENTS, ) return store @pytest.fixture() def autoencoder_model() -> SparseAutoencoder: """Create a dummy autoencoder model.""" return SparseAutoencoder(
"""Tests for the resample_neurons module.""" DEFAULT_N_ACTIVATIONS_STORE: int = 100 DEFAULT_N_INPUT_FEATURES: int = 3 DEFAULT_N_LEARNED_FEATURES: int = 5 DEFAULT_N_COMPONENTS: int = 2 @pytest.fixture() def full_activation_store() -> ActivationStore: """Create a dummy activation store, pre-populated with data.""" store = TensorActivationStore( max_items=DEFAULT_N_ACTIVATIONS_STORE, n_components=DEFAULT_N_COMPONENTS, n_neurons=DEFAULT_N_INPUT_FEATURES, ) store.fill_with_test_data( batch_size=DEFAULT_N_ACTIVATIONS_STORE, input_features=DEFAULT_N_INPUT_FEATURES, n_batches=1, n_components=DEFAULT_N_COMPONENTS, ) return store @pytest.fixture() def autoencoder_model() -> SparseAutoencoder: """Create a dummy autoencoder model.""" return SparseAutoencoder(
SparseAutoencoderConfig(
4
2023-10-27 07:37:15+00:00
24k
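The test module above wires its inputs through `pytest.fixture` factories (`full_activation_store`, `autoencoder_model`) so every test receives a freshly built store or model. A reduced sketch of the same fixture-plus-test pattern, with a stand-in class so it runs without the sparse_autoencoder package (the class and test names here are illustrative only):

import pytest

class DummyStore:
    """Stand-in for the activation store, only to show the fixture wiring."""

    def __init__(self, max_items: int):
        self.items = list(range(max_items))

@pytest.fixture()
def dummy_store() -> DummyStore:
    """Create a small, pre-populated store for each test."""
    return DummyStore(max_items=100)

def test_store_is_prefilled(dummy_store: DummyStore) -> None:
    assert len(dummy_store.items) == 100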
OATML-Markslab/ProteinNPT
scripts/train.py
[ { "identifier": "ProteinNPTModel", "path": "proteinnpt/model.py", "snippet": "class ProteinNPTModel(nn.Module):\n def __init__(self, args, alphabet):\n super().__init__()\n self.args = args\n self.alphabet = alphabet\n self.alphabet_size = len(alphabet)\n self.paddi...
import os,gc
import json
import argparse
import random
import numpy as np
import pandas as pd
import wandb
import torch
import proteinnpt,baselines,utils
from collections import defaultdict
from proteinnpt.model import ProteinNPTModel
from baselines.model import AugmentedPropertyPredictor
from utils.esm.data import Alphabet
from utils.tranception.model_pytorch import get_tranception_tokenizer
from utils.data_utils import get_train_val_test_data, standardize, pnpt_count_non_nan, pnpt_spearmanr
from utils.msa_utils import process_MSA
from utils.model_utils import Trainer
21,137
if args.model_type=="ProteinNPT": normalization = test_eval_results['eval_num_masked_targets'][target_name] test_logs['Test loss '+str(target_name)+' per seq.'] = test_eval_results['eval_target_prediction_loss_dict'][target_name] / normalization with open(logs_folder+os.sep+"test_performance_by_fold_"+args.model_name_suffix+".csv", "a") as perf_tracker: if os.path.getsize(logs_folder+os.sep+"test_performance_by_fold_"+args.model_name_suffix+".csv") == 0: header="fold_index,model_type,model_name_suffix,targets,assay_id,UniProt_id,fold_variable_name,total_training_steps,total_training_epochs,aa_embeddings,target_prediction_model,target_prediction_head,augmentation,frozen_embedding_parameters,dropout,weight_decay,early_stopping_patience,use_validation_set,training_num_assay_sequences_per_batch_per_gpu,eval_num_sequences_to_score_per_batch_per_gpu,eval_num_training_sequences_per_batch_per_gpu,eval_training_sequences_sampling_method,num_MSA_sequences_per_training_instance,embed_dim,ffn_embed_dim,attention_heads,conv_kernel_size,num_protein_npt_layers,total_loss" for target_name in target_names: header += (",loss_" + target_name + ",Spearman_" + target_name + ",num_obs_Spearman_" + target_name) perf_tracker.write(header+"\n") perf = ",".join([str(x) for x in perf_list]) + "," + str(round(test_logs['Test total loss per seq.'],5)) for target_name in target_names: perf += ("," + str(round(test_logs['Test loss '+str(target_name)+' per seq.'],5)) +","+str(spearmans[target_name])+","+str(num_obs_spearmans[target_name])) perf_tracker.write(perf+"\n") return test_logs, spearmans def log_performance_all_folds(args,target_names,all_test_predictions_across_folds,spearmans_across_folds,perf_list,logs_folder=None): if not os.path.exists(args.output_scores_location + os.sep + 'all_aggregated_predictions'): os.mkdir(args.output_scores_location + os.sep + 'all_aggregated_predictions') all_test_predictions_across_folds = pd.DataFrame.from_dict(all_test_predictions_across_folds) all_test_predictions_across_folds.to_csv(args.output_scores_location + os.sep + 'all_aggregated_predictions' + os.sep + model_name_prefix + ".csv", index=False) if logs_folder is None: dir_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) logs_folder = dir_path+os.sep+'output' if not os.path.exists(logs_folder): os.mkdir(logs_folder) with open(logs_folder+os.sep+"test_performance_overall_"+perf_list[2]+".csv", "a") as overall_perf: if os.path.getsize(logs_folder+os.sep+"test_performance_overall_"+perf_list[2]+".csv") == 0: header = "model_type,model_name_suffix,targets,assay_id,UniProt_id,fold_variable_name,total_training_steps,total_training_epochs,aa_embeddings,target_prediction_model,target_prediction_head,augmentation,frozen_embedding_parameters,dropout,weight_decay,early_stopping_patience,use_validation_set,training_num_assay_sequences_per_batch_per_gpu,eval_num_sequences_to_score_per_batch_per_gpu,eval_num_training_sequences_per_batch_per_gpu,eval_training_sequences_sampling_method,num_MSA_sequences_per_training_instance,embed_dim,ffn_embed_dim,attention_heads,conv_kernel_size,num_protein_npt_layers,total_loss" for target_name in target_names: header += (",loss_" + target_name + ",Spearman_" + target_name + ",Std_dev_Spearman_" + target_name + ",num_obs_Spearman_" + target_name + ",standardized_loss_" + target_name + ",standardized_Spearman_" + target_name) overall_perf.write(header+"\n") perf = ",".join([str(x) for x in perf_list[1:]]) #Remove fold_index from perf_list for target_name in target_names: missing_mask 
= np.isnan(all_test_predictions_across_folds['labels_'+target_name]) | np.equal(all_test_predictions_across_folds['labels_'+target_name],-100) MSE = ((all_test_predictions_across_folds['predictions_'+target_name][~missing_mask] - all_test_predictions_across_folds['labels_'+target_name][~missing_mask])**2).mean() spearman = pnpt_spearmanr(all_test_predictions_across_folds['predictions_'+target_name], all_test_predictions_across_folds['labels_'+target_name]) num_obs_spearman = pnpt_count_non_nan(all_test_predictions_across_folds['labels_'+target_name]) MSE_standardized = ((all_test_predictions_across_folds['fold_standardized_predictions_'+target_name][~missing_mask] - all_test_predictions_across_folds['labels_'+target_name][~missing_mask])**2).mean() spearman_standardized = pnpt_spearmanr(all_test_predictions_across_folds['fold_standardized_predictions_'+target_name], all_test_predictions_across_folds['labels_'+target_name]) spearman_std_dev = np.array(spearmans_across_folds[target_name]).std() perf += ("," + str(MSE) +","+str(spearman) + ","+ str(spearman_std_dev) + "," + str(num_obs_spearman) + "," + str(MSE_standardized) +","+str(spearman_standardized)) overall_perf.write(perf+"\n") def main(args): # Set random seeds torch.manual_seed(args.seed) np.random.seed(args.seed) random.seed(args.seed) # target_names are the true targets we want to predict. target_names_input also includes auxiliary labels (as used in ProteinNPT) target_names = [x for x in args.target_config.keys() if args.target_config[x]["in_NPT_loss"]] target_names_input = args.target_config.keys() num_targets = len(target_names) num_targets_input = len(target_names_input) print("We want to predict {} target(s): {}".format(num_targets, ' and '.join(target_names))) if num_targets_input > num_targets: print("We leverage {} target(s) and auxiliary labels: {}".format(num_targets_input, ' and '.join(target_names_input))) assay_reference_file = pd.read_csv(args.assay_reference_file_location) assay_id=assay_reference_file["DMS_id"][args.assay_index] args.seq_len = int(assay_reference_file["seq_len"][assay_reference_file["DMS_id"]==assay_id].values[0]) args.MSA_seq_len = int(assay_reference_file["MSA_len"][assay_reference_file["DMS_id"]==assay_id].values[0]) print("Training model for assay: {}, where the test_fold index is: {}".format(assay_id, args.test_fold_index)) args.save_model_checkpoint = not args.do_not_save_model_checkpoint args.frozen_embedding_parameters = not args.fine_tune_model_embedding_parameters if args.model_type=="MSA_Transformer_pred": assert args.num_MSA_sequences_per_training_instance==args.num_MSA_sequences_per_eval_instance, "MSA_Transformer_pred only supports same size of MSA for train and eval" effective_batch_size = args.gradient_accumulation * args.training_num_assay_sequences_per_batch_per_gpu print("Effective batch size is {}".format(effective_batch_size)) model_hypers = [args.aa_embeddings,args.target_prediction_model,args.target_prediction_head,args.augmentation,args.frozen_embedding_parameters,args.dropout,args.weight_decay, \ args.early_stopping_patience, args.use_validation_set, args.training_num_assay_sequences_per_batch_per_gpu, args.eval_num_sequences_to_score_per_batch_per_gpu, args.eval_num_training_sequences_per_batch_per_gpu, \ args.eval_training_sequences_sampling_method, args.num_MSA_sequences_per_training_instance, args.embed_dim, args.ffn_embed_dim, args.attention_heads, args.conv_kernel_size, args.num_protein_npt_layers] model_hypers_str = ','.join([str(x) for x in model_hypers]) 
model_name_prefix = '_'.join([str(x) for x in [args.model_type,assay_id,"_".join(target_names_input),args.fold_variable_name,'embed_'+args.aa_embeddings,'head_'+str(args.target_prediction_model),'aug_'+str(args.augmentation_short), \ 'froz_'+str(args.frozen_embedding_parameters),'drop_'+str(args.dropout),'val_'+str(args.use_validation_set),args.model_name_suffix]]) model_name = model_name_prefix + "_fold-" + str(args.test_fold_index) if not os.path.exists(args.model_location+os.sep+model_name): os.mkdir(args.model_location+os.sep+model_name) with open(args.model_location+os.sep+model_name+os.sep+'training_arguments', 'w') as f: json.dump(args.__dict__, f, indent=2) print("Model name: "+model_name) assay_file_name = assay_reference_file["DMS_filename"][assay_reference_file["DMS_id"]==assay_id].values[0] # File name of main assay used during training (if single property, this is also the only assay). Retrieved embeddings are always for this assay. args.sequence_embeddings_location = args.sequence_embeddings_folder + os.sep + assay_file_name.split(".csv")[0] + '.h5' if args.sequence_embeddings_folder else None print("Sequence embeddings: {}".format(args.sequence_embeddings_location)) if args.use_wandb: wandb.login() # Create & initiate model alphabet = get_tranception_tokenizer() if args.aa_embeddings=="Tranception" else Alphabet.from_architecture("msa_transformer") if args.model_type=="ProteinNPT": model = ProteinNPTModel(args, alphabet) elif args.model_type in ["MSA_Transformer_pred", "ESM1v_pred", "Tranception_pred", "TranceptEVE_pred", "Linear_Embedding_pred", "DeepSequence_pred"]: model = AugmentedPropertyPredictor(args, alphabet) if args.frozen_embedding_parameters and args.aa_embeddings in ["MSA_Transformer", "ESM1v", "Tranception"]: for para in model.aa_embedding.parameters(): para.requires_grad = False # List of assays involved in training if num_targets==1: # Single property prediction assay_file_names={ target_names[0]: assay_file_name } if "zero_shot_fitness_predictions" in target_names_input: assay_file_names["zero_shot_fitness_predictions"] = assay_file_name else: # Multiple properties prediction assay_file_names={} for target in target_names_input: if target=="zero_shot_fitness_predictions": assay_file_names[target] = assay_file_name # The name of the zero-shot prediction file matches that of the main assay else: assay_file_names[target] = assay_reference_file[target][assay_reference_file["DMS_id"]==assay_id].values[0] # Load training, val and test data UniProt_id = assay_reference_file["UniProt_ID"][assay_reference_file["DMS_id"]==assay_id].values[0] MSA_filename = assay_reference_file["MSA_filename"][assay_reference_file["DMS_id"]==assay_id].values[0] MSA_weights_filename = assay_reference_file["weight_file_name"][assay_reference_file["DMS_id"]==assay_id].values[0] MSA_start_position = int(assay_reference_file["MSA_start"][assay_reference_file["DMS_id"]==assay_id].values[0]) MSA_end_position = int(assay_reference_file["MSA_end"][assay_reference_file["DMS_id"]==assay_id].values[0]) train_data, val_data, test_data, target_processing = get_train_val_test_data(args = args, assay_file_names = assay_file_names) MSA_sequences, MSA_weights = process_MSA(args, MSA_filename, MSA_weights_filename) if args.aa_embeddings=="MSA_Transformer" else (None, None) if args.use_wandb: combined_dict = {**vars(args), "parameter_count": sum(p.numel() for p in model.parameters()), "world_size": world_size, "assay_id": assay_id, "UniProt_id": UniProt_id} wandb.init(project=os.getenv("WANDB_PROJECT"), 
config=combined_dict, name=model_name, dir=args.wandb_location, save_code=True) # Define trainer
def setup_config_and_paths(args): # All parameters that are not defined by end user are fetched from the config file if args.model_config_location is not None: args.main_config=json.load(open(args.model_config_location)) for key in args.main_config: if args.__dict__[key] is None: args.__dict__[key] = args.main_config[key] # File paths config for local_path in ['embedding_model_location','MSA_data_folder','MSA_weight_data_folder','path_to_hhfilter']: if getattr(args, local_path): setattr(args, local_path, args.data_location + os.sep + getattr(args, local_path)) if not os.path.exists(args.data_location + os.sep + 'model_predictions'): os.mkdir(args.data_location + os.sep + 'model_predictions') if not os.path.exists(args.data_location + os.sep + 'checkpoint'): os.mkdir(args.data_location + os.sep + 'checkpoint') args.output_scores_location = args.data_location + os.sep + 'model_predictions' + os.sep + args.model_name_suffix if not os.path.exists(args.output_scores_location): os.mkdir(args.output_scores_location) args.model_location = args.data_location + os.sep + 'checkpoint' + os.sep + args.model_name_suffix if not os.path.exists(args.model_location): os.mkdir(args.model_location) # Target config args.target_config=json.load(open(args.target_config_location)) zero_shot_predictions_mapping={ "MSA_Transformer_pred": "MSA_Transformer_ensemble", "ESM1v_pred": "ESM1v_ensemble", "TranceptEVE_pred": "TranceptEVE_L", "Tranception_pred": "Tranception_L", "DeepSequence_pred": "DeepSequence_ensemble" } if args.model_type=="ProteinNPT": zero_shot_predictions_mapping["ProteinNPT"]=zero_shot_predictions_mapping[args.aa_embeddings+"_pred"] if args.augmentation=="zero_shot_fitness_predictions_auxiliary_labels": # Add auxiliary label to target_config assert args.zero_shot_fitness_predictions_location is not None, "Location of zero-shot fitness predictions to use as auxiliary labels not properly referenced" print("Using zero-shot fitness predictions as auxiliary labels") args.target_config["zero_shot_fitness_predictions"] = { "type": "continuous", "dim": 1, "var_name": zero_shot_predictions_mapping[args.model_type], #Select the relevant model for zero-shot fitness predictions "location": args.zero_shot_fitness_predictions_location, "in_NPT_loss": False, "main_target": False } args.augmentation_short="auxiliary" elif args.augmentation=="zero_shot_fitness_predictions_covariate": # Will use zero-shot fitness predictions as an additional model covariate assert args.zero_shot_fitness_predictions_location is not None, "Location of zero-shot fitness predictions to use as model covariate not properly referenced" print("Using zero-shot fitness predictions as covariate") args.augmentation_short="covariate" args.zero_shot_fitness_predictions_var_name = zero_shot_predictions_mapping[args.model_type] else: args.augmentation_short="none" for target_index,target in enumerate(args.target_config): if "location" not in args.target_config[target].keys(): # Note: the case of zero-shot fitness predictions is already handled above if present if args.assay_location is not None: # We passed at least one path for the assay location num_targets = [x for x in args.target_config.keys() if args.target_config[x]["in_NPT_loss"]] if len(args.assay_location) > 1: assert len(args.assay_location)==num_targets, "Trying to predict {} targets, but only referencing {} distinct paths for them.".format(num_targets,len(args.assay_location)) args.target_config[target]["location"] = args.assay_location[target_index] print("Location used for target {} 
if {}".format(target,args.assay_location[target_index])) else: args.target_config[target]["location"] = args.assay_location[0] print("Location used for target {} if {}".format(target,args.assay_location[0])) else: print("Assay location not provided. Defaulting to location for single substitutions fitness assays: {}".format(args.data_location + os.sep + 'data/fitness/substitutions_singles')) args.target_config[target]["location"] = args.data_location + os.sep + 'data/fitness/substitutions_singles' return args def log_performance_fold(args,target_names,test_eval_results,trainer_final_status,perf_list,logs_folder=None): test_logs = {'total_training_steps': trainer_final_status['total_training_steps'], 'total_training_epochs': trainer_final_status['total_training_epochs'], 'total_train_time': trainer_final_status['total_train_time']} if logs_folder is None: dir_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) logs_folder = dir_path+os.sep+'output' if not os.path.exists(logs_folder): os.mkdir(logs_folder) if args.model_type=="ProteinNPT": normalization = 0 for target_name in target_names: normalization += test_eval_results['eval_num_masked_targets'][target_name] else: normalization = test_eval_results['eval_num_predicted_targets'] test_logs['Test total loss per seq.'] = test_eval_results['eval_total_loss'] / normalization spearmans = {target_name: pnpt_spearmanr(test_eval_results['output_scores']['predictions_'+target_name], test_eval_results['output_scores']['labels_'+target_name]) for target_name in target_names} num_obs_spearmans = {target_name: pnpt_count_non_nan(test_eval_results['output_scores']['labels_'+target_name]) for target_name in target_names} for target_name in target_names: print("Spearman {} target: {}".format(target_name,spearmans[target_name])) test_logs['Test Spearman '+target_name] = spearmans[target_name] if args.model_type=="ProteinNPT": normalization = test_eval_results['eval_num_masked_targets'][target_name] test_logs['Test loss '+str(target_name)+' per seq.'] = test_eval_results['eval_target_prediction_loss_dict'][target_name] / normalization with open(logs_folder+os.sep+"test_performance_by_fold_"+args.model_name_suffix+".csv", "a") as perf_tracker: if os.path.getsize(logs_folder+os.sep+"test_performance_by_fold_"+args.model_name_suffix+".csv") == 0: header="fold_index,model_type,model_name_suffix,targets,assay_id,UniProt_id,fold_variable_name,total_training_steps,total_training_epochs,aa_embeddings,target_prediction_model,target_prediction_head,augmentation,frozen_embedding_parameters,dropout,weight_decay,early_stopping_patience,use_validation_set,training_num_assay_sequences_per_batch_per_gpu,eval_num_sequences_to_score_per_batch_per_gpu,eval_num_training_sequences_per_batch_per_gpu,eval_training_sequences_sampling_method,num_MSA_sequences_per_training_instance,embed_dim,ffn_embed_dim,attention_heads,conv_kernel_size,num_protein_npt_layers,total_loss" for target_name in target_names: header += (",loss_" + target_name + ",Spearman_" + target_name + ",num_obs_Spearman_" + target_name) perf_tracker.write(header+"\n") perf = ",".join([str(x) for x in perf_list]) + "," + str(round(test_logs['Test total loss per seq.'],5)) for target_name in target_names: perf += ("," + str(round(test_logs['Test loss '+str(target_name)+' per seq.'],5)) +","+str(spearmans[target_name])+","+str(num_obs_spearmans[target_name])) perf_tracker.write(perf+"\n") return test_logs, spearmans def 
log_performance_all_folds(args,target_names,all_test_predictions_across_folds,spearmans_across_folds,perf_list,logs_folder=None): if not os.path.exists(args.output_scores_location + os.sep + 'all_aggregated_predictions'): os.mkdir(args.output_scores_location + os.sep + 'all_aggregated_predictions') all_test_predictions_across_folds = pd.DataFrame.from_dict(all_test_predictions_across_folds) all_test_predictions_across_folds.to_csv(args.output_scores_location + os.sep + 'all_aggregated_predictions' + os.sep + model_name_prefix + ".csv", index=False) if logs_folder is None: dir_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) logs_folder = dir_path+os.sep+'output' if not os.path.exists(logs_folder): os.mkdir(logs_folder) with open(logs_folder+os.sep+"test_performance_overall_"+perf_list[2]+".csv", "a") as overall_perf: if os.path.getsize(logs_folder+os.sep+"test_performance_overall_"+perf_list[2]+".csv") == 0: header = "model_type,model_name_suffix,targets,assay_id,UniProt_id,fold_variable_name,total_training_steps,total_training_epochs,aa_embeddings,target_prediction_model,target_prediction_head,augmentation,frozen_embedding_parameters,dropout,weight_decay,early_stopping_patience,use_validation_set,training_num_assay_sequences_per_batch_per_gpu,eval_num_sequences_to_score_per_batch_per_gpu,eval_num_training_sequences_per_batch_per_gpu,eval_training_sequences_sampling_method,num_MSA_sequences_per_training_instance,embed_dim,ffn_embed_dim,attention_heads,conv_kernel_size,num_protein_npt_layers,total_loss" for target_name in target_names: header += (",loss_" + target_name + ",Spearman_" + target_name + ",Std_dev_Spearman_" + target_name + ",num_obs_Spearman_" + target_name + ",standardized_loss_" + target_name + ",standardized_Spearman_" + target_name) overall_perf.write(header+"\n") perf = ",".join([str(x) for x in perf_list[1:]]) #Remove fold_index from perf_list for target_name in target_names: missing_mask = np.isnan(all_test_predictions_across_folds['labels_'+target_name]) | np.equal(all_test_predictions_across_folds['labels_'+target_name],-100) MSE = ((all_test_predictions_across_folds['predictions_'+target_name][~missing_mask] - all_test_predictions_across_folds['labels_'+target_name][~missing_mask])**2).mean() spearman = pnpt_spearmanr(all_test_predictions_across_folds['predictions_'+target_name], all_test_predictions_across_folds['labels_'+target_name]) num_obs_spearman = pnpt_count_non_nan(all_test_predictions_across_folds['labels_'+target_name]) MSE_standardized = ((all_test_predictions_across_folds['fold_standardized_predictions_'+target_name][~missing_mask] - all_test_predictions_across_folds['labels_'+target_name][~missing_mask])**2).mean() spearman_standardized = pnpt_spearmanr(all_test_predictions_across_folds['fold_standardized_predictions_'+target_name], all_test_predictions_across_folds['labels_'+target_name]) spearman_std_dev = np.array(spearmans_across_folds[target_name]).std() perf += ("," + str(MSE) +","+str(spearman) + ","+ str(spearman_std_dev) + "," + str(num_obs_spearman) + "," + str(MSE_standardized) +","+str(spearman_standardized)) overall_perf.write(perf+"\n") def main(args): # Set random seeds torch.manual_seed(args.seed) np.random.seed(args.seed) random.seed(args.seed) # target_names are the true targets we want to predict. 
target_names_input also includes auxiliary labels (as used in ProteinNPT) target_names = [x for x in args.target_config.keys() if args.target_config[x]["in_NPT_loss"]] target_names_input = args.target_config.keys() num_targets = len(target_names) num_targets_input = len(target_names_input) print("We want to predict {} target(s): {}".format(num_targets, ' and '.join(target_names))) if num_targets_input > num_targets: print("We leverage {} target(s) and auxiliary labels: {}".format(num_targets_input, ' and '.join(target_names_input))) assay_reference_file = pd.read_csv(args.assay_reference_file_location) assay_id=assay_reference_file["DMS_id"][args.assay_index] args.seq_len = int(assay_reference_file["seq_len"][assay_reference_file["DMS_id"]==assay_id].values[0]) args.MSA_seq_len = int(assay_reference_file["MSA_len"][assay_reference_file["DMS_id"]==assay_id].values[0]) print("Training model for assay: {}, where the test_fold index is: {}".format(assay_id, args.test_fold_index)) args.save_model_checkpoint = not args.do_not_save_model_checkpoint args.frozen_embedding_parameters = not args.fine_tune_model_embedding_parameters if args.model_type=="MSA_Transformer_pred": assert args.num_MSA_sequences_per_training_instance==args.num_MSA_sequences_per_eval_instance, "MSA_Transformer_pred only supports same size of MSA for train and eval" effective_batch_size = args.gradient_accumulation * args.training_num_assay_sequences_per_batch_per_gpu print("Effective batch size is {}".format(effective_batch_size)) model_hypers = [args.aa_embeddings,args.target_prediction_model,args.target_prediction_head,args.augmentation,args.frozen_embedding_parameters,args.dropout,args.weight_decay, \ args.early_stopping_patience, args.use_validation_set, args.training_num_assay_sequences_per_batch_per_gpu, args.eval_num_sequences_to_score_per_batch_per_gpu, args.eval_num_training_sequences_per_batch_per_gpu, \ args.eval_training_sequences_sampling_method, args.num_MSA_sequences_per_training_instance, args.embed_dim, args.ffn_embed_dim, args.attention_heads, args.conv_kernel_size, args.num_protein_npt_layers] model_hypers_str = ','.join([str(x) for x in model_hypers]) model_name_prefix = '_'.join([str(x) for x in [args.model_type,assay_id,"_".join(target_names_input),args.fold_variable_name,'embed_'+args.aa_embeddings,'head_'+str(args.target_prediction_model),'aug_'+str(args.augmentation_short), \ 'froz_'+str(args.frozen_embedding_parameters),'drop_'+str(args.dropout),'val_'+str(args.use_validation_set),args.model_name_suffix]]) model_name = model_name_prefix + "_fold-" + str(args.test_fold_index) if not os.path.exists(args.model_location+os.sep+model_name): os.mkdir(args.model_location+os.sep+model_name) with open(args.model_location+os.sep+model_name+os.sep+'training_arguments', 'w') as f: json.dump(args.__dict__, f, indent=2) print("Model name: "+model_name) assay_file_name = assay_reference_file["DMS_filename"][assay_reference_file["DMS_id"]==assay_id].values[0] # File name of main assay used during training (if single property, this is also the only assay). Retrieved embeddings are always for this assay. 
args.sequence_embeddings_location = args.sequence_embeddings_folder + os.sep + assay_file_name.split(".csv")[0] + '.h5' if args.sequence_embeddings_folder else None print("Sequence embeddings: {}".format(args.sequence_embeddings_location)) if args.use_wandb: wandb.login() # Create & initiate model alphabet = get_tranception_tokenizer() if args.aa_embeddings=="Tranception" else Alphabet.from_architecture("msa_transformer") if args.model_type=="ProteinNPT": model = ProteinNPTModel(args, alphabet) elif args.model_type in ["MSA_Transformer_pred", "ESM1v_pred", "Tranception_pred", "TranceptEVE_pred", "Linear_Embedding_pred", "DeepSequence_pred"]: model = AugmentedPropertyPredictor(args, alphabet) if args.frozen_embedding_parameters and args.aa_embeddings in ["MSA_Transformer", "ESM1v", "Tranception"]: for para in model.aa_embedding.parameters(): para.requires_grad = False # List of assays involved in training if num_targets==1: # Single property prediction assay_file_names={ target_names[0]: assay_file_name } if "zero_shot_fitness_predictions" in target_names_input: assay_file_names["zero_shot_fitness_predictions"] = assay_file_name else: # Multiple properties prediction assay_file_names={} for target in target_names_input: if target=="zero_shot_fitness_predictions": assay_file_names[target] = assay_file_name # The name of the zero-shot prediction file matches that of the main assay else: assay_file_names[target] = assay_reference_file[target][assay_reference_file["DMS_id"]==assay_id].values[0] # Load training, val and test data UniProt_id = assay_reference_file["UniProt_ID"][assay_reference_file["DMS_id"]==assay_id].values[0] MSA_filename = assay_reference_file["MSA_filename"][assay_reference_file["DMS_id"]==assay_id].values[0] MSA_weights_filename = assay_reference_file["weight_file_name"][assay_reference_file["DMS_id"]==assay_id].values[0] MSA_start_position = int(assay_reference_file["MSA_start"][assay_reference_file["DMS_id"]==assay_id].values[0]) MSA_end_position = int(assay_reference_file["MSA_end"][assay_reference_file["DMS_id"]==assay_id].values[0]) train_data, val_data, test_data, target_processing = get_train_val_test_data(args = args, assay_file_names = assay_file_names) MSA_sequences, MSA_weights = process_MSA(args, MSA_filename, MSA_weights_filename) if args.aa_embeddings=="MSA_Transformer" else (None, None) if args.use_wandb: combined_dict = {**vars(args), "parameter_count": sum(p.numel() for p in model.parameters()), "world_size": world_size, "assay_id": assay_id, "UniProt_id": UniProt_id} wandb.init(project=os.getenv("WANDB_PROJECT"), config=combined_dict, name=model_name, dir=args.wandb_location, save_code=True) # Define trainer
trainer = Trainer(
9
2023-10-28 11:41:05+00:00
24k
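In `log_performance_all_folds` above, labels that are NaN or equal to the sentinel value -100 are masked out before the MSE is computed, and the Spearman correlation is taken over the remaining observations. A small self-contained sketch of that masking step on dummy arrays (the -100 sentinel follows the snippet; the numbers and the use of `scipy.stats.spearmanr` instead of the repo's `pnpt_spearmanr` are illustrative assumptions):

import numpy as np
from scipy.stats import spearmanr

labels      = np.array([0.8, -100.0, 1.2, np.nan, 0.3])   # -100 / NaN mark missing targets
predictions = np.array([0.7,    0.1, 1.0,    0.9, 0.4])

missing_mask = np.isnan(labels) | np.equal(labels, -100)

mse = ((predictions[~missing_mask] - labels[~missing_mask]) ** 2).mean()
rho, _ = spearmanr(predictions[~missing_mask], labels[~missing_mask])
n_obs = int((~missing_mask).sum())

print(f"MSE={mse:.4f}  Spearman={rho:.4f}  n={n_obs}")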
CVHub520/yolov5_obb
detect.py
[ { "identifier": "DetectMultiBackend", "path": "models/common.py", "snippet": "class DetectMultiBackend(nn.Module):\n # YOLOv5 MultiBackend class for python inference on various backends\n def __init__(self, weights='yolov5s.pt', device=None, dnn=False):\n # Usage:\n # PyTorch: ...
import argparse
import os
import sys
import cv2
import torch
import torch.backends.cudnn as cudnn
from pathlib import Path
from models.common import DetectMultiBackend
from utils.datasets import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams
from utils.general import (LOGGER, check_file, check_img_size, check_imshow, check_requirements, colorstr, increment_path, non_max_suppression, non_max_suppression_obb, print_args, scale_coords, scale_polys, strip_optimizer, xyxy2xywh)
from utils.plots import Annotator, colors, save_one_box
from utils.torch_utils import select_device, time_sync
from utils.rboxs_utils import poly2rbox, rbox2poly
15,619
# pred: list*(n, [xylsθ, conf, cls]) θ ∈ [-pi/2, pi/2) pred = non_max_suppression_obb(pred, conf_thres, iou_thres, classes, agnostic_nms, multi_label=True, max_det=max_det) dt[2] += time_sync() - t3 # Second-stage classifier (optional) # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) # Process predictions for i, det in enumerate(pred): # per image pred_poly = rbox2poly(det[:, :5]) # (n, [x1 y1 x2 y2 x3 y3 x4 y4]) seen += 1 if webcam: # batch_size >= 1 p, im0, frame = path[i], im0s[i].copy(), dataset.count s += f'{i}: ' else: p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # im.jpg txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt s += '%gx%g ' % im.shape[2:] # print string gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): # Rescale polys from img_size to im0 size # det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round() pred_poly = scale_polys(im.shape[2:], pred_poly, im0.shape) det = torch.cat((pred_poly, det[:, -2:]), dim=1) # (n, [poly conf cls]) # Print results for c in det[:, -1].unique(): n = (det[:, -1] == c).sum() # detections per class s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Write results for *poly, conf, cls in reversed(det): if save_txt: # Write to file # xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh poly = poly.tolist() line = (cls, *poly, conf) if save_conf else (cls, *poly) # label format with open(txt_path + '.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') if save_img or save_crop or view_img: # Add poly to image c = int(cls) # integer class label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') # annotator.box_label(xyxy, label, color=colors(c, True)) annotator.poly_label(poly, label, color=colors(c, True)) if save_crop: # Yolov5-obb doesn't support it yet # save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) pass # Print time (inference-only) LOGGER.info(f'{s}Done. 
({t3 - t2:.3f}s)') # Stream results im0 = annotator.result() if view_img: cv2.imshow(str(p), im0) cv2.waitKey(1) # 1 millisecond # Save results (image with detections) if save_img: if dataset.mode == 'image': cv2.imwrite(save_path, im0) else: # 'video' or 'stream' if vid_path[i] != save_path: # new video vid_path[i] = save_path if isinstance(vid_writer[i], cv2.VideoWriter): vid_writer[i].release() # release previous video writer if vid_cap: # video fps = vid_cap.get(cv2.CAP_PROP_FPS) w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) else: # stream fps, w, h = 30, im0.shape[1], im0.shape[0] save_path += '.mp4' vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) vid_writer[i].write(im0) # Print results t = tuple(x / seen * 1E3 for x in dt) # speeds per image LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) if save_txt or save_img: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") if update: strip_optimizer(weights) # update model (to fix SourceChangeWarning) def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'runs/train/yolov5n_DroneVehicle/weights/best.pt', help='model path(s)') parser.add_argument('--source', type=str, default='/media/test/4d846cae-2315-4928-8d1b-ca6d3a61a3c6/DroneVehicle/val/raw/images/', help='file/dir/URL/glob, 0 for webcam') parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[840], help='inference size h,w') parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') parser.add_argument('--iou-thres', type=float, default=0.2, help='NMS IoU threshold') parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image') parser.add_argument('--device', default='3', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') parser.add_argument('--view-img', action='store_true', help='show results') parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes') parser.add_argument('--nosave', action='store_true', help='do not save images/videos') parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3') parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') parser.add_argument('--augment', action='store_true', help='augmented inference') parser.add_argument('--visualize', action='store_true', help='visualize features') parser.add_argument('--update', action='store_true', help='update all models') parser.add_argument('--project', default='runs/detect', help='save results to project/name') parser.add_argument('--name', default='exp', help='save results to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--line-thickness', default=2, type=int, help='bounding box thickness (pixels)') parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels') parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences') parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') opt = parser.parse_args() opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Run inference on images, videos, directories, streams, etc. Usage: $ python path/to/detect.py --weights yolov5s.pt --source 0 # webcam img.jpg # image vid.mp4 # video path/ # directory path/*.jpg # glob 'https://youtu.be/Zgi9g1ksQHc' # YouTube 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream """ FILE = Path(__file__).resolve() ROOT = FILE.parents[0] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative @torch.no_grad() def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s) source=ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam imgsz=(640, 640), # inference size (height, width) conf_thres=0.25, # confidence threshold iou_thres=0.45, # NMS IOU threshold max_det=1000, # maximum detections per image device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu view_img=False, # show results save_txt=False, # save results to *.txt save_conf=False, # save confidences in --save-txt labels save_crop=False, # save cropped prediction boxes nosave=False, # do not save images/videos classes=None, # filter by class: --class 0, or --class 0 2 3 agnostic_nms=False, # class-agnostic NMS augment=False, # augmented inference visualize=False, # visualize features update=False, # update all models project=ROOT / 'runs/detect', # save results to project/name name='exp', # save results to project/name exist_ok=False, # existing project/name ok, do not increment line_thickness=3, # bounding box thickness (pixels) hide_labels=False, # hide labels hide_conf=False, # hide confidences half=False, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference ): source = str(source) save_img = not nosave and not source.endswith('.txt') # save inference images is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) if is_url and is_file: source = check_file(source) # download # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model device = select_device(device) model = DetectMultiBackend(weights, device=device, dnn=dnn) stride, names, pt, jit, onnx, engine = model.stride, model.names, model.pt, model.jit, model.onnx, model.engine imgsz = check_img_size(imgsz, s=stride) # check image size # Half half &= (pt or jit or engine) and device.type != 'cpu' # half precision only supported by PyTorch on CUDA if pt or jit: model.model.half() if half else model.model.float() # Dataloader if webcam: view_img = check_imshow() cudnn.benchmark = True # set True to speed up constant image size inference dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt) bs = len(dataset) # batch_size else: dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt) bs = 1 # batch_size vid_path, vid_writer = [None] * bs, [None] * bs # Run inference model.warmup(imgsz=(1, 3, *imgsz), half=half) # warmup dt, seen = [0.0, 0.0, 0.0], 0 for path, im, im0s, vid_cap, s in dataset: t1 = time_sync() im = torch.from_numpy(im).to(device) im = im.half() if half else im.float() # uint8 to fp16/32 im /= 255 # 0 - 255 to 0.0 - 1.0 if len(im.shape) == 3: im = im[None] # expand for batch dim t2 = time_sync() dt[0] += t2 - t1 # Inference visualize = 
increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False pred = model(im, augment=augment, visualize=visualize) t3 = time_sync() dt[1] += t3 - t2 # NMS # pred: list*(n, [xylsθ, conf, cls]) θ ∈ [-pi/2, pi/2) pred = non_max_suppression_obb(pred, conf_thres, iou_thres, classes, agnostic_nms, multi_label=True, max_det=max_det) dt[2] += time_sync() - t3 # Second-stage classifier (optional) # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) # Process predictions for i, det in enumerate(pred): # per image pred_poly = rbox2poly(det[:, :5]) # (n, [x1 y1 x2 y2 x3 y3 x4 y4]) seen += 1 if webcam: # batch_size >= 1 p, im0, frame = path[i], im0s[i].copy(), dataset.count s += f'{i}: ' else: p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # im.jpg txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt s += '%gx%g ' % im.shape[2:] # print string gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): # Rescale polys from img_size to im0 size # det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round() pred_poly = scale_polys(im.shape[2:], pred_poly, im0.shape) det = torch.cat((pred_poly, det[:, -2:]), dim=1) # (n, [poly conf cls]) # Print results for c in det[:, -1].unique(): n = (det[:, -1] == c).sum() # detections per class s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Write results for *poly, conf, cls in reversed(det): if save_txt: # Write to file # xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh poly = poly.tolist() line = (cls, *poly, conf) if save_conf else (cls, *poly) # label format with open(txt_path + '.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') if save_img or save_crop or view_img: # Add poly to image c = int(cls) # integer class label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') # annotator.box_label(xyxy, label, color=colors(c, True)) annotator.poly_label(poly, label, color=colors(c, True)) if save_crop: # Yolov5-obb doesn't support it yet # save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) pass # Print time (inference-only) LOGGER.info(f'{s}Done. 
({t3 - t2:.3f}s)') # Stream results im0 = annotator.result() if view_img: cv2.imshow(str(p), im0) cv2.waitKey(1) # 1 millisecond # Save results (image with detections) if save_img: if dataset.mode == 'image': cv2.imwrite(save_path, im0) else: # 'video' or 'stream' if vid_path[i] != save_path: # new video vid_path[i] = save_path if isinstance(vid_writer[i], cv2.VideoWriter): vid_writer[i].release() # release previous video writer if vid_cap: # video fps = vid_cap.get(cv2.CAP_PROP_FPS) w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) else: # stream fps, w, h = 30, im0.shape[1], im0.shape[0] save_path += '.mp4' vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) vid_writer[i].write(im0) # Print results t = tuple(x / seen * 1E3 for x in dt) # speeds per image LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) if save_txt or save_img: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") if update: strip_optimizer(weights) # update model (to fix SourceChangeWarning) def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'runs/train/yolov5n_DroneVehicle/weights/best.pt', help='model path(s)') parser.add_argument('--source', type=str, default='/media/test/4d846cae-2315-4928-8d1b-ca6d3a61a3c6/DroneVehicle/val/raw/images/', help='file/dir/URL/glob, 0 for webcam') parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[840], help='inference size h,w') parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') parser.add_argument('--iou-thres', type=float, default=0.2, help='NMS IoU threshold') parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image') parser.add_argument('--device', default='3', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') parser.add_argument('--view-img', action='store_true', help='show results') parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes') parser.add_argument('--nosave', action='store_true', help='do not save images/videos') parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3') parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') parser.add_argument('--augment', action='store_true', help='augmented inference') parser.add_argument('--visualize', action='store_true', help='visualize features') parser.add_argument('--update', action='store_true', help='update all models') parser.add_argument('--project', default='runs/detect', help='save results to project/name') parser.add_argument('--name', default='exp', help='save results to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--line-thickness', default=2, type=int, help='bounding box thickness (pixels)') parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels') parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences') parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') opt = parser.parse_args() opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand
print_args(FILE.stem, opt)
14
2023-10-31 06:06:41+00:00
24k
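The detection loop above keeps predictions as rotated boxes `[x, y, l, s, θ]` with θ ∈ [-π/2, π/2) and converts them to four-corner polygons via `rbox2poly` before rescaling and drawing. The repo's own conversion is not reproduced here; the sketch below is a generic rotated-box-to-corners computation in NumPy that illustrates the underlying geometry (rotate the half-extents, then translate to the center):

import numpy as np

def rbox_to_corners(cx, cy, l, s, theta):
    # corners of the unrotated box, centered at the origin (long side l, short side s)
    half = np.array([[ l / 2,  s / 2],
                     [-l / 2,  s / 2],
                     [-l / 2, -s / 2],
                     [ l / 2, -s / 2]])
    rot = np.array([[np.cos(theta), -np.sin(theta)],
                    [np.sin(theta),  np.cos(theta)]])
    return half @ rot.T + np.array([cx, cy])   # (4, 2) corner coordinates

corners = rbox_to_corners(cx=100.0, cy=50.0, l=40.0, s=20.0, theta=np.pi / 6)
print(corners.round(1))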
serengil/LightPHE
lightphe/models/Ciphertext.py
[ { "identifier": "Homomorphic", "path": "lightphe/models/Homomorphic.py", "snippet": "class Homomorphic(ABC):\n keys: dict\n plaintext_modulo: int\n ciphertext_modulo: int\n\n @abstractmethod\n def generate_keys(self, key_size: int, s: Optional[int] = None) -> dict:\n pass\n\n @a...
from typing import Union
from lightphe.models.Homomorphic import Homomorphic
from lightphe.models.Algorithm import Algorithm
from lightphe.cryptosystems.RSA import RSA
from lightphe.cryptosystems.ElGamal import ElGamal
from lightphe.cryptosystems.Paillier import Paillier
from lightphe.cryptosystems.DamgardJurik import DamgardJurik
from lightphe.cryptosystems.OkamotoUchiyama import OkamotoUchiyama
from lightphe.cryptosystems.Benaloh import Benaloh
from lightphe.cryptosystems.NaccacheStern import NaccacheStern
from lightphe.cryptosystems.GoldwasserMicali import GoldwasserMicali
from lightphe.cryptosystems.EllipticCurveElGamal import EllipticCurveElGamal
from lightphe.commons import phe_utils
from lightphe.commons.logger import Logger
18,005
logger = Logger(module="lightphe/models/Ciphertext.py") # pylint: disable=too-few-public-methods, no-else-return class Ciphertext: def __init__(self, algorithm_name: str, keys: dict, value: Union[int, tuple, list]): self.algorithm_name = algorithm_name self.keys = keys self.value = value if algorithm_name == Algorithm.RSA: cs = RSA(keys=keys) elif algorithm_name == Algorithm.ElGamal: cs = ElGamal(keys=keys) elif algorithm_name == Algorithm.ExponentialElGamal: cs = ElGamal(keys=keys, exponential=True) elif algorithm_name == Algorithm.EllipticCurveElGamal: cs = EllipticCurveElGamal(keys=keys) elif algorithm_name == Algorithm.Paillier: cs = Paillier(keys=keys) elif algorithm_name == Algorithm.DamgardJurik: cs = DamgardJurik(keys=keys) elif algorithm_name == Algorithm.OkamotoUchiyama: cs = OkamotoUchiyama(keys=keys) elif algorithm_name == Algorithm.Benaloh: cs = Benaloh(keys=keys) elif algorithm_name == Algorithm.NaccacheStern: cs = NaccacheStern(keys=keys) elif algorithm_name == Algorithm.GoldwasserMicali: cs = GoldwasserMicali(keys=keys) else: raise ValueError(f"unimplemented algorithm - {algorithm_name}") self.cs: Homomorphic = cs def __str__(self) -> str: return f"Ciphertext({self.value})" def __repr__(self) -> str: return f"Ciphertext({self.value})" def __add__(self, other: "Ciphertext") -> "Ciphertext": """ Perform homomorphic addition methods Args: other (Ciperhtext): some other ciphertext Returns: ciphertext (Ciphertext): homomorphic addition of ciphertext """ if self.cs.keys.get("public_key") is None: raise ValueError("You must have public key to perform homomorphic addition") result = self.cs.add(ciphertext1=self.value, ciphertext2=other.value) return Ciphertext(algorithm_name=self.algorithm_name, keys=self.keys, value=result) def __mul__(self, other: Union["Ciphertext", int, float]) -> "Ciphertext": """ Perform homomorphic multiplication or multiply a ciphertext with a known constant Args: other (int | float | Ciphertext): a known plain constant of some other ciphertext Returns homomorphic multiplication of ciphertexts | scalar multiplication of ciphertext """ if self.cs.keys.get("public_key") is None: raise ValueError("You must have public key to perform homomorphic multiplication") if isinstance(other, Ciphertext): # Handle multiplication with another EncryptedObject result = self.cs.multiply(ciphertext1=self.value, ciphertext2=other.value) elif isinstance(other, int): result = self.cs.multiply_by_contant(ciphertext=self.value, constant=other) elif isinstance(other, float):
logger = Logger(module="lightphe/models/Ciphertext.py") # pylint: disable=too-few-public-methods, no-else-return class Ciphertext: def __init__(self, algorithm_name: str, keys: dict, value: Union[int, tuple, list]): self.algorithm_name = algorithm_name self.keys = keys self.value = value if algorithm_name == Algorithm.RSA: cs = RSA(keys=keys) elif algorithm_name == Algorithm.ElGamal: cs = ElGamal(keys=keys) elif algorithm_name == Algorithm.ExponentialElGamal: cs = ElGamal(keys=keys, exponential=True) elif algorithm_name == Algorithm.EllipticCurveElGamal: cs = EllipticCurveElGamal(keys=keys) elif algorithm_name == Algorithm.Paillier: cs = Paillier(keys=keys) elif algorithm_name == Algorithm.DamgardJurik: cs = DamgardJurik(keys=keys) elif algorithm_name == Algorithm.OkamotoUchiyama: cs = OkamotoUchiyama(keys=keys) elif algorithm_name == Algorithm.Benaloh: cs = Benaloh(keys=keys) elif algorithm_name == Algorithm.NaccacheStern: cs = NaccacheStern(keys=keys) elif algorithm_name == Algorithm.GoldwasserMicali: cs = GoldwasserMicali(keys=keys) else: raise ValueError(f"unimplemented algorithm - {algorithm_name}") self.cs: Homomorphic = cs def __str__(self) -> str: return f"Ciphertext({self.value})" def __repr__(self) -> str: return f"Ciphertext({self.value})" def __add__(self, other: "Ciphertext") -> "Ciphertext": """ Perform homomorphic addition methods Args: other (Ciperhtext): some other ciphertext Returns: ciphertext (Ciphertext): homomorphic addition of ciphertext """ if self.cs.keys.get("public_key") is None: raise ValueError("You must have public key to perform homomorphic addition") result = self.cs.add(ciphertext1=self.value, ciphertext2=other.value) return Ciphertext(algorithm_name=self.algorithm_name, keys=self.keys, value=result) def __mul__(self, other: Union["Ciphertext", int, float]) -> "Ciphertext": """ Perform homomorphic multiplication or multiply a ciphertext with a known constant Args: other (int | float | Ciphertext): a known plain constant of some other ciphertext Returns homomorphic multiplication of ciphertexts | scalar multiplication of ciphertext """ if self.cs.keys.get("public_key") is None: raise ValueError("You must have public key to perform homomorphic multiplication") if isinstance(other, Ciphertext): # Handle multiplication with another EncryptedObject result = self.cs.multiply(ciphertext1=self.value, ciphertext2=other.value) elif isinstance(other, int): result = self.cs.multiply_by_contant(ciphertext=self.value, constant=other) elif isinstance(other, float):
constant = phe_utils.parse_int(value=other, modulo=self.cs.plaintext_modulo)
11
2023-10-28 14:57:59+00:00
24k
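A minimal usage sketch for the homomorphic operator overloads shown in the Ciphertext record above. It is not part of the record: the top-level LightPHE facade and its encrypt/decrypt signatures are assumptions taken from the library's documentation, and the plaintext values are arbitrary.

from lightphe import LightPHE  # assumed facade class, not shown in the record

# Paillier is additively homomorphic, so __add__ and scalar __mul__ are both supported.
phe = LightPHE(algorithm_name="Paillier")
c1 = phe.encrypt(plaintext=17)
c2 = phe.encrypt(plaintext=25)

c3 = c1 + c2   # Ciphertext.__add__ -> cs.add(...)
c4 = c1 * 3    # Ciphertext.__mul__ with an int -> cs.multiply_by_contant(...)

assert phe.decrypt(c3) == 17 + 25
assert phe.decrypt(c4) == 17 * 3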
chenran-li/RQL-release
stable_baselines3/sac_residual/sac_residual.py
[ { "identifier": "ReplayBuffer", "path": "stable_baselines3/common/buffers.py", "snippet": "class ReplayBuffer(BaseBuffer):\n \"\"\"\n Replay buffer used in off-policy algorithms like SAC/TD3.\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation ...
from typing import Any, Dict, List, Optional, Tuple, Type, TypeVar, Union
from gym import spaces
from torch.nn import functional as F
from stable_baselines3.common.buffers import ReplayBuffer
from stable_baselines3.common.noise import ActionNoise
from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm
from stable_baselines3.common.policies import BasePolicy
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule
from stable_baselines3.common.utils import get_parameters_by_name, polyak_update
from stable_baselines3.sac_residual.policies import ResidualCnnPolicy, ResidualMlpPolicy, ResidualMultiInputPolicy, ResidualSACPolicy
from stable_baselines3.sac.sac import SAC
import numpy as np
import torch as th
17,049
target_update_interval=target_update_interval, target_entropy=target_entropy, use_sde=use_sde, sde_sample_freq=sde_sample_freq, use_sde_at_warmup=use_sde_at_warmup, tensorboard_log=tensorboard_log, policy_kwargs=policy_kwargs, verbose=verbose, seed=seed, device=device, _init_setup_model=_init_setup_model, ) def _setup_model(self) -> None: super()._setup_model() self.policy.prior_model = SAC.load(self.prior_model_path,env=self.env) self.policy.prior_model.policy.set_training_mode(False) # freeze prior model parameters def train(self, gradient_steps: int, batch_size: int = 64) -> None: # Switch to train mode (this affects batch norm / dropout) self.policy.set_training_mode(True) # Update optimizers learning rate optimizers = [self.actor.optimizer, self.critic.optimizer] if self.ent_coef_optimizer is not None: optimizers += [self.ent_coef_optimizer] # Update learning rate according to lr schedule self._update_learning_rate(optimizers) ent_coef_losses, ent_coefs = [], [] actor_losses, critic_losses = [], [] for gradient_step in range(gradient_steps): # Sample replay buffer replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env) # We need to sample because `log_std` may have changed between two gradient steps if self.use_sde: self.actor.reset_noise() # Action by the current actor for the sampled state actions_pi, log_prob = self.actor.action_log_prob(replay_data.observations) log_prob = log_prob.reshape(-1, 1) # SAC and GAIL prior policy prior_log_prob = self.policy.prior_model.actor.give_action_log_prob(replay_data.observations, actions_pi) prior_log_prob = prior_log_prob.reshape(-1, 1) ent_coef_loss = None if self.ent_coef_optimizer is not None: # Important: detach the variable from the graph # so we don't change it with other losses # see https://github.com/rail-berkeley/softlearning/issues/60 ent_coef = th.exp(self.log_ent_coef.detach()) ent_coef_loss = -(self.log_ent_coef * (log_prob + self.target_entropy).detach()).mean() ent_coef_losses.append(ent_coef_loss.item()) else: ent_coef = self.ent_coef_tensor ent_coefs.append(ent_coef.item()) # pre reward normalize to current reward. 
ent_coef_prior = self.policy.prior_model.ent_coef if not isinstance(self.policy.prior_model.ent_coef, str) else ent_coef if ent_coef_prior == 0: ent_coef_prior = ent_coef if self.num_timesteps < self.warmstarting_num_timesteps: ent_coef_prior = self.warmstarting_scale * ent_coef_prior # Optimize entropy coefficient, also called # entropy temperature or alpha in the paper if ent_coef_loss is not None: self.ent_coef_optimizer.zero_grad() ent_coef_loss.backward() self.ent_coef_optimizer.step() with th.no_grad(): # Select action according to policy next_actions, next_log_prob = self.actor.action_log_prob(replay_data.next_observations) next_log_prob = next_log_prob.reshape(-1, 1) # SAC and GAIL next_prior_log_prob = self.policy.prior_model.actor.give_action_log_prob(replay_data.next_observations, next_actions) next_prior_log_prob = next_prior_log_prob.reshape(-1, 1) # Compute the next Q values: min over all critics targets next_q_values = th.cat(self.critic_target(replay_data.next_observations, next_actions), dim=1) next_q_values, _ = th.min(next_q_values, dim=1, keepdim=True) # add entropy term and prior policy logprob next_q_values = next_q_values + ent_coef_prior * next_prior_log_prob - ent_coef * next_log_prob # td error + entropy term target_q_values = replay_data.rewards + (1 - replay_data.dones) * self.gamma * next_q_values # Get current Q-values estimates for each critic network # using action from the replay buffer current_q_values = self.critic(replay_data.observations, replay_data.actions) # Compute critic loss critic_loss = 0.5 * sum(F.mse_loss(current_q, target_q_values) for current_q in current_q_values) critic_losses.append(critic_loss.item()) # Optimize the critic self.critic.optimizer.zero_grad() critic_loss.backward() self.critic.optimizer.step() # Compute actor loss # Alternative: actor_loss = th.mean(log_prob - qf1_pi) # Min over all critic networks q_values_pi = th.cat(self.critic(replay_data.observations, actions_pi), dim=1) min_qf_pi, _ = th.min(q_values_pi, dim=1, keepdim=True) actor_loss = (ent_coef * log_prob - min_qf_pi - ent_coef_prior * prior_log_prob).mean() actor_losses.append(actor_loss.item()) # Optimize the actor self.actor.optimizer.zero_grad() actor_loss.backward() self.actor.optimizer.step() # Update target networks if gradient_step % self.target_update_interval == 0:
SelfResidualSAC = TypeVar("SelfResidualSAC", bound="ResidualSAC") class ResidualSAC(SAC): """ Residual Soft Actor-Critic (SAC) Off-Policy Maximum Entropy Deep Reinforcement Learning with a Stochastic Actor, This implementation borrows code from original implementation (https://github.com/haarnoja/sac) from OpenAI Spinning Up (https://github.com/openai/spinningup), from the softlearning repo (https://github.com/rail-berkeley/softlearning/) and from Stable Baselines (https://github.com/hill-a/stable-baselines) Paper: https://arxiv.org/abs/1801.01290 Introduction to SAC: https://spinningup.openai.com/en/latest/algorithms/sac.html Note: we use double q target and not value target as discussed in https://github.com/hill-a/stable-baselines/issues/270 :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...) :param env: The environment to learn from (if registered in Gym, can be str) :param learning_rate: learning rate for adam optimizer, the same learning rate will be used for all networks (Q-Values, Actor and Value function) it can be a function of the current progress remaining (from 1 to 0) :param buffer_size: size of the replay buffer :param learning_starts: how many steps of the model to collect transitions for before learning starts :param batch_size: Minibatch size for each gradient update :param tau: the soft update coefficient ("Polyak update", between 0 and 1) :param gamma: the discount factor :param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit like ``(5, "step")`` or ``(2, "episode")``. :param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``) Set to ``-1`` means to do as many gradient steps as steps done in the environment during the rollout. :param action_noise: the action noise type (None by default), this can help for hard exploration problem. Cf common.noise for the different action noise type. :param replay_buffer_class: Replay buffer class to use (for instance ``HerReplayBuffer``). If ``None``, it will be automatically selected. :param replay_buffer_kwargs: Keyword arguments to pass to the replay buffer on creation. :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer at a cost of more complexity. See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195 :param ent_coef: Entropy regularization coefficient. (Equivalent to inverse of reward scale in the original SAC paper.) Controlling exploration/exploitation trade-off. Set it to 'auto' to learn it automatically (and 'auto_0.1' for using 0.1 as initial value) :param target_update_interval: update the target network every ``target_network_update_freq`` gradient steps. :param target_entropy: target entropy when learning ``ent_coef`` (``ent_coef = 'auto'``) :param use_sde: Whether to use generalized State Dependent Exploration (gSDE) instead of action noise exploration (default: False) :param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE Default: -1 (only sample at the beginning of the rollout) :param use_sde_at_warmup: Whether to use gSDE instead of uniform sampling during the warm up phase (before learning starts) :param policy_kwargs: additional arguments to be passed to the policy on creation :param verbose: Verbosity level: 0 for no output, 1 for info messages (such as device or wrappers used), 2 for debug messages :param seed: Seed for the pseudo random generators :param device: Device (cpu, cuda, ...) on which the code should be run. 
Setting it to auto, the code will be run on the GPU if possible. :param _init_setup_model: Whether or not to build the network at the creation of the instance """ policy_aliases: Dict[str, Type[BasePolicy]] = { "MlpPolicy": ResidualMlpPolicy, "CnnPolicy": ResidualCnnPolicy, "MultiInputPolicy": ResidualMultiInputPolicy, } def __init__( self, policy: Union[str, Type[ResidualSACPolicy]], env: Union[GymEnv, str], prior_model_path: str, learning_rate: Union[float, Schedule] = 3e-4, buffer_size: int = 1_000_000, # 1e6 learning_starts: int = 100, batch_size: int = 256, tau: float = 0.005, gamma: float = 0.99, train_freq: Union[int, Tuple[int, str]] = 1, gradient_steps: int = 1, warmstarting_num_timesteps: int = 0, warmstarting_scale: int = 10, action_noise: Optional[ActionNoise] = None, replay_buffer_class: Optional[Type[ReplayBuffer]] = None, replay_buffer_kwargs: Optional[Dict[str, Any]] = None, optimize_memory_usage: bool = False, ent_coef: Union[str, float] = "auto", target_update_interval: int = 1, target_entropy: Union[str, float] = "auto", use_sde: bool = False, sde_sample_freq: int = -1, use_sde_at_warmup: bool = False, tensorboard_log: Optional[str] = None, policy_kwargs: Optional[Dict[str, Any]] = None, verbose: int = 0, seed: Optional[int] = None, device: Union[th.device, str] = "auto", _init_setup_model: bool = True, ): self.warmstarting_num_timesteps = warmstarting_num_timesteps self.warmstarting_scale = warmstarting_scale self.prior_model_path = prior_model_path super().__init__( policy, env, learning_rate, buffer_size, learning_starts, batch_size, tau, gamma, train_freq, gradient_steps, action_noise, replay_buffer_class=replay_buffer_class, replay_buffer_kwargs=replay_buffer_kwargs, optimize_memory_usage=optimize_memory_usage, ent_coef=ent_coef, target_update_interval=target_update_interval, target_entropy=target_entropy, use_sde=use_sde, sde_sample_freq=sde_sample_freq, use_sde_at_warmup=use_sde_at_warmup, tensorboard_log=tensorboard_log, policy_kwargs=policy_kwargs, verbose=verbose, seed=seed, device=device, _init_setup_model=_init_setup_model, ) def _setup_model(self) -> None: super()._setup_model() self.policy.prior_model = SAC.load(self.prior_model_path,env=self.env) self.policy.prior_model.policy.set_training_mode(False) # freeze prior model parameters def train(self, gradient_steps: int, batch_size: int = 64) -> None: # Switch to train mode (this affects batch norm / dropout) self.policy.set_training_mode(True) # Update optimizers learning rate optimizers = [self.actor.optimizer, self.critic.optimizer] if self.ent_coef_optimizer is not None: optimizers += [self.ent_coef_optimizer] # Update learning rate according to lr schedule self._update_learning_rate(optimizers) ent_coef_losses, ent_coefs = [], [] actor_losses, critic_losses = [], [] for gradient_step in range(gradient_steps): # Sample replay buffer replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env) # We need to sample because `log_std` may have changed between two gradient steps if self.use_sde: self.actor.reset_noise() # Action by the current actor for the sampled state actions_pi, log_prob = self.actor.action_log_prob(replay_data.observations) log_prob = log_prob.reshape(-1, 1) # SAC and GAIL prior policy prior_log_prob = self.policy.prior_model.actor.give_action_log_prob(replay_data.observations, actions_pi) prior_log_prob = prior_log_prob.reshape(-1, 1) ent_coef_loss = None if self.ent_coef_optimizer is not None: # Important: detach the variable from the graph # so we don't change it 
with other losses # see https://github.com/rail-berkeley/softlearning/issues/60 ent_coef = th.exp(self.log_ent_coef.detach()) ent_coef_loss = -(self.log_ent_coef * (log_prob + self.target_entropy).detach()).mean() ent_coef_losses.append(ent_coef_loss.item()) else: ent_coef = self.ent_coef_tensor ent_coefs.append(ent_coef.item()) # pre reward normalize to current reward. ent_coef_prior = self.policy.prior_model.ent_coef if not isinstance(self.policy.prior_model.ent_coef, str) else ent_coef if ent_coef_prior == 0: ent_coef_prior = ent_coef if self.num_timesteps < self.warmstarting_num_timesteps: ent_coef_prior = self.warmstarting_scale * ent_coef_prior # Optimize entropy coefficient, also called # entropy temperature or alpha in the paper if ent_coef_loss is not None: self.ent_coef_optimizer.zero_grad() ent_coef_loss.backward() self.ent_coef_optimizer.step() with th.no_grad(): # Select action according to policy next_actions, next_log_prob = self.actor.action_log_prob(replay_data.next_observations) next_log_prob = next_log_prob.reshape(-1, 1) # SAC and GAIL next_prior_log_prob = self.policy.prior_model.actor.give_action_log_prob(replay_data.next_observations, next_actions) next_prior_log_prob = next_prior_log_prob.reshape(-1, 1) # Compute the next Q values: min over all critics targets next_q_values = th.cat(self.critic_target(replay_data.next_observations, next_actions), dim=1) next_q_values, _ = th.min(next_q_values, dim=1, keepdim=True) # add entropy term and prior policy logprob next_q_values = next_q_values + ent_coef_prior * next_prior_log_prob - ent_coef * next_log_prob # td error + entropy term target_q_values = replay_data.rewards + (1 - replay_data.dones) * self.gamma * next_q_values # Get current Q-values estimates for each critic network # using action from the replay buffer current_q_values = self.critic(replay_data.observations, replay_data.actions) # Compute critic loss critic_loss = 0.5 * sum(F.mse_loss(current_q, target_q_values) for current_q in current_q_values) critic_losses.append(critic_loss.item()) # Optimize the critic self.critic.optimizer.zero_grad() critic_loss.backward() self.critic.optimizer.step() # Compute actor loss # Alternative: actor_loss = th.mean(log_prob - qf1_pi) # Min over all critic networks q_values_pi = th.cat(self.critic(replay_data.observations, actions_pi), dim=1) min_qf_pi, _ = th.min(q_values_pi, dim=1, keepdim=True) actor_loss = (ent_coef * log_prob - min_qf_pi - ent_coef_prior * prior_log_prob).mean() actor_losses.append(actor_loss.item()) # Optimize the actor self.actor.optimizer.zero_grad() actor_loss.backward() self.actor.optimizer.step() # Update target networks if gradient_step % self.target_update_interval == 0:
polyak_update(self.critic.parameters(), self.critic_target.parameters(), self.tau)
6
2023-10-28 01:09:21+00:00
24k
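The distinctive step in the ResidualSAC.train() loop above is the soft target: on top of the usual SAC entropy bonus, the frozen prior policy's log-probability is added back with its own coefficient, pulling the residual policy toward the prior. A self-contained sketch of that target computation with dummy tensors (batch size, coefficients, and all values are invented for illustration):

import torch as th

batch, gamma = 4, 0.99
ent_coef, ent_coef_prior = 0.2, 0.2               # alpha and the prior's coefficient

rewards = th.randn(batch, 1)
dones = th.zeros(batch, 1)
next_q1 = th.randn(batch, 1)                      # first target critic
next_q2 = th.randn(batch, 1)                      # second target critic
next_log_prob = th.randn(batch, 1)                # log pi(a'|s') of the residual policy
next_prior_log_prob = th.randn(batch, 1)          # log pi_prior(a'|s') of the frozen prior

# Min over critics, then add the prior term and subtract the entropy term,
# mirroring the `with th.no_grad():` block of train() above.
next_q, _ = th.min(th.cat([next_q1, next_q2], dim=1), dim=1, keepdim=True)
next_q = next_q + ent_coef_prior * next_prior_log_prob - ent_coef * next_log_prob
target_q = rewards + (1 - dones) * gamma * next_q
print(target_q.shape)                             # torch.Size([4, 1])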
pytabular-ai/auto-scikit-dl
utils/model.py
[ { "identifier": "MLP", "path": "models/mlp.py", "snippet": "class MLP(TabModel):\n def __init__(\n self,\n model_config: dict,\n n_num_features: int,\n categories: ty.Optional[ty.List[int]],\n n_labels: int,\n device: ty.Union[str, torch.device] = 'cuda',\n ...
import os
import time
import json
import yaml
import shutil
import random
import datetime
import numpy as np
import torch
import optuna
from pathlib import Path
from typing import Dict, List, Tuple, Union, Optional, Literal
from models import MLP, FTTransformer, AutoInt, DCNv2, NODE
from models.abstract import TabModel, check_dir
from data.utils import Dataset
from data.processor import DataProcessor
15,711
MODEL_CARDS = { 'xgboost': None, 'catboost': None, 'lightgbm': None, 'mlp': MLP, 'autoint': AutoInt, 'dcnv2': DCNv2, 'node': NODE, 'ft-transformer': FTTransformer, 'saint': None, 't2g-former': None, 'excel-former': None, } HPOLib = Literal['optuna', 'hyperopt'] # TODO: add 'hyperopt' support def get_model_cards(): return { 'ready': sorted(list([key for key, value in MODEL_CARDS.items() if value])), 'comming soon': sorted(list([key for key, value in MODEL_CARDS.items() if not value])) } def seed_everything(seed=42): ''' Sets the seed of the entire notebook so results are the same every time we run. This is for REPRODUCIBILITY. ''' random.seed(seed) # Set a fixed value for the hash seed os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) if torch.cuda.is_available(): torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) # When running on the CuDNN backend, two further options must be set torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False def load_config_from_file(file): file = str(file) if file.endswith('.yaml'): with open(file, 'r') as f: cfg = yaml.safe_load(f) elif file.endswith('.json'): with open(file, 'r') as f: cfg = json.load(f) else: raise AssertionError('Config files only support yaml or json format now.') return cfg def extract_config(model_config: dict, is_large_data: bool = False): """selection of different search spaces""" used_cfgs = {"model": {}, "training": {}, 'meta': model_config.get('meta', {})} for field in ['model', 'training']: for k in model_config[field]: cfgs = model_config[field][k] if 'type2' not in cfgs: used_cfg = cfgs else: if not is_large_data: used_cfg = {k: v for k, v in cfgs.items() if not k.endswith('2')} else: used_cfg = {k[:-1]: v for k, v in cfgs.items() if k.endswith('2')} used_cfgs[field][k] = used_cfg return used_cfgs def make_baseline( model_name, model_config: Union[dict, str], n_num: int, cat_card: Optional[List[int]], n_labels: int, sparsity_scheme: Optional[str] = None, device: Union[str, torch.device] = 'cuda', ) -> TabModel: """Process Model Configs and Call Specific Model APIs""" assert model_name in MODEL_CARDS, f"unrecognized `{model_name}` model name, choose one of valid models in {MODEL_CARDS}" if isinstance(model_config, str): model_config = load_config_from_file(model_config)['model'] if MODEL_CARDS[model_name] is None: raise NotImplementedError("Please add corresponding model implementation to `models` module") if sparsity_scheme is not None: assert 'mlp' in model_name return MODEL_CARDS[model_name]( model_config=model_config, n_num_features=n_num, categories=cat_card, n_labels=n_labels, sparsity_scheme=sparsity_scheme) return MODEL_CARDS[model_name]( model_config=model_config, n_num_features=n_num, categories=cat_card, n_labels=n_labels) def tune( model_name: str = None, search_config: Union[dict, str] = None,
MODEL_CARDS = { 'xgboost': None, 'catboost': None, 'lightgbm': None, 'mlp': MLP, 'autoint': AutoInt, 'dcnv2': DCNv2, 'node': NODE, 'ft-transformer': FTTransformer, 'saint': None, 't2g-former': None, 'excel-former': None, } HPOLib = Literal['optuna', 'hyperopt'] # TODO: add 'hyperopt' support def get_model_cards(): return { 'ready': sorted(list([key for key, value in MODEL_CARDS.items() if value])), 'comming soon': sorted(list([key for key, value in MODEL_CARDS.items() if not value])) } def seed_everything(seed=42): ''' Sets the seed of the entire notebook so results are the same every time we run. This is for REPRODUCIBILITY. ''' random.seed(seed) # Set a fixed value for the hash seed os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) if torch.cuda.is_available(): torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) # When running on the CuDNN backend, two further options must be set torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False def load_config_from_file(file): file = str(file) if file.endswith('.yaml'): with open(file, 'r') as f: cfg = yaml.safe_load(f) elif file.endswith('.json'): with open(file, 'r') as f: cfg = json.load(f) else: raise AssertionError('Config files only support yaml or json format now.') return cfg def extract_config(model_config: dict, is_large_data: bool = False): """selection of different search spaces""" used_cfgs = {"model": {}, "training": {}, 'meta': model_config.get('meta', {})} for field in ['model', 'training']: for k in model_config[field]: cfgs = model_config[field][k] if 'type2' not in cfgs: used_cfg = cfgs else: if not is_large_data: used_cfg = {k: v for k, v in cfgs.items() if not k.endswith('2')} else: used_cfg = {k[:-1]: v for k, v in cfgs.items() if k.endswith('2')} used_cfgs[field][k] = used_cfg return used_cfgs def make_baseline( model_name, model_config: Union[dict, str], n_num: int, cat_card: Optional[List[int]], n_labels: int, sparsity_scheme: Optional[str] = None, device: Union[str, torch.device] = 'cuda', ) -> TabModel: """Process Model Configs and Call Specific Model APIs""" assert model_name in MODEL_CARDS, f"unrecognized `{model_name}` model name, choose one of valid models in {MODEL_CARDS}" if isinstance(model_config, str): model_config = load_config_from_file(model_config)['model'] if MODEL_CARDS[model_name] is None: raise NotImplementedError("Please add corresponding model implementation to `models` module") if sparsity_scheme is not None: assert 'mlp' in model_name return MODEL_CARDS[model_name]( model_config=model_config, n_num_features=n_num, categories=cat_card, n_labels=n_labels, sparsity_scheme=sparsity_scheme) return MODEL_CARDS[model_name]( model_config=model_config, n_num_features=n_num, categories=cat_card, n_labels=n_labels) def tune( model_name: str = None, search_config: Union[dict, str] = None,
dataset: Dataset = None,
7
2023-10-30 14:55:44+00:00
24k
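A small, self-contained trace of extract_config from the record above, showing how keys ending in "2" select the alternative search space for large datasets. The toy configuration values are invented, and the import path assumes the file is importable as utils.model, as its repository path suggests.

from utils.model import extract_config   # assumed import path (utils/model.py)

model_config = {
    "meta": {"name": "mlp"},
    "model": {
        # `type2`/`low2`/`high2` define the alternative space used when is_large_data=True.
        "d_layers": {"type": "int", "low": 1, "high": 8,
                     "type2": "int", "low2": 1, "high2": 16},
    },
    "training": {
        "lr": {"type": "loguniform", "low": 1e-5, "high": 1e-2},
    },
}

small = extract_config(model_config, is_large_data=False)
large = extract_config(model_config, is_large_data=True)

assert small["model"]["d_layers"] == {"type": "int", "low": 1, "high": 8}
assert large["model"]["d_layers"] == {"type": "int", "low": 1, "high": 16}
assert small["training"]["lr"] == large["training"]["lr"]   # no "2" keys, used as-is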
hyperspy/exspy
exspy/tests/models/test_eelsmodel.py
[ { "identifier": "elements_db", "path": "exspy/misc/elements.py", "snippet": "" }, { "identifier": "_GOSH_URL", "path": "exspy/misc/eels/gosh_gos.py", "snippet": "_GOSH_URL = f\"doi:{_GOSH_DOI}/Segger_Guzzinati_Kohl_1.5.0.gosh\"" }, { "identifier": "_GOSH_KNOWN_HASH", "path": ...
import contextlib
import io
import numpy as np
import pooch
import pytest
import hyperspy.api as hs
from unittest import mock
from exspy.misc.elements import elements_db as elements
from hyperspy.decorators import lazifyTestClass
from exspy.misc.eels.gosh_gos import _GOSH_URL, _GOSH_KNOWN_HASH
from exspy.signals import EELSSpectrum
from exspy.models.eelsmodel import EELSModel
from hyperspy.components1d import PowerLaw
from hyperspy.components1d import PowerLaw
18,768
# -*- coding: utf-8 -*- # Copyright 2007-2023 The exSpy developers # # This file is part of exSpy. # # exSpy is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # exSpy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with exSpy. If not, see <https://www.gnu.org/licenses/#GPL>. # Dask does not always work nicely with np.errstate, # see: https://github.com/dask/dask/issues/3245, so # filter out divide-by-zero warnings that only appear # when the test is lazy. When the test is not lazy, # internal use of np.errstate means the warnings never # appear in the first place. @pytest.mark.filterwarnings( "ignore:invalid value encountered in subtract:RuntimeWarning" ) @pytest.mark.filterwarnings("ignore:divide by zero encountered in log:RuntimeWarning") @lazifyTestClass class TestCreateEELSModel: def setup_method(self, method):
# -*- coding: utf-8 -*- # Copyright 2007-2023 The exSpy developers # # This file is part of exSpy. # # exSpy is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # exSpy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with exSpy. If not, see <https://www.gnu.org/licenses/#GPL>. # Dask does not always work nicely with np.errstate, # see: https://github.com/dask/dask/issues/3245, so # filter out divide-by-zero warnings that only appear # when the test is lazy. When the test is not lazy, # internal use of np.errstate means the warnings never # appear in the first place. @pytest.mark.filterwarnings( "ignore:invalid value encountered in subtract:RuntimeWarning" ) @pytest.mark.filterwarnings("ignore:divide by zero encountered in log:RuntimeWarning") @lazifyTestClass class TestCreateEELSModel: def setup_method(self, method):
s = EELSSpectrum(np.zeros(200))
3
2023-10-28 20:04:10+00:00
24k
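A minimal sketch of the kind of fixture TestCreateEELSModel.setup_method builds around the next_line above (an EELSSpectrum over 200 zeros). The axis values and microscope parameters are illustrative, and auto_add_edges=False keeps the sketch independent of the GOS database that the real tests fetch via pooch from _GOSH_URL.

import numpy as np
from exspy.signals import EELSSpectrum

s = EELSSpectrum(np.zeros(200))
s.axes_manager[0].name = "Energy loss"
s.axes_manager[0].units = "eV"
s.axes_manager[0].offset = 150.0          # illustrative start of the energy-loss axis
s.set_microscope_parameters(
    beam_energy=100, convergence_angle=10, collection_angle=10
)

# Without auto-added ionisation edges the model only contains the PowerLaw background.
m = s.create_model(auto_add_edges=False)
print(len(m))                             # 1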
Elfenreigen/UniChest
train.py
[ { "identifier": "utils", "path": "factory/utils.py", "snippet": "class SmoothedValue(object):\nclass MetricLogger(object):\nclass AttrDict(dict):\n def __init__(self, window_size=20, fmt=None):\n def update(self, value, n=1):\n def synchronize_between_processes(self):\n def median(self):\n ...
import argparse
import os
import logging
import yaml
import numpy as np
import random
import time
import datetime
import json
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import socket
from pathlib import Path
from functools import partial
from sklearn.metrics import roc_auc_score
from collections import OrderedDict
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from transformers import AutoModel,BertConfig,AutoTokenizer
from factory import utils
from scheduler import create_scheduler
from optim import create_optimizer
from engine.train import train,valid_on_cheXpert,valid_on_chestxray14
from models.clip_tqn import CLP_clinical,ModelRes,TQN_Model,TQN_Model_Add,ModelDense,CLP_clinical2
from models.tokenization_bert import BertTokenizer
from dataset.dataset_entity import MIMIC_Dataset,Mergetrain_Dataset, Chestxray14_Dataset,CheXpert_Dataset
from io import BytesIO
17,454
#### Dataset #### print("Creating dataset") if args.add_dataset == True: train_dataset = Mergetrain_Dataset(config['train_entity_file'], config['train_fg_query_file_v1'], config['mrsty_file'],config['image_res'], args) else: train_dataset = MIMIC_Dataset(config['train_entity_file'], config['train_fg_query_file_v1'], config['mrsty_file'],config['image_res'], args) train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset,num_replicas=num_tasks, rank=sampler_rank, shuffle=True) train_dataloader = DataLoader( train_dataset, batch_size=config['batch_size'], num_workers=8, pin_memory=True, sampler=train_sampler, collate_fn=None, worker_init_fn=utils.seed_worker, drop_last=True, ) train_dataloader.num_samples = len(train_dataset) train_dataloader.num_batches = len(train_dataloader) val_dataset = Chestxray14_Dataset(config['chestxray_valid_file'],config['image_res']) val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset,num_replicas=num_tasks, rank=sampler_rank, shuffle=True) val_dataloader =DataLoader( val_dataset, batch_size=config['batch_size'], num_workers=8, pin_memory=True, sampler=val_sampler, collate_fn=None, worker_init_fn=utils.seed_worker, drop_last=True, ) val_dataloader.num_samples = len(val_dataset) val_dataloader.num_batches = len(val_dataloader) test_dataset = Chestxray14_Dataset(config['chestxray_test_file'],config['image_res']) test_sampler = torch.utils.data.distributed.DistributedSampler(test_dataset,num_replicas=num_tasks, rank=sampler_rank, shuffle=True) test_dataloader =DataLoader( test_dataset, batch_size=config['batch_size'], num_workers=8, pin_memory=True, sampler=test_sampler, collate_fn=None, worker_init_fn=utils.seed_worker, drop_last=True, ) test_dataloader.num_samples = len(test_dataset) test_dataloader.num_batches = len(test_dataloader) test_dataset_chexpert = CheXpert_Dataset(config['chexpert_valid_file'],config['image_res']) test_sampler_chexpert = torch.utils.data.distributed.DistributedSampler(test_dataset_chexpert,num_replicas=num_tasks, rank=sampler_rank, shuffle=True) test_dataloader_chexpert =DataLoader( test_dataset_chexpert, batch_size=config['batch_size'], num_workers=4, pin_memory=True, sampler=test_sampler_chexpert, collate_fn=None, worker_init_fn=utils.seed_worker, drop_last=True, ) test_dataloader_chexpert.num_samples = len(test_dataset_chexpert) test_dataloader_chexpert.num_batches = len(test_dataloader_chexpert) if args.image_encoder_name == 'resnet': image_encoder = ModelRes(res_base_model='resnet50').cuda() elif args.image_encoder_name == 'dense': image_encoder = ModelDense(dense_base_model = 'densenet121').cuda() if args.bert_model_name == 'emilyalsentzer/Bio_ClinicalBERT': tokenizer = BertTokenizer.from_pretrained(args.bert_model_name) text_encoder = CLP_clinical2(bert_model_name=args.bert_model_name).cuda() else: tokenizer = AutoTokenizer.from_pretrained(args.bert_model_name,do_lower_case=True, local_files_only=True) text_encoder = CLP_clinical(bert_model_name=args.bert_model_name).cuda() if args.bert_pretrained: checkpoint = torch.load(args.bert_pretrained, map_location='cpu') state_dict = checkpoint["state_dict"] text_encoder.load_state_dict(state_dict) print('Load pretrained bert success from: ',args.bert_pretrained) if args.freeze_bert: for param in text_encoder.parameters(): param.requires_grad = False if args.add_dataset: if 'lam' in config: model = TQN_Model_Add(class_num = args.class_num, gate_num = args.gate_num, high_dim = args.high_dim, lam = config['lam']).cuda() else: model = 
TQN_Model_Add(class_num = args.class_num, gate_num = args.gate_num, high_dim = args.high_dim).cuda() else: if 'lam' in config: model = TQN_Model(class_num = args.class_num, lam = config['lam']).cuda() else: model = TQN_Model(class_num = args.class_num).cuda() model = torch.nn.parallel.DistributedDataParallel(model, device_ids = [args.gpu], find_unused_parameters=True, broadcast_buffers=False) model_without_ddp = model.module if args.finetune: image_encoder_without_ddp = image_encoder else: image_encoder = torch.nn.parallel.DistributedDataParallel(image_encoder, device_ids = [args.gpu], find_unused_parameters=True, broadcast_buffers=False) image_encoder_without_ddp = image_encoder.module text_encoder_without_ddp = text_encoder arg_opt = utils.AttrDict(config['optimizer'])
# import ruamel.yaml as yaml def main(args, config): torch.cuda.current_device() torch.cuda._initialized = True print("Total CUDA devices: ", torch.cuda.device_count()) torch.set_default_tensor_type('torch.FloatTensor') utils.init_distributed_mode(args) device = torch.device(args.device) # fix the seed for reproducibility seed = args.seed + utils.get_rank() torch.manual_seed(seed) np.random.seed(seed) random.seed(seed) cudnn.benchmark = True start_epoch = 0 max_epoch = config['schedular']['epochs'] warmup_steps = config['schedular']['warmup_epochs'] num_tasks = utils.get_world_size() global_rank = utils.get_rank() sampler_rank = global_rank print('sampler_rank',sampler_rank,'num_tasks',num_tasks) #### Dataset #### print("Creating dataset") if args.add_dataset == True: train_dataset = Mergetrain_Dataset(config['train_entity_file'], config['train_fg_query_file_v1'], config['mrsty_file'],config['image_res'], args) else: train_dataset = MIMIC_Dataset(config['train_entity_file'], config['train_fg_query_file_v1'], config['mrsty_file'],config['image_res'], args) train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset,num_replicas=num_tasks, rank=sampler_rank, shuffle=True) train_dataloader = DataLoader( train_dataset, batch_size=config['batch_size'], num_workers=8, pin_memory=True, sampler=train_sampler, collate_fn=None, worker_init_fn=utils.seed_worker, drop_last=True, ) train_dataloader.num_samples = len(train_dataset) train_dataloader.num_batches = len(train_dataloader) val_dataset = Chestxray14_Dataset(config['chestxray_valid_file'],config['image_res']) val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset,num_replicas=num_tasks, rank=sampler_rank, shuffle=True) val_dataloader =DataLoader( val_dataset, batch_size=config['batch_size'], num_workers=8, pin_memory=True, sampler=val_sampler, collate_fn=None, worker_init_fn=utils.seed_worker, drop_last=True, ) val_dataloader.num_samples = len(val_dataset) val_dataloader.num_batches = len(val_dataloader) test_dataset = Chestxray14_Dataset(config['chestxray_test_file'],config['image_res']) test_sampler = torch.utils.data.distributed.DistributedSampler(test_dataset,num_replicas=num_tasks, rank=sampler_rank, shuffle=True) test_dataloader =DataLoader( test_dataset, batch_size=config['batch_size'], num_workers=8, pin_memory=True, sampler=test_sampler, collate_fn=None, worker_init_fn=utils.seed_worker, drop_last=True, ) test_dataloader.num_samples = len(test_dataset) test_dataloader.num_batches = len(test_dataloader) test_dataset_chexpert = CheXpert_Dataset(config['chexpert_valid_file'],config['image_res']) test_sampler_chexpert = torch.utils.data.distributed.DistributedSampler(test_dataset_chexpert,num_replicas=num_tasks, rank=sampler_rank, shuffle=True) test_dataloader_chexpert =DataLoader( test_dataset_chexpert, batch_size=config['batch_size'], num_workers=4, pin_memory=True, sampler=test_sampler_chexpert, collate_fn=None, worker_init_fn=utils.seed_worker, drop_last=True, ) test_dataloader_chexpert.num_samples = len(test_dataset_chexpert) test_dataloader_chexpert.num_batches = len(test_dataloader_chexpert) if args.image_encoder_name == 'resnet': image_encoder = ModelRes(res_base_model='resnet50').cuda() elif args.image_encoder_name == 'dense': image_encoder = ModelDense(dense_base_model = 'densenet121').cuda() if args.bert_model_name == 'emilyalsentzer/Bio_ClinicalBERT': tokenizer = BertTokenizer.from_pretrained(args.bert_model_name) text_encoder = CLP_clinical2(bert_model_name=args.bert_model_name).cuda() 
else: tokenizer = AutoTokenizer.from_pretrained(args.bert_model_name,do_lower_case=True, local_files_only=True) text_encoder = CLP_clinical(bert_model_name=args.bert_model_name).cuda() if args.bert_pretrained: checkpoint = torch.load(args.bert_pretrained, map_location='cpu') state_dict = checkpoint["state_dict"] text_encoder.load_state_dict(state_dict) print('Load pretrained bert success from: ',args.bert_pretrained) if args.freeze_bert: for param in text_encoder.parameters(): param.requires_grad = False if args.add_dataset: if 'lam' in config: model = TQN_Model_Add(class_num = args.class_num, gate_num = args.gate_num, high_dim = args.high_dim, lam = config['lam']).cuda() else: model = TQN_Model_Add(class_num = args.class_num, gate_num = args.gate_num, high_dim = args.high_dim).cuda() else: if 'lam' in config: model = TQN_Model(class_num = args.class_num, lam = config['lam']).cuda() else: model = TQN_Model(class_num = args.class_num).cuda() model = torch.nn.parallel.DistributedDataParallel(model, device_ids = [args.gpu], find_unused_parameters=True, broadcast_buffers=False) model_without_ddp = model.module if args.finetune: image_encoder_without_ddp = image_encoder else: image_encoder = torch.nn.parallel.DistributedDataParallel(image_encoder, device_ids = [args.gpu], find_unused_parameters=True, broadcast_buffers=False) image_encoder_without_ddp = image_encoder.module text_encoder_without_ddp = text_encoder arg_opt = utils.AttrDict(config['optimizer'])
optimizer = create_optimizer(arg_opt, model_without_ddp,image_encoder_without_ddp,text_encoder_without_ddp)
2
2023-10-30 00:24:16+00:00
24k
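The dataset plumbing in main() above repeats one pattern four times: a DistributedSampler wrapped in a DataLoader with a seeded worker_init_fn. A single-process miniature of that pattern with a stand-in TensorDataset (all shapes, batch size, and the set_epoch call are illustrative, not taken from the record):

import torch
from torch.utils.data import DataLoader, TensorDataset, DistributedSampler

# Stand-in for MIMIC_Dataset / Chestxray14_Dataset: 32 fake images with 14 binary labels.
dataset = TensorDataset(torch.randn(32, 3, 224, 224), torch.randint(0, 2, (32, 14)).float())

# num_replicas / rank normally come from utils.get_world_size() and utils.get_rank()
# after utils.init_distributed_mode(args); hard-coded here so it runs in one process.
sampler = DistributedSampler(dataset, num_replicas=1, rank=0, shuffle=True)
loader = DataLoader(dataset, batch_size=8, sampler=sampler,
                    num_workers=0, pin_memory=True, drop_last=True)

for epoch in range(2):
    sampler.set_epoch(epoch)        # reshuffles consistently across processes each epoch
    for images, labels in loader:
        pass                        # forward / backward passes would go here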
ifrit98/storage-subnet
neurons/miner.py
[ { "identifier": "hash_data", "path": "storage/shared/ecc.py", "snippet": "def hash_data(data):\n \"\"\"\n Compute a SHA3-256 hash of the input data and return its integer representation.\n\n The function handles both byte-like and non-byte-like inputs by converting non-byte inputs to\n strin...
import os
import sys
import copy
import json
import time
import torch
import typing
import base64
import asyncio
import aioredis
import argparse
import threading
import traceback
import bittensor as bt
import storage
from collections import defaultdict
from Crypto.Random import get_random_bytes
from typing import Dict
from pprint import pprint, pformat
from storage.shared.ecc import (
    hash_data,
    setup_CRS,
    ECCommitment,
    ecc_point_to_hex,
    hex_to_ecc_point,
)
from storage.shared.merkle import (
    MerkleTree,
)
from storage.shared.utils import b64_encode, b64_decode, chunk_data, safe_key_search
from storage.miner import (
    run,
    set_weights,
)
from storage.miner.utils import (
    compute_subsequent_commitment,
    save_data_to_filesystem,
    load_from_filesystem,
    commit_data_with_seed,
    init_wandb,
    get_directory_size,
    get_free_disk_space,
    update_storage_stats,
)
from storage.miner.config import (
    config,
    check_config,
    add_args,
)
from storage.miner.database import (
    store_chunk_metadata,
    update_seed_info,
    get_chunk_metadata,
)
14,974
If the hotkey is not recognized in the metagraph, the synapse is blacklisted, and the function returns (True, "Unrecognized hotkey"). Otherwise, it returns (False, "Hotkey recognized!"), allowing the synapse to interact with the network. Usage: This method is internally used by the network to ensure that only recognized entities can participate in communication or transactions. """ if synapse.dendrite.hotkey not in self.metagraph.hotkeys: # Ignore requests from unrecognized entities. bt.logging.trace( f"Blacklisting unrecognized hotkey {synapse.dendrite.hotkey}" ) return True, "Unrecognized hotkey" bt.logging.trace( f"Not Blacklisting recognized hotkey {synapse.dendrite.hotkey}" ) return False, "Hotkey recognized!" def retrieve_priority_fn(self, synapse: storage.protocol.Retrieve) -> float: """ Assigns a priority to a given synapse based on the stake of the calling entity in the metagraph. This function is crucial for prioritizing network requests and ensuring that higher-stake entities are given precedence in processing. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey of the caller. Returns: - float: The priority value assigned to the synapse, derived from the stake of the calling hotkey in the metagraph. The priority is determined by the stake associated with the caller's UID in the metagraph. A higher stake results in a higher priority. Usage: This method is used within the network's request handling mechanism to allocate resources and processing time based on the stake-based priority of each request. """ caller_uid = self.metagraph.hotkeys.index( synapse.dendrite.hotkey ) # Get the caller index. prirority = float( self.metagraph.S[caller_uid] ) # Return the stake as the priority. bt.logging.trace( f"Prioritizing {synapse.dendrite.hotkey} with value: ", prirority ) return prirority async def store(self, synapse: storage.protocol.Store) -> storage.protocol.Store: """ Processes the storage request from a synapse by securely storing the provided data and returning a proof of storage. The data is committed using elliptic curve cryptography, stored on the filesystem, and the metadata is recorded in a Redis database. A cryptographic proof of the commitment, along with a digital signature from the server's hotkey, is returned in the synapse for verification by the requester. Args: synapse (storage.protocol.Store): An object containing the data to be stored, encoded in base64 format, along with associated metadata like the cryptographic curve parameters, a seed for the commitment, and the expected commitment group elements. Returns: storage.protocol.Store: The synapse is returned with additional fields populated, including the randomness used in the commitment, the commitment point itself, a signature from this storage server's hotkey, and a commitment hash that can be used for chained proofs. The method performs the following operations: 1. Decodes the base64-encoded data into raw bytes. 2. Commits to the data using the provided elliptic curve parameters and the seed to generate a commitment point. 3. Stores the raw byte data in the filesystem using a hash of the data as the filename. 4. Records metadata about the stored data in the Redis database, including the file path, previous seed, and data size. 5. Updates the synapse object with the commitment details and a digital signature. 
This process ensures the integrity and non-repudiation of the data storage, allowing clients to verify that their data has been stored correctly without the need to retrieve the full data set. Example usage: Assuming an initialized 'committer' object and 'synapse' with necessary data: >>> updated_synapse = self.store(synapse) """ bt.logging.info(f"received store request: {synapse.encrypted_data[:24]}") self.request_count += 1 # Decode the data from base64 to raw bytes encrypted_byte_data = base64.b64decode(synapse.encrypted_data) bt.logging.trace(f"store b64decrypted data: {encrypted_byte_data[:24]}") # Store the data with the hash as the key in the filesystem bt.logging.trace(f"entering hash_data()") data_hash = hash_data(encrypted_byte_data) # If already storing this hash, simply update the validator seeds and return challenge bt.logging.trace(f"checking if data already exists...") if await self.database.exists(data_hash): # update the validator seed challenge hash in storage await update_seed_info( self.database, data_hash, synapse.dendrite.hotkey, synapse.seed ) else: # Store the data in the filesystem filepath = save_data_to_filesystem( encrypted_byte_data, self.config.database.directory, str(data_hash) ) bt.logging.trace(f"stored data {data_hash} in filepath: {filepath}") # Add the initial chunk, size, and validator seed information await store_chunk_metadata( self.database, data_hash, filepath, synapse.dendrite.hotkey, sys.getsizeof(encrypted_byte_data), synapse.seed, ) # Commit to the entire data block bt.logging.trace(f"entering ECCommitment()") committer = ECCommitment(
# The MIT License (MIT) # Copyright © 2023 Yuma Rao # Copyright © 2023 philanthrope # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated # documentation files (the “Software”), to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all copies or substantial portions of # the Software. # THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO # THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # import this repo class miner: @classmethod def check_config(cls, config: "bt.Config"): """ Adds neuron-specific arguments to the argument parser. Args: parser (argparse.ArgumentParser): Parser to add arguments to. This class method enriches the argument parser with options specific to the neuron's configuration. """ check_config(cls, config) @classmethod def add_args(cls, parser): """ Adds neuron-specific arguments to the argument parser. Args: parser (argparse.ArgumentParser): Parser to add arguments to. This class method enriches the argument parser with options specific to the neuron's configuration. """ add_args(cls, parser) @classmethod def config(cls): """ Retrieves the configuration for the neuron. Returns: bt.Config: The configuration object for the neuron. This class method returns the neuron's configuration, which is used throughout the neuron's lifecycle for various functionalities and operations. """ return config(cls) subtensor: "bt.subtensor" wallet: "bt.wallet" metagraph: "bt.metagraph" def __init__(self): self.config = miner.config() self.check_config(self.config) bt.logging(config=self.config, logging_dir=self.config.miner.full_path) bt.logging.info(f"{self.config}") bt.logging.info("miner.__init__()") # Init device. bt.logging.debug("loading device") self.device = torch.device(self.config.miner.device) bt.logging.debug(str(self.device)) # Init subtensor bt.logging.debug("loading subtensor") self.subtensor = bt.subtensor(config=self.config) bt.logging.debug(str(self.subtensor)) self.current_block = self.subtensor.get_current_block() # Init wallet. bt.logging.debug("loading wallet") self.wallet = bt.wallet(config=self.config) self.wallet.create_if_non_existent() if not self.config.wallet._mock: if not self.subtensor.is_hotkey_registered_on_subnet( hotkey_ss58=self.wallet.hotkey.ss58_address, netuid=self.config.netuid ): raise Exception( f"Wallet not currently registered on netuid {self.config.netuid}, please first register wallet before running" ) bt.logging.debug(f"wallet: {str(self.wallet)}") # Init metagraph. bt.logging.debug("loading metagraph") self.metagraph = bt.metagraph( netuid=self.config.netuid, network=self.subtensor.network, sync=False ) # Make sure not to sync without passing subtensor self.metagraph.sync(subtensor=self.subtensor) # Sync metagraph with subtensor. 
bt.logging.debug(str(self.metagraph)) # Setup database self.database = aioredis.StrictRedis( host=self.config.database.host, port=self.config.database.port, db=self.config.database.index, socket_keepalive=True, socket_connect_timeout=300, ) self.my_subnet_uid = self.metagraph.hotkeys.index( self.wallet.hotkey.ss58_address ) bt.logging.info(f"Running miner on uid: {self.my_subnet_uid}") # Init wandb. if not self.config.wandb.off: bt.logging.debug("loading wandb") init_wandb(self) # The axon handles request processing, allowing validators to send this process requests. self.axon = bt.axon(wallet=self.wallet, config=self.config) bt.logging.info(f"Axon {self.axon}") # Attach determiners which functions are called when servicing a request. bt.logging.info(f"Attaching forward functions to axon.") self.axon.attach( forward_fn=self.store, blacklist_fn=self.store_blacklist_fn, priority_fn=self.store_priority_fn, ).attach( forward_fn=self.challenge, blacklist_fn=self.challenge_blacklist_fn, priority_fn=self.challenge_priority_fn, ).attach( forward_fn=self.retrieve, blacklist_fn=self.retrieve_blacklist_fn, priority_fn=self.retrieve_priority_fn, ) # Serve passes the axon information to the network + netuid we are hosting on. # This will auto-update if the axon port of external ip have changed. bt.logging.info( f"Serving axon {self.axon} on network: {self.subtensor.chain_endpoint} with netuid: {self.config.netuid}" ) self.axon.serve(netuid=self.config.netuid, subtensor=self.subtensor) # Start starts the miner's axon, making it active on the network. bt.logging.info(f"Starting axon server on port: {self.config.axon.port}") self.axon.start() # Init the event loop. self.loop = asyncio.get_event_loop() # Instantiate runners self.should_exit: bool = False self.is_running: bool = False self.thread: threading.Thread = None self.lock = asyncio.Lock() self.request_timestamps: Dict = {} self.step = 0 # Init the miner's storage request tracker self.request_count = 0 self.start_request_count_timer() self.requests_per_hour = [] self.average_requests_per_hour = 0 # Init the miner's storage usage tracker update_storage_stats(self) def start_request_count_timer(self): """ Initializes and starts a timer for tracking the number of requests received by the miner in an hour. This method sets up a one-hour timer that, upon expiration, calls the `reset_request_count` method to log the number of requests received and reset the count for the next hour. The timer is set to run in a separate thread to avoid blocking the main execution. Usage: Should be called during the initialization of the miner to start tracking requests per hour. """ self.request_count_timer = threading.Timer(3600, self.reset_request_count) self.request_count_timer.start() def reset_request_count(self): """ Logs the number of requests received in the last hour and resets the count. This method is automatically called when the one-hour timer set by `start_request_count_timer` expires. It logs the count of requests received in the last hour and then resets the count. Additionally, it restarts the timer for the next hour. Usage: This method is intended to be called automatically by a timer and typically should not be called directly. 
""" bt.logging.info( f"Number of requests received in the last hour: {self.request_count}" ) self.requests_per_hour.append(self.request_count) bt.logging.info(f"Requests per hour: {self.requests_per_hour}") self.average_requests_per_hour = sum(self.requests_per_hour) / len( self.requests_per_hour ) bt.logging.info(f"Average requests per hour: {self.average_requests_per_hour}") self.request_count = 0 self.start_request_count_timer() @property async def total_storage(self): """ Calculates the total size of data stored by the miner. This method fetches all data keys from the Redis database and sums up the size of each data object. It provides an estimate of the total amount of data currently held by the miner. Returns: int: Total size of data (in bytes) stored by the miner. Example: >>> miner.total_storage() 102400 # Example output indicating 102,400 bytes of data stored """ # Fetch all keys from Redis all_keys = await safe_key_search(self.database, "*") # Filter out keys that contain a period (temporary, remove later) filtered_keys = [key for key in all_keys if b"." not in key] # Get the size of each data object and sum them up total_size = sum( [ await get_chunk_metadata(self.database, key).get(b"size", 0) for key in filtered_keys ] ) return total_size def store_blacklist_fn( self, synapse: storage.protocol.Store ) -> typing.Tuple[bool, str]: """ Determines whether a given synapse should be blacklisted based on the recognition of the hotkey in the metagraph. This function is used to filter out requests from entities that are not part of the network's current state. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey. Returns: - (bool, str): A tuple where the first element is a boolean indicating whether the synapse's hotkey is blacklisted, and the second element is a string message explaining the reason. If the hotkey is not recognized in the metagraph, the synapse is blacklisted, and the function returns (True, "Unrecognized hotkey"). Otherwise, it returns (False, "Hotkey recognized!"), allowing the synapse to interact with the network. Usage: This method is internally used by the network to ensure that only recognized entities can participate in communication or transactions. """ if synapse.dendrite.hotkey not in self.metagraph.hotkeys: # Ignore requests from unrecognized entities. bt.logging.trace( f"Blacklisting unrecognized hotkey {synapse.dendrite.hotkey}" ) return True, "Unrecognized hotkey" bt.logging.trace( f"Not Blacklisting recognized hotkey {synapse.dendrite.hotkey}" ) return False, "Hotkey recognized!" def store_priority_fn(self, synapse: storage.protocol.Store) -> float: """ Assigns a priority to a given synapse based on the stake of the calling entity in the metagraph. This function is crucial for prioritizing network requests and ensuring that higher-stake entities are given precedence in processing. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey of the caller. Returns: - float: The priority value assigned to the synapse, derived from the stake of the calling hotkey in the metagraph. The priority is determined by the stake associated with the caller's UID in the metagraph. A higher stake results in a higher priority. Usage: This method is used within the network's request handling mechanism to allocate resources and processing time based on the stake-based priority of each request. 
""" caller_uid = self.metagraph.hotkeys.index( synapse.dendrite.hotkey ) # Get the caller index. prirority = float( self.metagraph.S[caller_uid] ) # Return the stake as the priority. bt.logging.trace( f"Prioritizing {synapse.dendrite.hotkey} with value: ", prirority ) return prirority def challenge_blacklist_fn( self, synapse: storage.protocol.Challenge ) -> typing.Tuple[bool, str]: """ Determines whether a given synapse should be blacklisted based on the recognition of the hotkey in the metagraph. This function is used to filter out requests from entities that are not part of the network's current state. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey. Returns: - (bool, str): A tuple where the first element is a boolean indicating whether the synapse's hotkey is blacklisted, and the second element is a string message explaining the reason. If the hotkey is not recognized in the metagraph, the synapse is blacklisted, and the function returns (True, "Unrecognized hotkey"). Otherwise, it returns (False, "Hotkey recognized!"), allowing the synapse to interact with the network. Usage: This method is internally used by the network to ensure that only recognized entities can participate in communication or transactions. """ if synapse.dendrite.hotkey not in self.metagraph.hotkeys: # Ignore requests from unrecognized entities. bt.logging.trace( f"Blacklisting unrecognized hotkey {synapse.dendrite.hotkey}" ) return True, "Unrecognized hotkey" bt.logging.trace( f"Not Blacklisting recognized hotkey {synapse.dendrite.hotkey}" ) return False, "Hotkey recognized!" def challenge_priority_fn(self, synapse: storage.protocol.Challenge) -> float: """ Assigns a priority to a given synapse based on the stake of the calling entity in the metagraph. This function is crucial for prioritizing network requests and ensuring that higher-stake entities are given precedence in processing. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey of the caller. Returns: - float: The priority value assigned to the synapse, derived from the stake of the calling hotkey in the metagraph. The priority is determined by the stake associated with the caller's UID in the metagraph. A higher stake results in a higher priority. Usage: This method is used within the network's request handling mechanism to allocate resources and processing time based on the stake-based priority of each request. """ caller_uid = self.metagraph.hotkeys.index( synapse.dendrite.hotkey ) # Get the caller index. prirority = float( self.metagraph.S[caller_uid] ) # Return the stake as the priority. bt.logging.trace( f"Prioritizing {synapse.dendrite.hotkey} with value: ", prirority ) return prirority def retrieve_blacklist_fn( self, synapse: storage.protocol.Retrieve ) -> typing.Tuple[bool, str]: """ Determines whether a given synapse should be blacklisted based on the recognition of the hotkey in the metagraph. This function is used to filter out requests from entities that are not part of the network's current state. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey. Returns: - (bool, str): A tuple where the first element is a boolean indicating whether the synapse's hotkey is blacklisted, and the second element is a string message explaining the reason. 
If the hotkey is not recognized in the metagraph, the synapse is blacklisted, and the function returns (True, "Unrecognized hotkey"). Otherwise, it returns (False, "Hotkey recognized!"), allowing the synapse to interact with the network. Usage: This method is internally used by the network to ensure that only recognized entities can participate in communication or transactions. """ if synapse.dendrite.hotkey not in self.metagraph.hotkeys: # Ignore requests from unrecognized entities. bt.logging.trace( f"Blacklisting unrecognized hotkey {synapse.dendrite.hotkey}" ) return True, "Unrecognized hotkey" bt.logging.trace( f"Not Blacklisting recognized hotkey {synapse.dendrite.hotkey}" ) return False, "Hotkey recognized!" def retrieve_priority_fn(self, synapse: storage.protocol.Retrieve) -> float: """ Assigns a priority to a given synapse based on the stake of the calling entity in the metagraph. This function is crucial for prioritizing network requests and ensuring that higher-stake entities are given precedence in processing. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey of the caller. Returns: - float: The priority value assigned to the synapse, derived from the stake of the calling hotkey in the metagraph. The priority is determined by the stake associated with the caller's UID in the metagraph. A higher stake results in a higher priority. Usage: This method is used within the network's request handling mechanism to allocate resources and processing time based on the stake-based priority of each request. """ caller_uid = self.metagraph.hotkeys.index( synapse.dendrite.hotkey ) # Get the caller index. prirority = float( self.metagraph.S[caller_uid] ) # Return the stake as the priority. bt.logging.trace( f"Prioritizing {synapse.dendrite.hotkey} with value: ", prirority ) return prirority async def store(self, synapse: storage.protocol.Store) -> storage.protocol.Store: """ Processes the storage request from a synapse by securely storing the provided data and returning a proof of storage. The data is committed using elliptic curve cryptography, stored on the filesystem, and the metadata is recorded in a Redis database. A cryptographic proof of the commitment, along with a digital signature from the server's hotkey, is returned in the synapse for verification by the requester. Args: synapse (storage.protocol.Store): An object containing the data to be stored, encoded in base64 format, along with associated metadata like the cryptographic curve parameters, a seed for the commitment, and the expected commitment group elements. Returns: storage.protocol.Store: The synapse is returned with additional fields populated, including the randomness used in the commitment, the commitment point itself, a signature from this storage server's hotkey, and a commitment hash that can be used for chained proofs. The method performs the following operations: 1. Decodes the base64-encoded data into raw bytes. 2. Commits to the data using the provided elliptic curve parameters and the seed to generate a commitment point. 3. Stores the raw byte data in the filesystem using a hash of the data as the filename. 4. Records metadata about the stored data in the Redis database, including the file path, previous seed, and data size. 5. Updates the synapse object with the commitment details and a digital signature. 
This process ensures the integrity and non-repudiation of the data storage, allowing clients to verify that their data has been stored correctly without the need to retrieve the full data set. Example usage: Assuming an initialized 'committer' object and 'synapse' with necessary data: >>> updated_synapse = self.store(synapse) """ bt.logging.info(f"received store request: {synapse.encrypted_data[:24]}") self.request_count += 1 # Decode the data from base64 to raw bytes encrypted_byte_data = base64.b64decode(synapse.encrypted_data) bt.logging.trace(f"store b64decrypted data: {encrypted_byte_data[:24]}") # Store the data with the hash as the key in the filesystem bt.logging.trace(f"entering hash_data()") data_hash = hash_data(encrypted_byte_data) # If already storing this hash, simply update the validator seeds and return challenge bt.logging.trace(f"checking if data already exists...") if await self.database.exists(data_hash): # update the validator seed challenge hash in storage await update_seed_info( self.database, data_hash, synapse.dendrite.hotkey, synapse.seed ) else: # Store the data in the filesystem filepath = save_data_to_filesystem( encrypted_byte_data, self.config.database.directory, str(data_hash) ) bt.logging.trace(f"stored data {data_hash} in filepath: {filepath}") # Add the initial chunk, size, and validator seed information await store_chunk_metadata( self.database, data_hash, filepath, synapse.dendrite.hotkey, sys.getsizeof(encrypted_byte_data), synapse.seed, ) # Commit to the entire data block bt.logging.trace(f"entering ECCommitment()") committer = ECCommitment(
hex_to_ecc_point(synapse.g, synapse.curve),
4
2023-10-26 18:54:47+00:00
24k
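The store() method in the record above describes a content-addressed flow: decode the base64 payload, commit to it, hash it, write the bytes to disk under that hash, and record metadata (filepath, seed, size) in Redis. Below is a rough, simplified sketch of just the hash-and-store part of that flow; the ECC commitment and hotkey-signature steps are omitted, a plain dict stands in for Redis, and toy_store / metadata_db are hypothetical names invented for this sketch, not the repository's API.

# Illustrative only: no commitment or signing is performed, and the helper
# names here are made up for the example.
import base64
import hashlib
import os

def toy_store(encrypted_b64: str, directory: str, metadata_db: dict) -> str:
    encrypted_bytes = base64.b64decode(encrypted_b64)          # 1. decode the payload
    data_hash = hashlib.sha3_256(encrypted_bytes).hexdigest()  # 2. content-address it
    os.makedirs(directory, exist_ok=True)
    filepath = os.path.join(directory, data_hash)
    if data_hash not in metadata_db:                           # only write unseen data
        with open(filepath, "wb") as f:                        # 3. persist the raw bytes
            f.write(encrypted_bytes)
        metadata_db[data_hash] = {"filepath": filepath, "size": len(encrypted_bytes)}  # 4. record metadata
    return data_hash

db = {}
h = toy_store(base64.b64encode(b"hello world").decode(), "/tmp/toy_store", db)
print(h, db[h]["size"])  # prints the content hash and a stored size of 11 bytes

The real miner additionally updates the per-validator seed information when the hash already exists and returns a signed commitment in the synapse, as the docstring enumerates.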
cpacker/MemGPT
memgpt/main.py
[ { "identifier": "logger", "path": "memgpt/log.py", "snippet": "" }, { "identifier": "CLIInterface", "path": "memgpt/interface.py", "snippet": "class CLIInterface(AgentInterface):\r\n \"\"\"Basic interface for dumping agent events to the command-line\"\"\"\r\n\r\n @staticmethod\r\n ...
import shutil import configparser import uuid import logging import glob import os import sys import pickle import traceback import json import questionary import typer import memgpt.agent as agent import memgpt.system as system import memgpt.constants as constants import memgpt.errors as errors from rich.console import Console from prettytable import PrettyTable from memgpt.log import logger from memgpt.interface import CLIInterface as interface # for printing to terminal from memgpt.config import MemGPTConfig from memgpt.cli.cli import run, attach, version, server, open_folder, quickstart, migrate from memgpt.cli.cli_config import configure, list, add, delete from memgpt.cli.cli_load import app as load_app from memgpt.agent_store.storage import StorageConnector, TableType from memgpt.metadata import MetadataStore, save_agent
17,401
console = Console() app = typer.Typer(pretty_exceptions_enable=False) app.command(name="run")(run) app.command(name="version")(version) app.command(name="attach")(attach)
console = Console() app = typer.Typer(pretty_exceptions_enable=False) app.command(name="run")(run) app.command(name="version")(version) app.command(name="attach")(attach)
app.command(name="configure")(configure)
10
2023-10-11 07:38:37+00:00
24k
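The MemGPT record wires its CLI together by calling app.command(name=...) on a Typer app and applying the returned decorator to an imported function, which is also what the held-out next line does for configure. A minimal self-contained sketch of that registration style follows; the command bodies are placeholder stubs invented for the example, not MemGPT's actual commands.

# Typer's app.command(...) returns a decorator, so passing a plain function to
# it registers that function as a subcommand without decorator syntax.
# The stub implementations below are hypothetical.
import typer

app = typer.Typer(pretty_exceptions_enable=False)

def version():
    """Placeholder command that would normally print the package version."""
    typer.echo("0.0.0-example")

def configure():
    """Placeholder command that would normally launch an interactive config flow."""
    typer.echo("configuring...")

app.command(name="version")(version)
app.command(name="configure")(configure)

if __name__ == "__main__":
    app()  # e.g. `python cli.py version` now dispatches to version()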
PixArt-alpha/PixArt-alpha
train_scripts/train_pixart_lcm.py
[ { "identifier": "IDDPM", "path": "diffusion/iddpm.py", "snippet": "def IDDPM(\n timestep_respacing,\n noise_schedule=\"linear\",\n use_kl=False,\n sigma_small=False,\n predict_xstart=False,\n learn_sigma=True,\n pred_sigma=True,\n rescale_learned_s...
import os import sys import types import argparse import datetime import time import warnings import torch import torch.nn as nn import numpy as np import torch.nn.functional as F from pathlib import Path from accelerate import Accelerator, InitProcessGroupKwargs from accelerate.utils import DistributedType from diffusers.models import AutoencoderKL from torch.utils.data import RandomSampler from mmcv.runner import LogBuffer from copy import deepcopy from tqdm import tqdm from diffusion import IDDPM from diffusion.utils.checkpoint import save_checkpoint, load_checkpoint from diffusion.utils.dist_utils import synchronize, get_world_size, clip_grad_norm_ from diffusion.data.builder import build_dataset, build_dataloader, set_data_root from diffusion.model.builder import build_model from diffusion.utils.logger import get_root_logger from diffusion.utils.misc import set_random_seed, read_config, init_random_seed, DebugUnderflowOverflow from diffusion.utils.optimizer import build_optimizer, auto_scale_lr from diffusion.utils.lr_scheduler import build_lr_scheduler from diffusion.utils.data_sampler import AspectRatioBatchSampler, BalancedAspectRatioBatchSampler from diffusion.lcm_scheduler import LCMScheduler from torchvision.utils import save_image from accelerate import FullyShardedDataParallelPlugin from torch.distributed.fsdp.fully_sharded_data_parallel import FullStateDictConfig
16,418
) synchronize() def parse_args(): parser = argparse.ArgumentParser(description="Process some integers.") parser.add_argument("config", type=str, help="config") parser.add_argument("--cloud", action='store_true', default=False, help="cloud or local machine") parser.add_argument('--work-dir', help='the dir to save logs and models') parser.add_argument('--resume-from', help='the dir to resume the training') parser.add_argument('--load-from', default=None, help='the dir to load a ckpt for training') parser.add_argument('--local-rank', type=int, default=-1) parser.add_argument('--local_rank', type=int, default=-1) parser.add_argument('--debug', action='store_true') args = parser.parse_args() return args if __name__ == '__main__': args = parse_args() config = read_config(args.config) if args.work_dir is not None: # update configs according to CLI args if args.work_dir is not None config.work_dir = args.work_dir if args.cloud: config.data_root = '/data/data' if args.resume_from is not None: config.load_from = None config.resume_from = dict( checkpoint=args.resume_from, load_ema=False, resume_optimizer=True, resume_lr_scheduler=True) if args.debug: config.log_interval = 1 config.train_batch_size = 11 config.valid_num = 100 config.load_from = None os.umask(0o000) os.makedirs(config.work_dir, exist_ok=True) init_handler = InitProcessGroupKwargs() init_handler.timeout = datetime.timedelta(seconds=5400) # change timeout to avoid a strange NCCL bug # Initialize accelerator and tensorboard logging if config.use_fsdp: init_train = 'FSDP' set_fsdp_env() fsdp_plugin = FullyShardedDataParallelPlugin(state_dict_config=FullStateDictConfig(offload_to_cpu=False, rank0_only=False),) else: init_train = 'DDP' fsdp_plugin = None even_batches = True if config.multi_scale: even_batches=False, accelerator = Accelerator( mixed_precision=config.mixed_precision, gradient_accumulation_steps=config.gradient_accumulation_steps, log_with="tensorboard", project_dir=os.path.join(config.work_dir, "logs"), fsdp_plugin=fsdp_plugin, even_batches=even_batches, kwargs_handlers=[init_handler] ) logger = get_root_logger(os.path.join(config.work_dir, 'train_log.log')) config.seed = init_random_seed(config.get('seed', None)) set_random_seed(config.seed) if accelerator.is_main_process: config.dump(os.path.join(config.work_dir, 'config.py')) logger.info(f"Config: \n{config.pretty_text}") logger.info(f"World_size: {get_world_size()}, seed: {config.seed}") logger.info(f"Initializing: {init_train} for training") image_size = config.image_size # @param [256, 512] latent_size = int(image_size) // 8 pred_sigma = getattr(config, 'pred_sigma', True) learn_sigma = getattr(config, 'learn_sigma', True) and pred_sigma model_kwargs={"window_block_indexes": config.window_block_indexes, "window_size": config.window_size, "use_rel_pos": config.use_rel_pos, "lewei_scale": config.lewei_scale, 'config':config, 'model_max_length': config.model_max_length} # build models train_diffusion = IDDPM(str(config.train_sampling_steps), learn_sigma=learn_sigma, pred_sigma=pred_sigma, snr=config.snr_loss, return_startx=True) model = build_model(config.model, config.grad_checkpointing, config.get('fp32_attention', False), input_size=latent_size, learn_sigma=learn_sigma, pred_sigma=pred_sigma, **model_kwargs).train() logger.info(f"{model.__class__.__name__} Model Parameters: {sum(p.numel() for p in model.parameters()):,}") if config.load_from is not None: if args.load_from is not None: config.load_from = args.load_from missing, unexpected = 
load_checkpoint(config.load_from, model, load_ema=config.get('load_ema', False)) logger.warning(f'Missing keys: {missing}') logger.warning(f'Unexpected keys: {unexpected}') model_ema = deepcopy(model).eval() model_teacher = deepcopy(model).eval() if not config.data.load_vae_feat: vae = AutoencoderKL.from_pretrained(config.vae_pretrained).cuda() # prepare for FSDP clip grad norm calculation if accelerator.distributed_type == DistributedType.FSDP: for m in accelerator._models: m.clip_grad_norm_ = types.MethodType(clip_grad_norm_, m) # build dataloader set_data_root(config.data_root) dataset = build_dataset(config.data, resolution=image_size, aspect_ratio_type=config.aspect_ratio_type) if config.multi_scale:
current_file_path = Path(__file__).resolve() sys.path.insert(0, str(current_file_path.parent.parent)) warnings.filterwarnings("ignore") # ignore warning def set_fsdp_env(): os.environ["ACCELERATE_USE_FSDP"] = 'true' os.environ["FSDP_AUTO_WRAP_POLICY"] = 'TRANSFORMER_BASED_WRAP' os.environ["FSDP_BACKWARD_PREFETCH"] = 'BACKWARD_PRE' os.environ["FSDP_TRANSFORMER_CLS_TO_WRAP"] = 'PixArtBlock' def ema_update(model_dest: nn.Module, model_src: nn.Module, rate): param_dict_src = dict(model_src.named_parameters()) for p_name, p_dest in model_dest.named_parameters(): p_src = param_dict_src[p_name] assert p_src is not p_dest p_dest.data.mul_(rate).add_((1 - rate) * p_src.data) def append_dims(x, target_dims): """Appends dimensions to the end of a tensor until it has target_dims dimensions.""" dims_to_append = target_dims - x.ndim if dims_to_append < 0: raise ValueError(f"input has {x.ndim} dims but target_dims is {target_dims}, which is less") return x[(...,) + (None,) * dims_to_append] # From LCMScheduler.get_scalings_for_boundary_condition_discrete def scalings_for_boundary_conditions(timestep, sigma_data=0.5, timestep_scaling=10.0): c_skip = sigma_data**2 / ((timestep / 0.1) ** 2 + sigma_data**2) c_out = (timestep / 0.1) / ((timestep / 0.1) ** 2 + sigma_data**2) ** 0.5 return c_skip, c_out def extract_into_tensor(a, t, x_shape): b, *_ = t.shape out = a.gather(-1, t) return out.reshape(b, *((1,) * (len(x_shape) - 1))) class DDIMSolver: def __init__(self, alpha_cumprods, timesteps=1000, ddim_timesteps=50): # DDIM sampling parameters step_ratio = timesteps // ddim_timesteps self.ddim_timesteps = (np.arange(1, ddim_timesteps + 1) * step_ratio).round().astype(np.int64) - 1 self.ddim_alpha_cumprods = alpha_cumprods[self.ddim_timesteps] self.ddim_alpha_cumprods_prev = np.asarray( [alpha_cumprods[0]] + alpha_cumprods[self.ddim_timesteps[:-1]].tolist() ) # convert to torch tensors self.ddim_timesteps = torch.from_numpy(self.ddim_timesteps).long() self.ddim_alpha_cumprods = torch.from_numpy(self.ddim_alpha_cumprods) self.ddim_alpha_cumprods_prev = torch.from_numpy(self.ddim_alpha_cumprods_prev) def to(self, device): self.ddim_timesteps = self.ddim_timesteps.to(device) self.ddim_alpha_cumprods = self.ddim_alpha_cumprods.to(device) self.ddim_alpha_cumprods_prev = self.ddim_alpha_cumprods_prev.to(device) return self def ddim_step(self, pred_x0, pred_noise, timestep_index): alpha_cumprod_prev = extract_into_tensor(self.ddim_alpha_cumprods_prev, timestep_index, pred_x0.shape) dir_xt = (1.0 - alpha_cumprod_prev).sqrt() * pred_noise x_prev = alpha_cumprod_prev.sqrt() * pred_x0 + dir_xt return x_prev @torch.no_grad() def log_validation(model, step, device): if hasattr(model, 'module'): model = model.module scheduler = LCMScheduler(beta_start=0.0001, beta_end=0.02, beta_schedule="linear", prediction_type="epsilon") scheduler.set_timesteps(4, 50) infer_timesteps = scheduler.timesteps dog_embed = torch.load('data/tmp/dog.pth', map_location='cpu') caption_embs, emb_masks = dog_embed['dog_text'].to(device), dog_embed['dog_mask'].to(device) hw = torch.tensor([[1024, 1024]], dtype=torch.float, device=device).repeat(1, 1) ar = torch.tensor([[1.]], device=device).repeat(1, 1) # Create sampling noise: infer_latents = torch.randn(1, 4, 1024, 1024, device=device) model_kwargs = dict(data_info={'img_hw': hw, 'aspect_ratio': ar}, mask=emb_masks) logger.info("Running validation... ") # 7. 
LCM MultiStep Sampling Loop: for i, t in tqdm(list(enumerate(infer_timesteps))): ts = torch.full((1,), t, device=device, dtype=torch.long) # model prediction (v-prediction, eps, x) model_pred = model(infer_latents, ts, caption_embs, **model_kwargs)[:, :4] # compute the previous noisy sample x_t -> x_t-1 infer_latents, denoised = scheduler.step(model_pred, i, t, infer_latents, return_dict=False) samples = vae.decode(denoised / 0.18215).sample torch.cuda.empty_cache() save_image(samples[0], f'output_cv/vis/{step}.jpg', nrow=1, normalize=True, value_range=(-1, 1)) def train(): if config.get('debug_nan', False): DebugUnderflowOverflow(model) logger.info('NaN debugger registered. Start to detect overflow during training.') time_start, last_tic = time.time(), time.time() log_buffer = LogBuffer() start_step = start_epoch * len(train_dataloader) global_step = 0 total_steps = len(train_dataloader) * config.num_epochs load_vae_feat = getattr(train_dataloader.dataset, 'load_vae_feat', False) # Create uncond embeds for classifier free guidance uncond_prompt_embeds = model.module.y_embedder.y_embedding.repeat(config.train_batch_size, 1, 1, 1) # Now you train the model for epoch in range(start_epoch + 1, config.num_epochs + 1): data_time_start= time.time() data_time_all = 0 for step, batch in enumerate(train_dataloader): data_time_all += time.time() - data_time_start if load_vae_feat: z = batch[0] else: with torch.no_grad(): with torch.cuda.amp.autocast(enabled=config.mixed_precision == 'fp16'): posterior = vae.encode(batch[0]).latent_dist if config.sample_posterior: z = posterior.sample() else: z = posterior.mode() latents = z * config.scale_factor y = batch[1] y_mask = batch[2] data_info = batch[3] # Sample a random timestep for each image grad_norm = None with accelerator.accumulate(model): # Predict the noise residual optimizer.zero_grad() # Sample noise that we'll add to the latents noise = torch.randn_like(latents) bsz = latents.shape[0] # Sample a random timestep for each image t_n ~ U[0, N - k - 1] without bias. topk = config.train_sampling_steps // config.num_ddim_timesteps index = torch.randint(0, config.num_ddim_timesteps, (bsz,), device=latents.device).long() start_timesteps = solver.ddim_timesteps[index] timesteps = start_timesteps - topk timesteps = torch.where(timesteps < 0, torch.zeros_like(timesteps), timesteps) # Get boundary scalings for start_timesteps and (end) timesteps. 
c_skip_start, c_out_start = scalings_for_boundary_conditions(start_timesteps) c_skip_start, c_out_start = [append_dims(x, latents.ndim) for x in [c_skip_start, c_out_start]] c_skip, c_out = scalings_for_boundary_conditions(timesteps) c_skip, c_out = [append_dims(x, latents.ndim) for x in [c_skip, c_out]] # Sample a random guidance scale w from U[w_min, w_max] and embed it # w = (config.w_max - config.w_min) * torch.rand((bsz,)) + config.w_min w = config.cfg_scale * torch.ones((bsz,)) w = w.reshape(bsz, 1, 1, 1) w = w.to(device=latents.device, dtype=latents.dtype) # Get online LCM prediction on z_{t_{n + k}}, w, c, t_{n + k} _, pred_x_0, noisy_model_input = train_diffusion.training_losses(model, latents, start_timesteps, model_kwargs=dict(y=y, mask=y_mask, data_info=data_info), noise=noise) model_pred = c_skip_start * noisy_model_input + c_out_start * pred_x_0 # Use the ODE solver to predict the kth step in the augmented PF-ODE trajectory after # noisy_latents with both the conditioning embedding c and unconditional embedding 0 # Get teacher model prediction on noisy_latents and conditional embedding with torch.no_grad(): with torch.autocast("cuda"): cond_teacher_output, cond_pred_x0, _ = train_diffusion.training_losses(model_teacher, latents, start_timesteps, model_kwargs=dict(y=y, mask=y_mask, data_info=data_info), noise=noise) # Get teacher model prediction on noisy_latents and unconditional embedding uncond_teacher_output, uncond_pred_x0, _ = train_diffusion.training_losses(model_teacher, latents, start_timesteps, model_kwargs=dict(y=uncond_prompt_embeds, mask=y_mask, data_info=data_info), noise=noise) # Perform "CFG" to get x_prev estimate (using the LCM paper's CFG formulation) pred_x0 = cond_pred_x0 + w * (cond_pred_x0 - uncond_pred_x0) pred_noise = cond_teacher_output + w * (cond_teacher_output - uncond_teacher_output) x_prev = solver.ddim_step(pred_x0, pred_noise, index) # Get target LCM prediction on x_prev, w, c, t_n with torch.no_grad(): with torch.autocast("cuda", enabled=True): _, pred_x_0, _ = train_diffusion.training_losses(model_ema, x_prev.float(), timesteps, model_kwargs=dict(y=y, mask=y_mask, data_info=data_info), skip_noise=True) target = c_skip * x_prev + c_out * pred_x_0 # Calculate loss if config.loss_type == "l2": loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") elif config.loss_type == "huber": loss = torch.mean(torch.sqrt((model_pred.float() - target.float()) ** 2 + config.huber_c**2) - config.huber_c) # Backpropagation on the online student model (`model`) accelerator.backward(loss) if accelerator.sync_gradients: grad_norm = accelerator.clip_grad_norm_(model.parameters(), config.gradient_clip) optimizer.step() lr_scheduler.step() optimizer.zero_grad(set_to_none=True) if accelerator.sync_gradients: ema_update(model_ema, model, config.ema_decay) lr = lr_scheduler.get_last_lr()[0] logs = {"loss": accelerator.gather(loss).mean().item()} if grad_norm is not None: logs.update(grad_norm=accelerator.gather(grad_norm).mean().item()) log_buffer.update(logs) if (step + 1) % config.log_interval == 0 or (step + 1) == 1: t = (time.time() - last_tic) / config.log_interval t_d = data_time_all / config.log_interval avg_time = (time.time() - time_start) / (global_step + 1) eta = str(datetime.timedelta(seconds=int(avg_time * (total_steps - start_step - global_step - 1)))) eta_epoch = str(datetime.timedelta(seconds=int(avg_time * (len(train_dataloader) - step - 1)))) # avg_loss = sum(loss_buffer) / len(loss_buffer) log_buffer.average() info = f"Step/Epoch 
[{(epoch-1)*len(train_dataloader)+step+1}/{epoch}][{step + 1}/{len(train_dataloader)}]:total_eta: {eta}, " \ f"epoch_eta:{eta_epoch}, time_all:{t:.3f}, time_data:{t_d:.3f}, lr:{lr:.3e}, s:({data_info['resolution'][0][0].item()}, {data_info['resolution'][0][1].item()}), " info += ', '.join([f"{k}:{v:.4f}" for k, v in log_buffer.output.items()]) logger.info(info) last_tic = time.time() log_buffer.clear() data_time_all = 0 logs.update(lr=lr) accelerator.log(logs, step=global_step + start_step) global_step += 1 data_time_start= time.time() synchronize() torch.cuda.empty_cache() if accelerator.is_main_process: # log_validation(model_ema, step, model.device) if ((epoch - 1) * len(train_dataloader) + step + 1) % config.save_model_steps == 0: os.umask(0o000) save_checkpoint(os.path.join(config.work_dir, 'checkpoints'), epoch=epoch, step=(epoch - 1) * len(train_dataloader) + step + 1, model=accelerator.unwrap_model(model), model_ema=accelerator.unwrap_model(model_ema), optimizer=optimizer, lr_scheduler=lr_scheduler ) synchronize() synchronize() if accelerator.is_main_process: if epoch % config.save_model_epochs == 0 or epoch == config.num_epochs: os.umask(0o000) save_checkpoint(os.path.join(config.work_dir, 'checkpoints'), epoch=epoch, step=(epoch - 1) * len(train_dataloader) + step + 1, model=accelerator.unwrap_model(model), model_ema=accelerator.unwrap_model(model_ema), optimizer=optimizer, lr_scheduler=lr_scheduler ) synchronize() def parse_args(): parser = argparse.ArgumentParser(description="Process some integers.") parser.add_argument("config", type=str, help="config") parser.add_argument("--cloud", action='store_true', default=False, help="cloud or local machine") parser.add_argument('--work-dir', help='the dir to save logs and models') parser.add_argument('--resume-from', help='the dir to resume the training') parser.add_argument('--load-from', default=None, help='the dir to load a ckpt for training') parser.add_argument('--local-rank', type=int, default=-1) parser.add_argument('--local_rank', type=int, default=-1) parser.add_argument('--debug', action='store_true') args = parser.parse_args() return args if __name__ == '__main__': args = parse_args() config = read_config(args.config) if args.work_dir is not None: # update configs according to CLI args if args.work_dir is not None config.work_dir = args.work_dir if args.cloud: config.data_root = '/data/data' if args.resume_from is not None: config.load_from = None config.resume_from = dict( checkpoint=args.resume_from, load_ema=False, resume_optimizer=True, resume_lr_scheduler=True) if args.debug: config.log_interval = 1 config.train_batch_size = 11 config.valid_num = 100 config.load_from = None os.umask(0o000) os.makedirs(config.work_dir, exist_ok=True) init_handler = InitProcessGroupKwargs() init_handler.timeout = datetime.timedelta(seconds=5400) # change timeout to avoid a strange NCCL bug # Initialize accelerator and tensorboard logging if config.use_fsdp: init_train = 'FSDP' set_fsdp_env() fsdp_plugin = FullyShardedDataParallelPlugin(state_dict_config=FullStateDictConfig(offload_to_cpu=False, rank0_only=False),) else: init_train = 'DDP' fsdp_plugin = None even_batches = True if config.multi_scale: even_batches=False, accelerator = Accelerator( mixed_precision=config.mixed_precision, gradient_accumulation_steps=config.gradient_accumulation_steps, log_with="tensorboard", project_dir=os.path.join(config.work_dir, "logs"), fsdp_plugin=fsdp_plugin, even_batches=even_batches, kwargs_handlers=[init_handler] ) logger = 
get_root_logger(os.path.join(config.work_dir, 'train_log.log')) config.seed = init_random_seed(config.get('seed', None)) set_random_seed(config.seed) if accelerator.is_main_process: config.dump(os.path.join(config.work_dir, 'config.py')) logger.info(f"Config: \n{config.pretty_text}") logger.info(f"World_size: {get_world_size()}, seed: {config.seed}") logger.info(f"Initializing: {init_train} for training") image_size = config.image_size # @param [256, 512] latent_size = int(image_size) // 8 pred_sigma = getattr(config, 'pred_sigma', True) learn_sigma = getattr(config, 'learn_sigma', True) and pred_sigma model_kwargs={"window_block_indexes": config.window_block_indexes, "window_size": config.window_size, "use_rel_pos": config.use_rel_pos, "lewei_scale": config.lewei_scale, 'config':config, 'model_max_length': config.model_max_length} # build models train_diffusion = IDDPM(str(config.train_sampling_steps), learn_sigma=learn_sigma, pred_sigma=pred_sigma, snr=config.snr_loss, return_startx=True) model = build_model(config.model, config.grad_checkpointing, config.get('fp32_attention', False), input_size=latent_size, learn_sigma=learn_sigma, pred_sigma=pred_sigma, **model_kwargs).train() logger.info(f"{model.__class__.__name__} Model Parameters: {sum(p.numel() for p in model.parameters()):,}") if config.load_from is not None: if args.load_from is not None: config.load_from = args.load_from missing, unexpected = load_checkpoint(config.load_from, model, load_ema=config.get('load_ema', False)) logger.warning(f'Missing keys: {missing}') logger.warning(f'Unexpected keys: {unexpected}') model_ema = deepcopy(model).eval() model_teacher = deepcopy(model).eval() if not config.data.load_vae_feat: vae = AutoencoderKL.from_pretrained(config.vae_pretrained).cuda() # prepare for FSDP clip grad norm calculation if accelerator.distributed_type == DistributedType.FSDP: for m in accelerator._models: m.clip_grad_norm_ = types.MethodType(clip_grad_norm_, m) # build dataloader set_data_root(config.data_root) dataset = build_dataset(config.data, resolution=image_size, aspect_ratio_type=config.aspect_ratio_type) if config.multi_scale:
batch_sampler = AspectRatioBatchSampler(sampler=RandomSampler(dataset), dataset=dataset,
18
2023-10-12 14:16:33+00:00
24k
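Two small helpers carry much of the LCM-distillation logic in the PixArt record above: append_dims, which pads trailing singleton dimensions so a per-sample scalar broadcasts over a latent tensor, and scalings_for_boundary_conditions, which yields the c_skip / c_out weights that make the consistency model reduce to the identity at timestep 0. A brief standalone check of both, reusing the snippet's constants (sigma_data = 0.5, timesteps scaled by 1/0.1) with toy shapes:

import torch

def append_dims(x: torch.Tensor, target_dims: int) -> torch.Tensor:
    # Append trailing singleton dims until x has target_dims dimensions.
    dims_to_append = target_dims - x.ndim
    if dims_to_append < 0:
        raise ValueError(f"input has {x.ndim} dims but target_dims is {target_dims}")
    return x[(...,) + (None,) * dims_to_append]

def scalings_for_boundary_conditions(timestep, sigma_data=0.5):
    scaled_t = timestep / 0.1
    c_skip = sigma_data**2 / (scaled_t**2 + sigma_data**2)
    c_out = scaled_t / (scaled_t**2 + sigma_data**2) ** 0.5
    return c_skip, c_out

latents = torch.randn(4, 4, 32, 32)                  # toy latent batch
timesteps = torch.tensor([0.0, 99.0, 499.0, 999.0])  # one timestep per sample
c_skip, c_out = scalings_for_boundary_conditions(timesteps)
c_skip = append_dims(c_skip, latents.ndim)           # shape (4, 1, 1, 1), broadcastable
c_out = append_dims(c_out, latents.ndim)
# At t = 0 the boundary condition forces c_skip = 1 and c_out = 0, so the
# consistency prediction c_skip * x_t + c_out * f(x_t) returns the input.
assert torch.isclose(c_skip[0].squeeze(), torch.tensor(1.0))
assert torch.isclose(c_out[0].squeeze(), torch.tensor(0.0))

The training loop then forms the student prediction as c_skip_start * noisy_model_input + c_out_start * pred_x_0, exactly as in the snippet, before matching it against the EMA target.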
NVlabs/EmerNeRF
train_emernerf.py
[ { "identifier": "metrics", "path": "datasets/metrics.py", "snippet": "def compute_valid_depth_rmse(prediction: Tensor, target: Tensor) -> float:\ndef compute_psnr(prediction: Tensor, target: Tensor) -> float:\ndef compute_ssim(\n prediction: Union[Tensor, np.ndarray], target: Union[Tensor, np.ndarray...
import argparse import json import logging import os import time import imageio import numpy as np import torch import torch.utils.data import builders import loss import utils.misc as misc import wandb from typing import List, Optional from omegaconf import OmegaConf from tqdm import tqdm from datasets import metrics from datasets.base import SceneDataset from radiance_fields import DensityField, RadianceField from radiance_fields.render_utils import render_rays from radiance_fields.video_utils import render_pixels, save_videos from third_party.nerfacc_prop_net import PropNetEstimator, get_proposal_requires_grad_fn from utils.logging import MetricLogger, setup_logging from utils.visualization_tools import visualize_voxels, visualize_scene_flow from datasets.waymo import WaymoDataset from datasets.nuscenes import NuScenesDataset
19,797
) parser.add_argument( "--render_data_video_only", action="store_true", help="Quit after rendering a data video", ) parser.add_argument( "--render_video_postfix", type=str, default=None, help="an optional postfix for video", ) parser.add_argument( "--output_root", default="./work_dirs/", help="path to save checkpoints and logs", type=str, ) # wandb logging part parser.add_argument( "--enable_wandb", action="store_true", help="enable wandb logging" ) parser.add_argument( "--entity", default="YOUR ENTITY NAME", type=str, help="wandb entity name", required=False, ) parser.add_argument( "--project", default="emernerf", type=str, help="wandb project name, also used to enhance log_dir", required=True, ) parser.add_argument( "--run_name", default="debug", type=str, help="wandb run name, also used to enhance log_dir", required=True, ) parser.add_argument( "opts", help="Modify config options using the command-line", default=None, nargs=argparse.REMAINDER, ) return parser def setup(args): # ------ get config from args -------- # default_config = OmegaConf.create(OmegaConf.load("configs/default_config.yaml")) cfg = OmegaConf.load(args.config_file) cfg = OmegaConf.merge(default_config, cfg, OmegaConf.from_cli(args.opts)) log_dir = os.path.join(args.output_root, args.project, args.run_name) cfg.log_dir = log_dir cfg.nerf.model.num_cams = cfg.data.pixel_source.num_cams cfg.nerf.model.unbounded = cfg.nerf.unbounded cfg.nerf.propnet.unbounded = cfg.nerf.unbounded cfg.nerf.model.resume_from = cfg.resume_from os.makedirs(log_dir, exist_ok=True) for folder in [ "images", "full_videos", "test_videos", "lowres_videos", "metrics", "configs_bk", "buffer_maps", ]: os.makedirs(os.path.join(log_dir, folder), exist_ok=True) # ------ setup logging -------- # if args.enable_wandb: # sometimes wandb fails to init in cloud machines, so we give it several (many) tries while ( wandb.init( project=args.project, entity=args.entity, sync_tensorboard=True, settings=wandb.Settings(start_method="fork"), ) is not wandb.run ): continue wandb.run.name = args.run_name wandb.run.save() wandb.config.update(OmegaConf.to_container(cfg, resolve=True)) wandb.config.update(args) misc.fix_random_seeds(cfg.optim.seed) global logger setup_logging(output=log_dir, level=logging.INFO, time_string=current_time) logger.info( "\n".join("%s: %s" % (k, str(v)) for k, v in sorted(dict(vars(args)).items())) ) # -------- write config -------- # logger.info(f"Config:\n{OmegaConf.to_yaml(cfg)}") saved_cfg_path = os.path.join(log_dir, "config.yaml") with open(saved_cfg_path, "w") as f: OmegaConf.save(config=cfg, f=f) # also save a backup copy saved_cfg_path_bk = os.path.join( log_dir, "configs_bk", f"config_{current_time}.yaml" ) with open(saved_cfg_path_bk, "w") as f: OmegaConf.save(config=cfg, f=f) logger.info(f"Full config saved to {saved_cfg_path}, and {saved_cfg_path_bk}") return cfg @torch.no_grad() def do_evaluation( step: int = 0, cfg: OmegaConf = None, model: RadianceField = None, proposal_networks: Optional[List[DensityField]] = None,
logger = logging.getLogger() current_time = time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime()) # a global list of keys to render, # comment out the keys you don't want to render or uncomment the keys you want to render render_keys = [ "gt_rgbs", "rgbs", "depths", # "median_depths", "gt_dino_feats", "dino_feats", "dynamic_rgbs", "dynamic_depths", "static_rgbs", "static_depths", "forward_flows", "backward_flows", "dynamic_rgb_on_static_dinos", "dino_pe", "dino_feats_pe_free", # "dynamic_dino_on_static_rgbs", # "shadow_reduced_static_rgbs", # "shadow_only_static_rgbs", # "shadows", # "gt_sky_masks", # "sky_masks", ] def get_args_parser(): parser = argparse.ArgumentParser("Train EmernNerf for a single scene") parser.add_argument("--config_file", help="path to config file", type=str) parser.add_argument( "--eval_only", action="store_true", help="perform evaluation only" ) parser.add_argument( "--visualize_voxel", action="store_true", help="perform evaluation only" ) parser.add_argument( "--render_data_video", action="store_true", help="Render a data video", ) parser.add_argument( "--render_data_video_only", action="store_true", help="Quit after rendering a data video", ) parser.add_argument( "--render_video_postfix", type=str, default=None, help="an optional postfix for video", ) parser.add_argument( "--output_root", default="./work_dirs/", help="path to save checkpoints and logs", type=str, ) # wandb logging part parser.add_argument( "--enable_wandb", action="store_true", help="enable wandb logging" ) parser.add_argument( "--entity", default="YOUR ENTITY NAME", type=str, help="wandb entity name", required=False, ) parser.add_argument( "--project", default="emernerf", type=str, help="wandb project name, also used to enhance log_dir", required=True, ) parser.add_argument( "--run_name", default="debug", type=str, help="wandb run name, also used to enhance log_dir", required=True, ) parser.add_argument( "opts", help="Modify config options using the command-line", default=None, nargs=argparse.REMAINDER, ) return parser def setup(args): # ------ get config from args -------- # default_config = OmegaConf.create(OmegaConf.load("configs/default_config.yaml")) cfg = OmegaConf.load(args.config_file) cfg = OmegaConf.merge(default_config, cfg, OmegaConf.from_cli(args.opts)) log_dir = os.path.join(args.output_root, args.project, args.run_name) cfg.log_dir = log_dir cfg.nerf.model.num_cams = cfg.data.pixel_source.num_cams cfg.nerf.model.unbounded = cfg.nerf.unbounded cfg.nerf.propnet.unbounded = cfg.nerf.unbounded cfg.nerf.model.resume_from = cfg.resume_from os.makedirs(log_dir, exist_ok=True) for folder in [ "images", "full_videos", "test_videos", "lowres_videos", "metrics", "configs_bk", "buffer_maps", ]: os.makedirs(os.path.join(log_dir, folder), exist_ok=True) # ------ setup logging -------- # if args.enable_wandb: # sometimes wandb fails to init in cloud machines, so we give it several (many) tries while ( wandb.init( project=args.project, entity=args.entity, sync_tensorboard=True, settings=wandb.Settings(start_method="fork"), ) is not wandb.run ): continue wandb.run.name = args.run_name wandb.run.save() wandb.config.update(OmegaConf.to_container(cfg, resolve=True)) wandb.config.update(args) misc.fix_random_seeds(cfg.optim.seed) global logger setup_logging(output=log_dir, level=logging.INFO, time_string=current_time) logger.info( "\n".join("%s: %s" % (k, str(v)) for k, v in sorted(dict(vars(args)).items())) ) # -------- write config -------- # logger.info(f"Config:\n{OmegaConf.to_yaml(cfg)}") 
saved_cfg_path = os.path.join(log_dir, "config.yaml") with open(saved_cfg_path, "w") as f: OmegaConf.save(config=cfg, f=f) # also save a backup copy saved_cfg_path_bk = os.path.join( log_dir, "configs_bk", f"config_{current_time}.yaml" ) with open(saved_cfg_path_bk, "w") as f: OmegaConf.save(config=cfg, f=f) logger.info(f"Full config saved to {saved_cfg_path}, and {saved_cfg_path_bk}") return cfg @torch.no_grad() def do_evaluation( step: int = 0, cfg: OmegaConf = None, model: RadianceField = None, proposal_networks: Optional[List[DensityField]] = None,
proposal_estimator: PropNetEstimator = None,
7
2023-10-11 20:56:27+00:00
24k
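setup() in the EmerNeRF record builds its configuration by merging a default OmegaConf file, the experiment's YAML, and command-line overrides, with later sources taking precedence. A tiny illustration of that precedence follows; the keys and values are invented for the example rather than taken from EmerNeRF's configs.

from omegaconf import OmegaConf

# Stand-ins for configs/default_config.yaml, the experiment YAML, and CLI opts.
default_cfg = OmegaConf.create({"optim": {"seed": 0, "lr": 1e-3}, "nerf": {"unbounded": False}})
experiment_cfg = OmegaConf.create({"nerf": {"unbounded": True}})
cli_overrides = OmegaConf.from_dotlist(["optim.lr=0.0005"])

cfg = OmegaConf.merge(default_cfg, experiment_cfg, cli_overrides)
print(cfg.optim.lr)            # 0.0005  (CLI override wins over both files)
print(cfg.nerf.unbounded)      # True    (experiment config overrides the default)
print(OmegaConf.to_yaml(cfg))  # merged config, ready to be saved as a backup copy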